aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog3449
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/Makefile.in1
-rw-r--r--gcc/ada/ChangeLog65
-rw-r--r--gcc/ada/atree.ads9
-rw-r--r--gcc/ada/exp_aggr.adb13
-rw-r--r--gcc/ada/exp_ch7.adb2
-rw-r--r--gcc/ada/gcc-interface/decl.cc14
-rw-r--r--gcc/ada/gcc-interface/misc.cc7
-rw-r--r--gcc/ada/libgnat/a-direct.adb52
-rw-r--r--gcc/ada/nlists.ads3
-rw-r--r--gcc/ada/sem_attr.adb25
-rw-r--r--gcc/ada/sem_ch10.adb170
-rw-r--r--gcc/ada/sem_util.adb9
-rw-r--r--gcc/ada/sem_util.ads3
-rw-r--r--gcc/ada/sinfo.ads31
-rw-r--r--gcc/ada/types.ads4
-rw-r--r--gcc/addresses.h29
-rw-r--r--gcc/alias.cc17
-rw-r--r--gcc/analyzer/ChangeLog71
-rw-r--r--gcc/analyzer/access-diagram.cc431
-rw-r--r--gcc/analyzer/analyzer-logging.cc5
-rw-r--r--gcc/analyzer/analyzer.cc15
-rw-r--r--gcc/analyzer/pending-diagnostic.cc7
-rw-r--r--gcc/analyzer/program-point.cc4
-rw-r--r--gcc/auto-profile.cc4
-rw-r--r--gcc/builtins.cc46
-rw-r--r--gcc/builtins.h2
-rw-r--r--gcc/c-family/ChangeLog21
-rw-r--r--gcc/c-family/c-common.cc2
-rw-r--r--gcc/c-family/c-opts.cc2
-rw-r--r--gcc/c-family/c-warn.cc8
-rw-r--r--gcc/c/ChangeLog14
-rw-r--r--gcc/c/Make-lang.in4
-rw-r--r--gcc/c/c-decl.cc21
-rw-r--r--gcc/c/c-objc-common.cc4
-rw-r--r--gcc/calls.cc4
-rw-r--r--gcc/cfganal.cc2
-rw-r--r--gcc/cfgbuild.cc95
-rw-r--r--gcc/cfgbuild.h1
-rw-r--r--gcc/cfgloop.cc25
-rw-r--r--gcc/cfgloop.h11
-rw-r--r--gcc/combine.cc65
-rw-r--r--gcc/common.opt4
-rw-r--r--gcc/common/config/i386/cpuinfo.h28
-rw-r--r--gcc/common/config/i386/i386-common.cc53
-rw-r--r--gcc/common/config/i386/i386-cpuinfo.h4
-rw-r--r--gcc/common/config/i386/i386-isas.h2
-rw-r--r--gcc/common/config/riscv/riscv-common.cc215
-rw-r--r--gcc/config.gcc28
-rw-r--r--gcc/config.in6
-rw-r--r--gcc/config/aarch64/aarch64-c.cc1
-rw-r--r--gcc/config/aarch64/aarch64-cores.def2
-rw-r--r--gcc/config/aarch64/aarch64-opts.h26
-rw-r--r--gcc/config/aarch64/aarch64-protos.h26
-rw-r--r--gcc/config/aarch64/aarch64-simd.md551
-rw-r--r--gcc/config/aarch64/aarch64-sve.md2973
-rw-r--r--gcc/config/aarch64/aarch64-sve2.md922
-rw-r--r--gcc/config/aarch64/aarch64-tune.md2
-rw-r--r--gcc/config/aarch64/aarch64.cc382
-rw-r--r--gcc/config/aarch64/aarch64.h9
-rw-r--r--gcc/config/aarch64/aarch64.md1015
-rw-r--r--gcc/config/aarch64/aarch64.opt29
-rw-r--r--gcc/config/aarch64/iterators.md3
-rw-r--r--gcc/config/arc/arc-passes.def6
-rw-r--r--gcc/config/arc/arc-protos.h11
-rw-r--r--gcc/config/arc/arc.cc892
-rw-r--r--gcc/config/arc/arc.h16
-rw-r--r--gcc/config/arc/arc.md552
-rw-r--r--gcc/config/arc/arc.opt4
-rw-r--r--gcc/config/arc/predicates.md16
-rw-r--r--gcc/config/arm/constraints.md9
-rw-r--r--gcc/config/arm/sync.md219
-rw-r--r--gcc/config/arm/unspecs.md4
-rw-r--r--gcc/config/darwin.cc34
-rw-r--r--gcc/config/gcn/gcn-valu.md63
-rw-r--r--gcc/config/gcn/gcn.cc2
-rw-r--r--gcc/config/gcn/gcn.md175
-rw-r--r--gcc/config/i386/avx5124fmapsintrin.h2
-rw-r--r--gcc/config/i386/avx5124vnniwintrin.h2
-rw-r--r--gcc/config/i386/avx512bf16intrin.h31
-rw-r--r--gcc/config/i386/avx512bitalgintrin.h155
-rw-r--r--gcc/config/i386/avx512bitalgvlintrin.h180
-rw-r--r--gcc/config/i386/avx512bwintrin.h291
-rw-r--r--gcc/config/i386/avx512dqintrin.h922
-rw-r--r--gcc/config/i386/avx512erintrin.h2
-rw-r--r--gcc/config/i386/avx512fintrin.h7411
-rw-r--r--gcc/config/i386/avx512fp16intrin.h5383
-rw-r--r--gcc/config/i386/avx512ifmaintrin.h4
-rw-r--r--gcc/config/i386/avx512pfintrin.h2
-rw-r--r--gcc/config/i386/avx512vbmi2intrin.h4
-rw-r--r--gcc/config/i386/avx512vbmiintrin.h4
-rw-r--r--gcc/config/i386/avx512vnniintrin.h4
-rw-r--r--gcc/config/i386/avx512vp2intersectintrin.h4
-rw-r--r--gcc/config/i386/avx512vpopcntdqintrin.h4
-rw-r--r--gcc/config/i386/constraints.md65
-rw-r--r--gcc/config/i386/cpuid.h2
-rw-r--r--gcc/config/i386/driver-i386.cc89
-rw-r--r--gcc/config/i386/gfniintrin.h76
-rw-r--r--gcc/config/i386/i386-builtin-types.def3
-rw-r--r--gcc/config/i386/i386-builtin.def1312
-rw-r--r--gcc/config/i386/i386-builtins.cc104
-rw-r--r--gcc/config/i386/i386-builtins.h2
-rw-r--r--gcc/config/i386/i386-c.cc23
-rw-r--r--gcc/config/i386/i386-expand.cc114
-rw-r--r--gcc/config/i386/i386-isa.def2
-rw-r--r--gcc/config/i386/i386-options.cc61
-rw-r--r--gcc/config/i386/i386-opts.h8
-rw-r--r--gcc/config/i386/i386-protos.h5
-rw-r--r--gcc/config/i386/i386.cc741
-rw-r--r--gcc/config/i386/i386.h82
-rw-r--r--gcc/config/i386/i386.md390
-rw-r--r--gcc/config/i386/i386.opt40
-rw-r--r--gcc/config/i386/immintrin.h2
-rw-r--r--gcc/config/i386/mmx.md831
-rw-r--r--gcc/config/i386/predicates.md3
-rw-r--r--gcc/config/i386/sse.md1714
-rw-r--r--gcc/config/i386/usermsrintrin.h60
-rw-r--r--gcc/config/i386/vaesintrin.h4
-rw-r--r--gcc/config/i386/vpclmulqdqintrin.h4
-rw-r--r--gcc/config/i386/x86-tune.def116
-rw-r--r--gcc/config/i386/x86gprintrin.h2
-rw-r--r--gcc/config/loongarch/loongarch.cc3
-rw-r--r--gcc/config/loongarch/loongarch.h8
-rw-r--r--gcc/config/loongarch/loongarch.md8
-rw-r--r--gcc/config/loongarch/t-loongarch3
-rw-r--r--gcc/config/nvptx/nvptx.cc5
-rw-r--r--gcc/config/pa/pa.md12
-rw-r--r--gcc/config/pa/pa.opt2
-rw-r--r--gcc/config/pa/pa32-linux.h5
-rw-r--r--gcc/config/riscv/autovec.md112
-rw-r--r--gcc/config/riscv/bitmanip.md14
-rw-r--r--gcc/config/riscv/constraints.md7
-rw-r--r--gcc/config/riscv/corev.def43
-rw-r--r--gcc/config/riscv/corev.md693
-rw-r--r--gcc/config/riscv/generic-ooo.md284
-rw-r--r--gcc/config/riscv/generic.md2
-rw-r--r--gcc/config/riscv/linux.h7
-rw-r--r--gcc/config/riscv/predicates.md5
-rw-r--r--gcc/config/riscv/riscv-builtins.cc13
-rw-r--r--gcc/config/riscv/riscv-cores.def1
-rw-r--r--gcc/config/riscv/riscv-ftypes.def11
-rw-r--r--gcc/config/riscv/riscv-opts.h228
-rw-r--r--gcc/config/riscv/riscv-protos.h16
-rw-r--r--gcc/config/riscv/riscv-string.cc155
-rw-r--r--gcc/config/riscv/riscv-subset.h11
-rw-r--r--gcc/config/riscv/riscv-v.cc456
-rw-r--r--gcc/config/riscv/riscv-vector-costs.cc125
-rw-r--r--gcc/config/riscv/riscv-vsetvl.cc4
-rw-r--r--gcc/config/riscv/riscv.cc448
-rw-r--r--gcc/config/riscv/riscv.h9
-rw-r--r--gcc/config/riscv/riscv.md77
-rw-r--r--gcc/config/riscv/riscv.opt186
-rw-r--r--gcc/config/riscv/sifive-7.md2
-rw-r--r--gcc/config/riscv/thead.cc11
-rw-r--r--gcc/config/riscv/vector-iterators.md219
-rw-r--r--gcc/config/riscv/vector.md63
-rw-r--r--gcc/config/rs6000/predicates.md5
-rw-r--r--gcc/config/rs6000/rs6000.cc222
-rw-r--r--gcc/config/rs6000/rs6000.md157
-rw-r--r--gcc/config/rs6000/vector.md4
-rw-r--r--gcc/config/rs6000/vsx.md7
-rw-r--r--gcc/config/s390/s390.md6
-rw-r--r--gcc/config/s390/vector.md16
-rwxr-xr-xgcc/configure33
-rw-r--r--gcc/configure.ac8
-rw-r--r--gcc/cp/ChangeLog80
-rw-r--r--gcc/cp/Make-lang.in4
-rw-r--r--gcc/cp/constexpr.cc19
-rw-r--r--gcc/cp/constraint.cc3
-rw-r--r--gcc/cp/cp-gimplify.cc52
-rw-r--r--gcc/cp/cp-tree.h12
-rw-r--r--gcc/cp/error.cc10
-rw-r--r--gcc/cp/mangle.cc92
-rw-r--r--gcc/cp/module.cc5
-rw-r--r--gcc/cp/parser.cc30
-rw-r--r--gcc/cp/pt.cc4
-rw-r--r--gcc/cp/semantics.cc25
-rw-r--r--gcc/d/ChangeLog87
-rw-r--r--gcc/d/d-builtins.cc3
-rw-r--r--gcc/d/d-diagnostic.cc15
-rw-r--r--gcc/d/d-lang.cc255
-rw-r--r--gcc/d/d-tree.h3
-rw-r--r--gcc/d/decl.cc4
-rw-r--r--gcc/d/dmd/MERGE2
-rw-r--r--gcc/d/dmd/VERSION2
-rw-r--r--gcc/d/dmd/access.d3
-rw-r--r--gcc/d/dmd/aggregate.d11
-rw-r--r--gcc/d/dmd/aggregate.h1
-rw-r--r--gcc/d/dmd/arrayop.d11
-rw-r--r--gcc/d/dmd/attrib.d9
-rw-r--r--gcc/d/dmd/blockexit.d108
-rw-r--r--gcc/d/dmd/canthrow.d43
-rw-r--r--gcc/d/dmd/chkformat.d32
-rw-r--r--gcc/d/dmd/clone.d22
-rw-r--r--gcc/d/dmd/compiler.d1
-rw-r--r--gcc/d/dmd/cond.d6
-rw-r--r--gcc/d/dmd/constfold.d18
-rw-r--r--gcc/d/dmd/cparse.d16
-rw-r--r--gcc/d/dmd/cppmangle.d12
-rw-r--r--gcc/d/dmd/ctfe.h1
-rw-r--r--gcc/d/dmd/ctfeexpr.d14
-rw-r--r--gcc/d/dmd/dcast.d64
-rw-r--r--gcc/d/dmd/dclass.d64
-rw-r--r--gcc/d/dmd/declaration.d23
-rw-r--r--gcc/d/dmd/delegatize.d1
-rw-r--r--gcc/d/dmd/denum.d7
-rw-r--r--gcc/d/dmd/dimport.d2
-rw-r--r--gcc/d/dmd/dinterpret.d310
-rw-r--r--gcc/d/dmd/dmacro.d56
-rw-r--r--gcc/d/dmd/dmangle.d20
-rw-r--r--gcc/d/dmd/dmodule.d48
-rw-r--r--gcc/d/dmd/doc.d351
-rw-r--r--gcc/d/dmd/doc.h3
-rw-r--r--gcc/d/dmd/dscope.d1
-rw-r--r--gcc/d/dmd/dstruct.d3
-rw-r--r--gcc/d/dmd/dsymbol.d88
-rw-r--r--gcc/d/dmd/dsymbol.h4
-rw-r--r--gcc/d/dmd/dsymbolsem.d364
-rw-r--r--gcc/d/dmd/dtemplate.d93
-rw-r--r--gcc/d/dmd/dtoh.d30
-rw-r--r--gcc/d/dmd/dversion.d13
-rw-r--r--gcc/d/dmd/errors.h3
-rw-r--r--gcc/d/dmd/errorsink.d1
-rw-r--r--gcc/d/dmd/escape.d40
-rw-r--r--gcc/d/dmd/expression.d381
-rw-r--r--gcc/d/dmd/expression.h7
-rw-r--r--gcc/d/dmd/expressionsem.d548
-rw-r--r--gcc/d/dmd/func.d57
-rw-r--r--gcc/d/dmd/globals.d90
-rw-r--r--gcc/d/dmd/globals.h83
-rw-r--r--gcc/d/dmd/hdrgen.d969
-rw-r--r--gcc/d/dmd/hdrgen.h4
-rw-r--r--gcc/d/dmd/iasm.d1
-rw-r--r--gcc/d/dmd/iasmgcc.d2
-rw-r--r--gcc/d/dmd/id.d4
-rw-r--r--gcc/d/dmd/importc.d5
-rw-r--r--gcc/d/dmd/init.d8
-rw-r--r--gcc/d/dmd/init.h2
-rw-r--r--gcc/d/dmd/initsem.d58
-rw-r--r--gcc/d/dmd/json.d25
-rw-r--r--gcc/d/dmd/json.h2
-rw-r--r--gcc/d/dmd/lexer.d163
-rw-r--r--gcc/d/dmd/location.d20
-rw-r--r--gcc/d/dmd/module.h2
-rw-r--r--gcc/d/dmd/mtype.d61
-rw-r--r--gcc/d/dmd/mtype.h5
-rw-r--r--gcc/d/dmd/mustuse.d3
-rw-r--r--gcc/d/dmd/nogc.d4
-rw-r--r--gcc/d/dmd/nspace.d3
-rw-r--r--gcc/d/dmd/ob.d20
-rw-r--r--gcc/d/dmd/objc.d32
-rw-r--r--gcc/d/dmd/opover.d40
-rw-r--r--gcc/d/dmd/optimize.d81
-rw-r--r--gcc/d/dmd/parse.d125
-rw-r--r--gcc/d/dmd/printast.d2
-rw-r--r--gcc/d/dmd/root/filename.d7
-rw-r--r--gcc/d/dmd/root/rootobject.d6
-rw-r--r--gcc/d/dmd/safe.d23
-rw-r--r--gcc/d/dmd/semantic2.d87
-rw-r--r--gcc/d/dmd/semantic3.d62
-rw-r--r--gcc/d/dmd/sideeffect.d9
-rw-r--r--gcc/d/dmd/statement.d256
-rw-r--r--gcc/d/dmd/statement.h11
-rw-r--r--gcc/d/dmd/statementsem.d481
-rw-r--r--gcc/d/dmd/staticcond.d3
-rw-r--r--gcc/d/dmd/tokens.d22
-rw-r--r--gcc/d/dmd/tokens.h1
-rw-r--r--gcc/d/dmd/traits.d104
-rw-r--r--gcc/d/dmd/typesem.d61
-rw-r--r--gcc/d/dmd/typinf.d4
-rw-r--r--gcc/d/dmd/utils.d24
-rw-r--r--gcc/d/expr.cc18
-rw-r--r--gcc/d/intrinsics.cc47
-rw-r--r--gcc/d/intrinsics.def128
-rw-r--r--gcc/d/modules.cc9
-rw-r--r--gcc/data-streamer-in.cc8
-rw-r--r--gcc/data-streamer.h2
-rw-r--r--gcc/diagnostic-format-json.cc230
-rw-r--r--gcc/diagnostic-format-sarif.cc212
-rw-r--r--gcc/diagnostic-show-locus.cc95
-rw-r--r--gcc/diagnostic.cc180
-rw-r--r--gcc/diagnostic.h182
-rw-r--r--gcc/doc/extend.texi263
-rw-r--r--gcc/doc/invoke.texi86
-rw-r--r--gcc/doc/options.texi26
-rw-r--r--gcc/doc/sourcebuild.texi15
-rw-r--r--gcc/doc/tm.texi26
-rw-r--r--gcc/doc/tm.texi.in26
-rw-r--r--gcc/double-int.h3
-rw-r--r--gcc/dumpfile.cc13
-rw-r--r--gcc/dwarf2cfi.cc6
-rw-r--r--gcc/dwarf2out.cc39
-rw-r--r--gcc/dwarf2out.h37
-rw-r--r--gcc/edit-context.cc52
-rw-r--r--gcc/emit-rtl.cc2
-rw-r--r--gcc/emit-rtl.h8
-rw-r--r--gcc/explow.cc5
-rw-r--r--gcc/expr.cc22
-rw-r--r--gcc/expr.h4
-rw-r--r--gcc/fold-const.cc28
-rw-r--r--gcc/fold-const.h2
-rw-r--r--gcc/fold-mem-offsets.cc901
-rw-r--r--gcc/fortran/ChangeLog108
-rw-r--r--gcc/fortran/decl.cc4
-rw-r--r--gcc/fortran/error.cc14
-rw-r--r--gcc/fortran/frontend-passes.cc2
-rw-r--r--gcc/fortran/gfortran.h1
-rw-r--r--gcc/fortran/intrinsic.texi27
-rw-r--r--gcc/fortran/invoke.texi6
-rw-r--r--gcc/fortran/match.cc9
-rw-r--r--gcc/fortran/openmp.cc64
-rw-r--r--gcc/fortran/options.cc9
-rw-r--r--gcc/fortran/parse.cc30
-rw-r--r--gcc/fortran/scanner.cc4
-rw-r--r--gcc/fortran/trans-array.cc44
-rw-r--r--gcc/fortran/trans-decl.cc126
-rw-r--r--gcc/fortran/trans-expr.cc40
-rw-r--r--gcc/fortran/trans-openmp.cc77
-rw-r--r--gcc/function.cc8
-rw-r--r--gcc/function.h6
-rw-r--r--gcc/gengtype.cc2
-rw-r--r--gcc/genmatch.cc4
-rw-r--r--gcc/genmodes.cc12
-rw-r--r--gcc/gensupport.cc34
-rw-r--r--gcc/ggc-common.cc15
-rw-r--r--gcc/gimple-fold.cc8
-rw-r--r--gcc/gimple-lower-bitint.cc6
-rw-r--r--gcc/gimple-match-head.cc2
-rw-r--r--gcc/gimple-range-cache.cc3
-rw-r--r--gcc/gimple-range-fold.cc4
-rw-r--r--gcc/gimple-range-gori.cc223
-rw-r--r--gcc/gimple-range-gori.h15
-rw-r--r--gcc/gimple-range.cc300
-rw-r--r--gcc/gimple-range.h28
-rw-r--r--gcc/gimple-ssa-sprintf.cc11
-rw-r--r--gcc/gimple-ssa-store-merging.cc2
-rw-r--r--gcc/gimple-ssa-strength-reduction.cc108
-rw-r--r--gcc/gimple-ssa-warn-access.cc53
-rw-r--r--gcc/gimple-ssa-warn-alloca.cc4
-rw-r--r--gcc/gimplify.cc168
-rw-r--r--gcc/godump.cc6
-rw-r--r--gcc/graphite-isl-ast-to-gimple.cc2
-rw-r--r--gcc/graphviz.cc10
-rw-r--r--gcc/inchash.h4
-rw-r--r--gcc/input.cc79
-rw-r--r--gcc/input.h3
-rw-r--r--gcc/ipa-cp.cc193
-rw-r--r--gcc/ipa-fnsummary.cc8
-rw-r--r--gcc/ipa-modref-tree.cc8
-rw-r--r--gcc/ipa-modref.cc10
-rw-r--r--gcc/ipa-modref.h2
-rw-r--r--gcc/ipa-param-manipulation.cc7
-rw-r--r--gcc/ipa-prop.cc387
-rw-r--r--gcc/ipa-prop.h25
-rw-r--r--gcc/ipa-sra.cc263
-rw-r--r--gcc/ipa-utils.cc15
-rw-r--r--gcc/ira-costs.cc11
-rw-r--r--gcc/ira-int.h2
-rw-r--r--gcc/ira.cc5
-rw-r--r--gcc/jit/ChangeLog5
-rw-r--r--gcc/jit/dummy-frontend.cc4
-rw-r--r--gcc/lra-constraints.cc32
-rw-r--r--gcc/lra-eliminations.cc6
-rw-r--r--gcc/lra.cc8
-rw-r--r--gcc/lto-streamer-in.cc19
-rw-r--r--gcc/lto-streamer-out.cc19
-rw-r--r--gcc/lto/ChangeLog4
-rw-r--r--gcc/lto/Make-lang.in4
-rw-r--r--gcc/m2/ChangeLog229
-rw-r--r--gcc/m2/Make-lang.in207
-rw-r--r--gcc/m2/README21
-rw-r--r--gcc/m2/gm2-compiler/M2CaseList.mod2
-rw-r--r--gcc/m2/gm2-compiler/M2Comp.mod876
-rw-r--r--gcc/m2/gm2-compiler/M2GCCDeclare.mod106
-rw-r--r--gcc/m2/gm2-compiler/M2Options.def171
-rw-r--r--gcc/m2/gm2-compiler/M2Options.mod227
-rw-r--r--gcc/m2/gm2-compiler/M2Preprocess.def25
-rw-r--r--gcc/m2/gm2-compiler/M2Preprocess.mod254
-rw-r--r--gcc/m2/gm2-compiler/M2Quads.mod8
-rw-r--r--gcc/m2/gm2-compiler/M2Search.def2
-rw-r--r--gcc/m2/gm2-compiler/P2SymBuild.mod23
-rw-r--r--gcc/m2/gm2-compiler/SymbolTable.mod16
-rw-r--r--gcc/m2/gm2-gcc/m2expr.cc13
-rw-r--r--gcc/m2/gm2-gcc/m2expr.def8
-rw-r--r--gcc/m2/gm2-gcc/m2expr.h2
-rw-r--r--gcc/m2/gm2-gcc/m2options.h19
-rw-r--r--gcc/m2/gm2-gcc/m2type.cc23
-rw-r--r--gcc/m2/gm2-lang.cc66
-rw-r--r--gcc/m2/gm2-libs-iso/SysClock.mod20
-rw-r--r--gcc/m2/lang-specs.h10
-rw-r--r--gcc/machmode.h6
-rw-r--r--gcc/match.pd203
-rw-r--r--gcc/omp-general.cc54
-rw-r--r--gcc/omp-low.cc2
-rw-r--r--gcc/opt-functions.awk13
-rw-r--r--gcc/opt-problem.cc6
-rw-r--r--gcc/opt-read.awk17
-rw-r--r--gcc/optabs.cc47
-rw-r--r--gcc/opth-gen.awk35
-rw-r--r--gcc/opts.cc8
-rw-r--r--gcc/passes.def5
-rw-r--r--gcc/poly-int-types.h8
-rw-r--r--gcc/poly-int.h584
-rw-r--r--gcc/pretty-print.cc71
-rw-r--r--gcc/pretty-print.h31
-rw-r--r--gcc/print-tree.cc4
-rw-r--r--gcc/real.cc8
-rw-r--r--gcc/reg-notes.def5
-rw-r--r--gcc/reload.cc38
-rw-r--r--gcc/reload.h2
-rw-r--r--gcc/reload1.cc14
-rw-r--r--gcc/rtl-ssa/blocks.cc6
-rw-r--r--gcc/rtl-tests.cc1
-rw-r--r--gcc/rtl.h16
-rw-r--r--gcc/rtlanal.cc2
-rw-r--r--gcc/rust/ChangeLog5
-rw-r--r--gcc/selftest-diagnostic.cc10
-rw-r--r--gcc/simplify-rtx.cc18
-rw-r--r--gcc/sreal.cc4
-rw-r--r--gcc/testsuite/ChangeLog1185
-rw-r--r--gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat-2.c74
-rw-r--r--gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat.c66
-rw-r--r--gcc/testsuite/c-c++-common/diagnostic-format-sarif-file-pr111700.c12
-rw-r--r--gcc/testsuite/c-c++-common/gomp/allocate-14.c2
-rw-r--r--gcc/testsuite/c-c++-common/gomp/allocate-15.c2
-rw-r--r--gcc/testsuite/c-c++-common/gomp/allocate-9.c2
-rw-r--r--gcc/testsuite/g++.dg/concepts/diagnostic3.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/hog1.C77
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C21
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/fold3.C62
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval36.C22
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1.C9
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1a.C10
-rw-r--r--gcc/testsuite/g++.dg/debug/dwarf2/pr85550.C1
-rw-r--r--gcc/testsuite/g++.dg/diagnostic/static_assert3.C7
-rw-r--r--gcc/testsuite/g++.dg/parse/error65.C10
-rw-r--r--gcc/testsuite/g++.dg/template/error60.C37
-rw-r--r--gcc/testsuite/g++.dg/torture/pr111773.C31
-rw-r--r--gcc/testsuite/g++.target/i386/mv16.C12
-rw-r--r--gcc/testsuite/g++.target/powerpc/pr111367.C22
-rw-r--r--gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C33
-rw-r--r--gcc/testsuite/g++.target/riscv/rvv/rvv.exp3
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/20000105-1.c5
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/20000105-2.c3
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/20000211-1.c4
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/20000224-1.c1
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/20000314-2.c3
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/920501-11.c1
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/920501-23.c1
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/920501-8.c1
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/920701-1.c1
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/930529-1.c1
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/980816-1.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pc44485.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr106101.c13
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr111699-1.c7
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr49474.c3
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/20001111-1.c8
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr110817-1.c13
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr110817-2.c16
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr110817-3.c14
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr111331-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-17.c28
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-18.c54
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-19.c42
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-6.c68
-rw-r--r--gcc/testsuite/gcc.dg/bitint-38.c43
-rw-r--r--gcc/testsuite/gcc.dg/bitint-39.c43
-rw-r--r--gcc/testsuite/gcc.dg/debug/dwarf2/inline4.c2
-rw-r--r--gcc/testsuite/gcc.dg/ipa/ipa-sra-32.c30
-rw-r--r--gcc/testsuite/gcc.dg/ipa/ipa-sra-4.c4
-rw-r--r--gcc/testsuite/gcc.dg/ipa/pr110378-4.c50
-rw-r--r--gcc/testsuite/gcc.dg/long_branch.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_group_plugin.c45
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_show_trees.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_inlining.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_paths.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c26
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_string_literals.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_tree_expression_range.c2
-rw-r--r--gcc/testsuite/gcc.dg/plugin/poly-int-tests.h6
-rw-r--r--gcc/testsuite/gcc.dg/pr100512.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr103003.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr103451.c8
-rw-r--r--gcc/testsuite/gcc.dg/pr108095.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr111694.c19
-rw-r--r--gcc/testsuite/gcc.dg/pr111708-1.c42
-rw-r--r--gcc/testsuite/gcc.dg/pr111708-2.c21
-rw-r--r--gcc/testsuite/gcc.dg/pr111845.c16
-rw-r--r--gcc/testsuite/gcc.dg/pr68435.c6
-rw-r--r--gcc/testsuite/gcc.dg/pr90263.c1
-rw-r--r--gcc/testsuite/gcc.dg/pr93917.c4
-rw-r--r--gcc/testsuite/gcc.dg/rtl/arm/stl-cond.c61
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr111519.c48
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr111807.c12
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr111818.c11
-rw-r--r--gcc/testsuite/gcc.dg/tree-prof/pr111559.c16
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/and-1.c6
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/bitops-5.c27
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/bitops-6.c33
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/bitops-7.c24
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/phi-opt-36.c51
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/phi-opt-37.c24
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr111583-1.c30
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr111583-2.c36
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr31531-1.c19
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr31531-2.c34
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/predcom-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-26.c4
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-102.c32
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/vrp-unreachable.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/bb-slp-68.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-1.c23
-rw-r--r--gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-2.c29
-rw-r--r--gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c25
-rw-r--r--gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/no-dynamic-lmul-1.c64
-rw-r--r--gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/no-scevccp-outer-7.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/no-scevccp-vect-iv-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr111764.c16
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr111846.c12
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr45752.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr65947-8.c6
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr97832-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr97832-3.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr97832-4.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-12a.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-23.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-perm-10.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-perm-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-reduc-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-simd-clone-1.c46
-rw-r--r--gcc/testsuite/gcc.dg/vect/slp-simd-clone-2.c57
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1115.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s114.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1161.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1232.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s124.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1279.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s161.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s253.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s257.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s271.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2711.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2712.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s272.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s273.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s274.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s276.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s278.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s279.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s3111.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s441.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s443.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-vif.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-cond-arith-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-cond-arith-4.c8
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-cond-arith-5.c8
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-cond-arith-6.c8
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-cond-reduc-4.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-live-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-multitypes-16.c4
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-pr111779.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/armv9_warning.c5
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_aligned.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_unaligned.c40
-rw-r--r--gcc/testsuite/gcc.target/aarch64/mops_4.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/stp_aligned.c25
-rw-r--r--gcc/testsuite/gcc.target/aarch64/stp_unaligned.c37
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_1024.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_128.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_2048.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_256.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_512.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pre_cond_share_1.c132
-rw-r--r--gcc/testsuite/gcc.target/aarch64/test_frame_10.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/test_frame_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/test_frame_4.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/test_frame_7.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/xorsign.c5
-rw-r--r--gcc/testsuite/gcc.target/arc/add_f-combine.c15
-rw-r--r--gcc/testsuite/gcc.target/arc/add_n-combine.c2
-rw-r--r--gcc/testsuite/gcc.target/arc/ashrsi-1.c36
-rw-r--r--gcc/testsuite/gcc.target/arc/ashrsi-2.c7
-rw-r--r--gcc/testsuite/gcc.target/arc/ashrsi-3.c7
-rw-r--r--gcc/testsuite/gcc.target/arc/ashrsi-4.c7
-rw-r--r--gcc/testsuite/gcc.target/arc/ashrsi-5.c8
-rw-r--r--gcc/testsuite/gcc.target/arc/enter-dw2-1.c18
-rw-r--r--gcc/testsuite/gcc.target/arc/firq-4.c1
-rw-r--r--gcc/testsuite/gcc.target/arc/firq-6.c1
-rw-r--r--gcc/testsuite/gcc.target/arc/loop-3.c2
-rw-r--r--gcc/testsuite/gcc.target/arc/lshrsi-1.c36
-rw-r--r--gcc/testsuite/gcc.target/arc/lshrsi-2.c7
-rw-r--r--gcc/testsuite/gcc.target/arc/lshrsi-3.c7
-rw-r--r--gcc/testsuite/gcc.target/arc/lshrsi-4.c7
-rw-r--r--gcc/testsuite/gcc.target/arc/lshrsi-5.c8
-rw-r--r--gcc/testsuite/gcc.target/arc/mtune-ARC600.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/mtune-ARC601.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/mtune-ARC700-xmac4
-rw-r--r--gcc/testsuite/gcc.target/arc/mtune-ARC700.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/mtune-ARC725D.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/mtune-ARC750D.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/scc-ltu.c12
-rw-r--r--gcc/testsuite/gcc.target/arc/shlsi-1.c36
-rw-r--r--gcc/testsuite/gcc.target/arc/shlsi-2.c8
-rw-r--r--gcc/testsuite/gcc.target/arc/shlsi-3.c8
-rw-r--r--gcc/testsuite/gcc.target/arc/shlsi-4.c8
-rw-r--r--gcc/testsuite/gcc.target/arc/shlsi-5.c9
-rw-r--r--gcc/testsuite/gcc.target/arc/tls-ld.c3
-rw-r--r--gcc/testsuite/gcc.target/arc/tls-le.c2
-rw-r--r--gcc/testsuite/gcc.target/arc/uncached-7.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic_loaddi_7.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic_loaddi_8.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/pr111235.c39
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-1.c8
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-egprs-names.c17
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-inline-gpr-norex2.c25
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-interrupt-1.c102
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2-asm.c5
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2.c181
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c45
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c29
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-push2pop2_interrupt-1.c28
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-spill_to_egprs-1.c25
-rw-r--r--gcc/testsuite/gcc.target/i386/ashldi3-2.c10
-rw-r--r--gcc/testsuite/gcc.target/i386/ashlti3-3.c10
-rw-r--r--gcc/testsuite/gcc.target/i386/avx512fp16-64-32-vecop-1.c8
-rw-r--r--gcc/testsuite/gcc.target/i386/funcspec-56.inc4
-rw-r--r--gcc/testsuite/gcc.target/i386/large-data.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/lea-2.c7
-rw-r--r--gcc/testsuite/gcc.target/i386/noevex512-1.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/noevex512-2.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/noevex512-3.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-absneghf.c91
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-copysignhf.c60
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-fmaddsubhf-1.c22
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-fmahf-1.c58
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-hf-convert-1.c111
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-roundhf.c217
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c20
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-vminmaxph-1.c36
-rw-r--r--gcc/testsuite/gcc.target/i386/part-vect-xorsignhf.c60
-rw-r--r--gcc/testsuite/gcc.target/i386/pr106245-1.c10
-rw-r--r--gcc/testsuite/gcc.target/i386/pr110701.c12
-rw-r--r--gcc/testsuite/gcc.target/i386/pr111657.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/pr111745.c18
-rw-r--r--gcc/testsuite/gcc.target/i386/pr111845.c47
-rw-r--r--gcc/testsuite/gcc.target/i386/pr52146.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/pr90096.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/rcr-1.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/rcr-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/user_msr-1.c20
-rw-r--r--gcc/testsuite/gcc.target/i386/user_msr-2.c16
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-1.c43
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-3.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/x86gprintrin-1.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/x86gprintrin-2.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/x86gprintrin-3.c28
-rw-r--r--gcc/testsuite/gcc.target/i386/x86gprintrin-4.c32
-rw-r--r--gcc/testsuite/gcc.target/i386/x86gprintrin-5.c6
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c14
-rw-r--r--gcc/testsuite/gcc.target/powerpc/const-build.c143
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr108338.c52
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr88558-p7.c13
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr88558-p8.c14
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr88558.h29
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-compile.c252
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addn.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addrn.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addun.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addurn.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clip.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clipu.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subn.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subrn.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subun.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-suburn.c11
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile.c32
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-compile.c198
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mac.c25
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsrn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhun.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhurn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsrn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macun.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macurn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-msu.c25
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsrn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhun.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhurn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsrn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulun.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulurn.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/cv-mac-test-autogeneration.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/fle-ieee.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/fle-snan.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/fle.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/flef-ieee.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/flef-snan.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/flef.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/flt-ieee.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/flt-snan.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/fltf-ieee.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/fltf-snan.c3
-rw-r--r--gcc/testsuite/gcc.target/riscv/fold-mem-offsets-1.c16
-rw-r--r--gcc/testsuite/gcc.target/riscv/fold-mem-offsets-2.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/fold-mem-offsets-3.c17
-rw-r--r--gcc/testsuite/gcc.target/riscv/pr111466.c15
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-1.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-2.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-1.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-2.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-1.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-2.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-1.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-2.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-3.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-4.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-5.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-6.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-7.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-8.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/gather-scatter/offset_extend-1.c14
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-8.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-9.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111232.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111751.c55
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-0.c17
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-run-0.c44
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-0.c21
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-1.c22
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-1.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-2.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-1.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-2.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-0.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-run-0.c63
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-0.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-run-0.c63
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-0.c14
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-run-0.c63
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-0.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-run-0.c63
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-0.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-1.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-0.c69
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-1.c69
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-0.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-1.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-0.c69
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-1.c69
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-0.c20
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-run-0.c64
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-0.c20
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-run-0.c64
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-0.c15
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-run-0.c64
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-0.c20
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-run-0.c64
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-0.c14
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-1.c14
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-0.c63
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-1.c63
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-0.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-1.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-0.c72
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-1.c72
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-1.c33
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-2.c33
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-1.c33
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-2.c33
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-1.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-2.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-1.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-2.c18
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/test-math.h24
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-1.c21
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-2.c45
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-1.c27
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-2.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm.h2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/bswap16-0.c34
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c94
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-3.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/cvt-0.c47
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h15
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iceil-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-ifloor-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-irint-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iround-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-1.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-1.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llceil-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llfloor-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llrint-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llround-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-1.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-0.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-1.c30
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c48
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c12
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-2.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c36
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-4.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-6.c19
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c12
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c36
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/perm-4.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/abi-call-args-4-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-1.c81
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-2.c46
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr110119-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr90263.c7
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/fortran/pr111566.f9031
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/rvv.exp10
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111255.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c10
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c10
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/xtheadcondmov-indirect.c89
-rw-r--r--gcc/testsuite/gcc.target/riscv/xtheadmempair-4.c29
-rw-r--r--gcc/testsuite/gcc.target/riscv/zbb-andn-orn-01.c17
-rw-r--r--gcc/testsuite/gcc.target/riscv/zbb-andn-orn-02.c17
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm.c57
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_0_imm.c73
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_imm.c73
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_reg.c65
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_reg_reg.c65
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg.c65
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_0_imm.c73
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_imm.c73
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_reg.c65
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_reg_reg.c77
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_0_imm.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_imm.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_reg.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_reg_reg.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-xor-01.c2
-rw-r--r--gcc/testsuite/gdc.dg/analyzer/analyzer.exp51
-rw-r--r--gcc/testsuite/gdc.dg/analyzer/pr111537.d7
-rw-r--r--gcc/testsuite/gdc.dg/builtins_reject.d17
-rw-r--r--gcc/testsuite/gdc.dg/intrinsics_reject.d87
-rw-r--r--gcc/testsuite/gdc.test/compilable/dtoh_StructDeclaration.d15
-rw-r--r--gcc/testsuite/gdc.test/compilable/dtoh_TemplateDeclaration.d2
-rw-r--r--gcc/testsuite/gdc.test/compilable/dtoh_functions.d4
-rw-r--r--gcc/testsuite/gdc.test/compilable/issue22682.d8
-rw-r--r--gcc/testsuite/gdc.test/compilable/issue24174.d36
-rw-r--r--gcc/testsuite/gdc.test/compilable/obsolete_body.d5
-rw-r--r--gcc/testsuite/gdc.test/compilable/shortened_methods.d5
-rw-r--r--gcc/testsuite/gdc.test/compilable/test23145.d13
-rw-r--r--gcc/testsuite/gdc.test/compilable/test24066.d11
-rw-r--r--gcc/testsuite/gdc.test/compilable/test24107.d17
-rw-r--r--gcc/testsuite/gdc.test/compilable/test24109.d17
-rw-r--r--gcc/testsuite/gdc.test/compilable/test24118.d15
-rw-r--r--gcc/testsuite/gdc.test/compilable/testpull1810.d (renamed from gcc/testsuite/gdc.test/fail_compilation/testpull1810.d)2
-rw-r--r--gcc/testsuite/gdc.test/compilable/warn12809.d (renamed from gcc/testsuite/gdc.test/fail_compilation/warn12809.d)28
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/aa_init.d16
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/body.d11
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ccast.d10
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/chkformat.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/dephexstrings.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag10169.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag10783.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag12063.d42
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag12829.d13
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag13609a.d7
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag14145.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag15713.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag3913.d6
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag5385.d24
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag7477.d4
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag8697.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/diag8894.d12
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/dip1000_deprecation.d19
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/dip22a.d12
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/enum_function.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail10528.d20
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail10534.d32
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail109.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail121.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail136.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail17570.d7
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail17969.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail18219.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail18892.d6
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail18970.d10
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail18979.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail1900.d6
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail19076.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail19103.d8
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail196.d49
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail20637.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail22054.d10
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail22529.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail23109.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail235.d4
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail61.d11
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail7861.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail9.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fail_scope.d30
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/faildottypeinfo.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/fnconstraint.d11
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/goto_skip.d57
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/hexstring.d18
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ice10713.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ice10938.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ice11518.d6
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ice11982.d20
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ice6538.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/ice8100.d7
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/issue11070.d17
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/issue12652.d24
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/issue22682.d18
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/issue3396.d24
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/lexer1.d4
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/lexer2.d8
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/main.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/match_func_ptr.d17
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/misc_parser_err_cov1.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/nogc3.d18
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/noreturn_expr.d16
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/noreturn_expr2.d14
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/operator_undefined.d20
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/parseStc.d6
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/reserved_version.d98
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/reserved_version_switch.d12
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/retscope.d2
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/skip.d6
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/switch_skip.d48
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/switches.d68
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test13536.d7
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test15785.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test15897.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test16188.d1
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test16193.d5
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test16365.d9
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test17380spec.d7
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test20522.d23
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test20655.d32
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test21353.d13
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test21912.d20
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test22329.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test23112.d3
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test24015.d20
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test24036.d21
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test24065.d18
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test24084.d28
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/test24110.d12
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/testOpApply.d16
-rw-r--r--gcc/testsuite/gdc.test/fail_compilation/testsemi.d3
-rw-r--r--gcc/testsuite/gdc.test/runnable/issue11070.d19
-rw-r--r--gcc/testsuite/gdc.test/runnable/staticaa.d153
-rw-r--r--gcc/testsuite/gdc.test/runnable/template9.d7
-rw-r--r--gcc/testsuite/gdc.test/runnable/test24078.d6
-rw-r--r--gcc/testsuite/gdc.test/runnable/test24139.d25
-rw-r--r--gcc/testsuite/gfortran.dg/allocate_with_source_25.f902
-rw-r--r--gcc/testsuite/gfortran.dg/block_17.f909
-rw-r--r--gcc/testsuite/gfortran.dg/coarray/alloc_comp_6.f9029
-rw-r--r--gcc/testsuite/gfortran.dg/coarray/alloc_comp_7.f9049
-rw-r--r--gcc/testsuite/gfortran.dg/derived_function_interface_1.f902
-rw-r--r--gcc/testsuite/gfortran.dg/finalize_38.f9016
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-10.f9075
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-11.f9033
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-12.f9024
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-13.f9025
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-13a.f9034
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-14.f9095
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-15.f9038
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-4.f904
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-7.f9010
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-8.f9029
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/allocate-9.f90112
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/strictly-structured-block-5.f9077
-rw-r--r--gcc/testsuite/gfortran.dg/implied_do_io_8.f9018
-rw-r--r--gcc/testsuite/gfortran.dg/pr104351.f9014
-rw-r--r--gcc/testsuite/gfortran.dg/pr67740.f9032
-rw-r--r--gcc/testsuite/gfortran.dg/pr95690.f904
-rw-r--r--gcc/testsuite/gfortran.dg/vect/pr60510.f1
-rw-r--r--gcc/testsuite/gm2/coroutines/pim/run/pass/coroutines-pim-run-pass.exp4
-rw-r--r--gcc/testsuite/gm2/extensions/run/pass/packedrecord3.mod49
-rw-r--r--gcc/testsuite/gm2/iso/run/pass/iso-run-pass.exp6
-rw-r--r--gcc/testsuite/gm2/iso/run/pass/m2date.mod3
-rw-r--r--gcc/testsuite/gm2/pimlib/base/run/pass/pimlib-base-run-pass.exp5
-rw-r--r--gcc/testsuite/gm2/projects/iso/run/pass/halma/projects-iso-run-pass-halma.exp7
-rw-r--r--gcc/testsuite/gm2/switches/whole-program/pass/run/switches-whole-program-pass-run.exp4
-rw-r--r--gcc/testsuite/lib/gdc-utils.exp3
-rw-r--r--gcc/testsuite/lib/target-supports.exp291
-rw-r--r--gcc/text-art/styled-string.cc5
-rw-r--r--gcc/text-art/table.cc35
-rw-r--r--gcc/text-art/table.h21
-rw-r--r--gcc/text-art/types.h7
-rw-r--r--gcc/timevar.def1
-rw-r--r--gcc/toplev.cc12
-rw-r--r--gcc/tree-affine.cc2
-rw-r--r--gcc/tree-cfg.cc3
-rw-r--r--gcc/tree-core.h10
-rw-r--r--gcc/tree-dfa.cc15
-rw-r--r--gcc/tree-dfa.h8
-rw-r--r--gcc/tree-diagnostic-path.cc4
-rw-r--r--gcc/tree-diagnostic.cc6
-rw-r--r--gcc/tree-if-conv.cc152
-rw-r--r--gcc/tree-loop-distribution.cc18
-rw-r--r--gcc/tree-pass.h2
-rw-r--r--gcc/tree-pretty-print.cc7
-rw-r--r--gcc/tree-pretty-print.h2
-rw-r--r--gcc/tree-scalar-evolution.cc10
-rw-r--r--gcc/tree-scalar-evolution.h1
-rw-r--r--gcc/tree-sra.cc26
-rw-r--r--gcc/tree-ssa-ccp.cc3
-rw-r--r--gcc/tree-ssa-dce.cc8
-rw-r--r--gcc/tree-ssa-live.cc2
-rw-r--r--gcc/tree-ssa-loop-im.cc8
-rw-r--r--gcc/tree-ssa-loop-ivcanon.cc5
-rw-r--r--gcc/tree-ssa-loop-ivopts.cc12
-rw-r--r--gcc/tree-ssa-loop-niter.cc47
-rw-r--r--gcc/tree-ssa-math-opts.cc48
-rw-r--r--gcc/tree-ssa-sccvn.cc13
-rw-r--r--gcc/tree-ssa-sccvn.h2
-rw-r--r--gcc/tree-ssa-strlen.cc53
-rw-r--r--gcc/tree-ssa.cc17
-rw-r--r--gcc/tree-ssanames.cc27
-rw-r--r--gcc/tree-vect-data-refs.cc3
-rw-r--r--gcc/tree-vect-loop-manip.cc485
-rw-r--r--gcc/tree-vect-loop.cc262
-rw-r--r--gcc/tree-vect-patterns.cc2
-rw-r--r--gcc/tree-vect-slp.cc25
-rw-r--r--gcc/tree-vect-stmts.cc1075
-rw-r--r--gcc/tree-vectorizer.cc2
-rw-r--r--gcc/tree-vectorizer.h42
-rw-r--r--gcc/tree-vrp.cc148
-rw-r--r--gcc/tree.cc35
-rw-r--r--gcc/tree.h46
-rw-r--r--gcc/value-range-pretty-print.cc16
-rw-r--r--gcc/value-range-storage.cc20
-rw-r--r--gcc/value-range-storage.h6
-rw-r--r--gcc/value-range.cc16
-rw-r--r--gcc/value-range.h4
-rw-r--r--gcc/value-relation.cc36
-rw-r--r--gcc/value-relation.h4
-rw-r--r--gcc/vec.h148
-rw-r--r--gcc/wide-int-print.cc84
-rw-r--r--gcc/wide-int-print.h38
-rw-r--r--gcc/wide-int.cc168
-rw-r--r--gcc/wide-int.h683
1066 files changed, 47704 insertions, 22763 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 37be830..0f1bd1d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,3452 @@
+2023-10-17 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc (aarch64_layout_frame): Don't make
+ the position of the LR save slot dependent on stack clash
+ protection unless shadow call stacks are enabled.
+
+2023-10-17 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.h (aarch64_frame): Add vectors that
+ store the list saved GPRs, FPRs and predicate registers.
+ * config/aarch64/aarch64.cc (aarch64_layout_frame): Initialize
+ the lists of saved registers. Use them to choose push candidates.
+ Invalidate pop candidates if we're not going to do a pop.
+ (aarch64_next_callee_save): Delete.
+ (aarch64_save_callee_saves): Take a list of registers,
+ rather than a range. Make !skip_wb select only write-back
+ candidates.
+ (aarch64_expand_prologue): Update calls accordingly.
+ (aarch64_restore_callee_saves): Take a list of registers,
+ rather than a range. Always skip pop candidates. Also skip
+ LR if shadow call stacks are enabled.
+ (aarch64_expand_epilogue): Update calls accordingly.
+
+2023-10-17 Richard Sandiford <richard.sandiford@arm.com>
+
+ * cfgbuild.h (find_sub_basic_blocks): Declare.
+ * cfgbuild.cc (update_profile_for_new_sub_basic_block): New function,
+ split out from...
+ (find_many_sub_basic_blocks): ...here.
+ (find_sub_basic_blocks): New function.
+ * function.cc (thread_prologue_and_epilogue_insns): Handle
+ epilogues that contain jumps.
+
+2023-10-17 Andrew Pinski <apinski@marvell.com>
+
+ PR tree-optimization/110817
+ * tree-ssanames.cc (ssa_name_has_boolean_range): Remove the
+ check for boolean type as they don't have "[0,1]" range.
+
+2023-10-17 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/111432
+ * match.pd (`a & (x | CST)`): New pattern.
+
+2023-10-17 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * tree-cfg.cc (move_sese_region_to_fn): Initialize profile_count for
+ new basic block.
+
+2023-10-17 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111846
+ * tree-vectorizer.h (_slp_tree::simd_clone_info): Add.
+ (SLP_TREE_SIMD_CLONE_INFO): New.
+ * tree-vect-slp.cc (_slp_tree::_slp_tree): Initialize
+ SLP_TREE_SIMD_CLONE_INFO.
+ (_slp_tree::~_slp_tree): Release it.
+ * tree-vect-stmts.cc (vectorizable_simd_clone_call): Use
+ SLP_TREE_SIMD_CLONE_INFO or STMT_VINFO_SIMD_CLONE_INFO
+ dependent on if we're doing SLP.
+
+2023-10-17 Jakub Jelinek <jakub@redhat.com>
+
+ * wide-int-print.h (print_dec_buf_size): For length, divide number
+ of bits by 3 and add 3 instead of division by 4 and adding 4.
+ * wide-int-print.cc (print_decs): Remove superfluous ()s. Don't call
+ print_hex, instead call print_decu on either negated value after
+ printing - or on wi itself.
+ (print_decu): Don't call print_hex, instead print even large numbers
+ decimally.
+ (pp_wide_int_large): Assume len from print_dec_buf_size is big enough
+ even if it returns false.
+ * pretty-print.h (pp_wide_int): Use print_dec_buf_size to check if
+ pp_wide_int_large should be used.
+ * tree-pretty-print.cc (dump_generic_node): Use print_hex_buf_size
+ to compute needed buffer size.
+
+2023-10-17 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/111818
+ * tree-ssa.cc (maybe_optimize_var): When clearing
+ DECL_NOT_GIMPLE_REG_P always rewrite into SSA.
+
+2023-10-17 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111807
+ * tree-sra.cc (build_ref_for_model): Only call
+ build_reconstructed_reference when the offsets are the same.
+
+2023-10-17 Vineet Gupta <vineetg@rivosinc.com>
+
+ PR target/111466
+ * expr.cc (expand_expr_real_2): Do not clear SUBREG_PROMOTED_VAR_P.
+
+2023-10-17 Chenghui Pan <panchenghui@loongson.cn>
+
+ * config/loongarch/loongarch.cc (loongarch_expand_vector_group_init):
+ fix impl related to vec_initv32qiv16qi template to avoid ICE.
+
+2023-10-17 Lulu Cheng <chenglulu@loongson.cn>
+ Chenghua Xu <xuchenghua@loongson.cn>
+
+ * config/loongarch/loongarch.h (ASM_OUTPUT_ALIGN_WITH_NOP):
+ Delete.
+
+2023-10-17 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/riscv-vector-costs.cc (max_number_of_live_regs): Fix big LMUL issue.
+ (get_store_value): New function.
+
+2023-10-16 Jeff Law <jlaw@ventanamicro.com>
+
+ * explow.cc (probe_stack_range): Handle case when expand_binop
+ does not construct its result in the expected location.
+
+2023-10-16 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic.cc (diagnostic_initialize): When LANG=C, update
+ default for -fdiagnostics-text-art-charset from emoji to ascii.
+ * doc/invoke.texi (fdiagnostics-text-art-charset): Document the above.
+
+2023-10-16 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic.cc (diagnostic_initialize): Ensure
+ context->extra_output_kind is initialized.
+
+2023-10-16 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.cc (ix86_can_inline_p):
+ Handle CM_LARGE and CM_LARGE_PIC.
+ (x86_elf_aligned_decl_common): Ditto.
+ (x86_output_aligned_bss): Ditto.
+ * config/i386/i386.opt: Update doc for -mlarge-data-threshold=.
+ * doc/invoke.texi: Update doc for -mlarge-data-threshold=.
+
+2023-10-16 Christoph Müllner <christoph.muellner@vrull.eu>
+
+ * config/riscv/riscv-protos.h (emit_block_move): Remove redundant
+ prototype. Improve comment.
+ * config/riscv/riscv.cc (riscv_block_move_straight): Move from riscv.cc
+ into riscv-string.cc.
+ (riscv_adjust_block_mem, riscv_block_move_loop): Likewise.
+ (riscv_expand_block_move): Likewise.
+ * config/riscv/riscv-string.cc (riscv_block_move_straight): Add moved
+ function.
+ (riscv_adjust_block_mem, riscv_block_move_loop): Likewise.
+ (riscv_expand_block_move): Likewise.
+
+2023-10-16 Manolis Tsamis <manolis.tsamis@vrull.eu>
+
+ * Makefile.in: Add fold-mem-offsets.o.
+ * passes.def: Schedule a new pass.
+ * tree-pass.h (make_pass_fold_mem_offsets): Declare.
+ * common.opt: New options.
+ * doc/invoke.texi: Document new option.
+ * fold-mem-offsets.cc: New file.
+
+2023-10-16 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/101541
+ * match.pd (A CMP 0 ? A : -A): Improve
+ using bitwise_equal_p.
+
+2023-10-16 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/31531
+ * match.pd (~X op ~Y): Allow for an optional nop convert.
+ (~X op C): Likewise.
+
+2023-10-16 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/arc/arc.md (*ashlsi3_1): New pre-reload splitter to
+ use bset dst,0,src to implement 1<<x on !TARGET_BARREL_SHIFTER.
+
+2023-10-16 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/vector.md (popcountv8hi2_vx): Sign extend each
+ unsigned vector element.
+
+2023-10-16 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/riscv-vector-costs.cc (costs::preferred_new_lmul_p): Use VLS modes.
+
+2023-10-16 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * fold-const.cc (expr_not_equal_to): Replace get_global_range_query
+ by get_range_query.
+ * gimple-fold.cc (size_must_be_zero_p): Likewise.
+ * gimple-range-fold.cc (fur_source::fur_source): Likewise.
+ * gimple-ssa-warn-access.cc (check_nul_terminated_array): Likewise.
+ * tree-dfa.cc (get_ref_base_and_extent): Likewise.
+
+2023-10-16 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/mmx.md (V2FI_32): New mode iterator
+ (movd_v2hf_to_sse): Rename to ..
+ (movd_<mode>_to_sse): .. this.
+ (movd_v2hf_to_sse_reg): Rename to ..
+ (movd_<mode>_to_sse_reg): .. this.
+ (fix<fixunssuffix>_trunc<mode><mmxintvecmodelower>2): New
+ expander.
+ (fix<fixunssuffix>_truncv2hfv2si2): Ditto.
+ (float<floatunssuffix><mmxintvecmodelower><mode>2): Ditto.
+ (float<floatunssuffix>v2siv2hf2): Ditto.
+ (extendv2hfv2sf2): Ditto.
+ (truncv2sfv2hf2): Ditto.
+ * config/i386/sse.md (*vec_concatv8hf_movss): Rename to ..
+ (*vec_concat<mode>_movss): .. this.
+
+2023-10-16 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/i386-expand.cc (ix86_sse_copysign_to_positive):
+ Handle HFmode.
+ (ix86_expand_round_sse4): Ditto.
+ * config/i386/i386.md (roundhf2): New expander.
+ (lroundhf<mode>2): Ditto.
+ (lrinthf<mode>2): Ditto.
+ (l<rounding_insn>hf<mode>2): Ditto.
+ * config/i386/mmx.md (sqrt<mode>2): Ditto.
+ (btrunc<mode>2): Ditto.
+ (nearbyint<mode>2): Ditto.
+ (rint<mode>2): Ditto.
+ (lrint<mode><mmxintvecmodelower>2): Ditto.
+ (floor<mode>2): Ditto.
+ (lfloor<mode><mmxintvecmodelower>2): Ditto.
+ (ceil<mode>2): Ditto.
+ (lceil<mode><mmxintvecmodelower>2): Ditto.
+ (round<mode>2): Ditto.
+ (lround<mode><mmxintvecmodelower>2): Ditto.
+ * config/i386/sse.md (lrint<mode><sseintvecmodelower>2): Ditto.
+ (lfloor<mode><sseintvecmodelower>2): Ditto.
+ (lceil<mode><sseintvecmodelower>2): Ditto.
+ (lround<mode><sseintvecmodelower>2): Ditto.
+ (sse4_1_round<ssescalarmodesuffix>): Extend to V8HF.
+ (round<mode>2): Extend to V8HF/V16HF/V32HF.
+
+2023-10-15 Tobias Burnus <tobias@codesourcery.com>
+
+ * doc/invoke.texi (-fopenacc, -fopenmp, -fopenmp-simd): Use @samp not
+ @code; document more completely the supported Fortran sentinels.
+
+2023-10-15 Roger Sayle <roger@nextmovesoftware.com>
+
+ * optabs.cc (expand_subword_shift): Call simplify_expand_binop
+ instead of expand_binop. Optimize cases (i.e. avoid generating
+ RTL) when CARRIES or INTO_INPUT is zero. Use one_cmpl_optab
+ (i.e. NOT) instead of xor_optab with ~0 to calculate ~OP1.
+
+2023-10-15 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/111800
+ * wide-int-print.h (print_dec_buf_size, print_decs_buf_size,
+ print_decu_buf_size, print_hex_buf_size): New inline functions.
+ * wide-int.cc (assert_deceq): Use print_dec_buf_size.
+ (assert_hexeq): Use print_hex_buf_size.
+ * wide-int-print.cc (print_decs): Use print_decs_buf_size.
+ (print_decu): Use print_decu_buf_size.
+ (print_hex): Use print_hex_buf_size.
+ (pp_wide_int_large): Use print_dec_buf_size.
+ * value-range.cc (irange_bitmask::dump): Use print_hex_buf_size.
+ * value-range-pretty-print.cc (vrange_printer::print_irange_bitmasks):
+ Likewise.
+ * tree-ssa-loop-niter.cc (do_warn_aggressive_loop_optimizations): Use
+ print_dec_buf_size. Use TYPE_SIGN macro in print_dec call argument.
+
+2023-10-15 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * combine.cc (simplify_compare_const): Fix handling of unsigned
+ constants.
+
+2023-10-15 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/vector-iterators.md: Fix vsingle incorrect attribute for RVVM2x2QI.
+
+2023-10-14 Tobias Burnus <tobias@codesourcery.com>
+
+ * gimplify.cc (gimplify_bind_expr): Handle Fortran's
+ 'omp allocate' for stack variables.
+
+2023-10-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/102989
+ * tree-core.h (struct tree_base): Remove int_length.offset
+ member, change type of int_length.unextended and int_length.extended
+ from unsigned char to unsigned short.
+ * tree.h (TREE_INT_CST_OFFSET_NUNITS): Remove.
+ (wi::extended_tree <N>::get_len): Don't use TREE_INT_CST_OFFSET_NUNITS,
+ instead compute it at runtime from TREE_INT_CST_EXT_NUNITS and
+ TREE_INT_CST_NUNITS.
+ * tree.cc (wide_int_to_tree_1): Don't assert
+ TREE_INT_CST_OFFSET_NUNITS value.
+ (make_int_cst): Don't initialize TREE_INT_CST_OFFSET_NUNITS.
+ * wide-int.h (WIDE_INT_MAX_ELTS): Change from 255 to 1024.
+ (WIDEST_INT_MAX_ELTS): Change from 510 to 2048, adjust comment.
+ (trailing_wide_int_storage): Change m_len type from unsigned char *
+ to unsigned short *.
+ (trailing_wide_int_storage::trailing_wide_int_storage): Change second
+ argument from unsigned char * to unsigned short *.
+ (trailing_wide_ints): Change m_max_len type from unsigned char to
+ unsigned short. Change m_len element type from
+ struct{unsigned char len;} to unsigned short.
+ (trailing_wide_ints <N>::operator []): Remove .len from m_len
+ accesses.
+ * value-range-storage.h (irange_storage::lengths_address): Change
+ return type from const unsigned char * to const unsigned short *.
+ (irange_storage::write_lengths_address): Change return type from
+ unsigned char * to unsigned short *.
+ * value-range-storage.cc (irange_storage::write_lengths_address):
+ Likewise.
+ (irange_storage::lengths_address): Change return type from
+ const unsigned char * to const unsigned short *.
+ (write_wide_int): Change len argument type from unsigned char *&
+ to unsigned short *&.
+ (irange_storage::set_irange): Change len variable type from
+ unsigned char * to unsigned short *.
+ (read_wide_int): Change len argument type from unsigned char to
+ unsigned short. Use trailing_wide_int_storage <unsigned short>
+ instead of trailing_wide_int_storage and
+ trailing_wide_int <unsigned short> instead of trailing_wide_int.
+ (irange_storage::get_irange): Change len variable type from
+ unsigned char * to unsigned short *.
+ (irange_storage::size): Multiply n by sizeof (unsigned short)
+ in len_size variable initialization.
+ (irange_storage::dump): Change len variable type from
+ unsigned char * to unsigned short *.
+
+2023-10-14 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/vector-iterators.md: Remove redundant iterators.
+
+2023-10-13 Andrew MacLeod <amacleod@redhat.com>
+
+ PR tree-optimization/111622
+ * value-relation.cc (equiv_oracle::add_partial_equiv): Do not
+ register a partial equivalence if an operand has no uses.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111795
+ * tree-vect-stmts.cc (vectorizable_simd_clone_call): Handle
+ integer mode mask arguments.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-slp.cc (mask_call_maps): New.
+ (vect_get_operand_map): Handle IFN_MASK_CALL.
+ (vect_build_slp_tree_1): Likewise.
+ * tree-vect-stmts.cc (vectorizable_simd_clone_call): Handle
+ SLP.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111779
+ * tree-sra.cc (sra_handled_bf_read_p): New function.
+ (build_access_from_expr_1): Handle some BIT_FIELD_REFs.
+ (sra_modify_expr): Likewise.
+ (make_fancy_name_1): Skip over BIT_FIELD_REF.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111773
+ * tree-ssa-dce.cc (mark_stmt_if_obviously_necessary): Do
+ not elide noreturn calls that are reflected to the IL.
+
+2023-10-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv.cc (riscv_legitimize_poly_move): Bump
+ max_power to 64.
+ * config/riscv/riscv.h (MAX_POLY_VARIANT): New.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md (lfloor<mode><v_i_l_ll_convert>2): New
+ pattern for lfloor/lfloorf.
+ * config/riscv/riscv-protos.h (enum insn_type): New enum value.
+ (expand_vec_lfloor): New func decl for expanding lfloor.
+ * config/riscv/riscv-v.cc (expand_vec_lfloor): New func impl
+ for expanding lfloor.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md (lceil<mode><v_i_l_ll_convert>2): New
+ pattern for lceil/lceilf.
+ * config/riscv/riscv-protos.h (enum insn_type): New enum value.
+ (expand_vec_lceil): New func decl for expanding lceil.
+ * config/riscv/riscv-v.cc (expand_vec_lceil): New func impl
+ for expanding lceil.
+
+2023-10-12 Michael Meissner <meissner@linux.ibm.com>
+
+ PR target/111778
+ * config/rs6000/rs6000.cc (can_be_built_by_li_lis_and_rldicl): Protect
+ code from shifts that are undefined.
+ (can_be_built_by_li_lis_and_rldicr): Likewise.
+ (can_be_built_by_li_and_rldic): Protect code from shifts that are
+ undefined.  Also replace uses of 1ULL with HOST_WIDE_INT_1U.
+
+2023-10-12 Alex Coplan <alex.coplan@arm.com>
+
+ * reg-notes.def (NOALIAS): Correct comment.
+
+2023-10-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/111787
+ * tree.h (wi::int_traits <unextended_tree>::needs_write_val_arg): New
+ static data member.
+ (int_traits <extended_tree <N>>::needs_write_val_arg): Likewise.
+ (wi::ints_for): Provide separate partial specializations for
+ generic_wide_int <extended_tree <N>> and INL_CONST_PRECISION or that
+ and CONST_PRECISION, rather than using
+ int_traits <extended_tree <N> >::precision_type as the second template
+ argument.
+ * rtl.h (wi::int_traits <rtx_mode_t>::needs_write_val_arg): New
+ static data member.
+ * double-int.h (wi::int_traits <double_int>::needs_write_val_arg):
+ Likewise.
+
+2023-10-12 Mary Bennett <mary.bennett@embecosm.com>
+
+ PR middle-end/111777
+ * doc/extend.texi: Change subsubsection to subsection for
+ CORE-V built-ins.
+
+2023-10-12 Tamar Christina <tamar.christina@arm.com>
+
+ * config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): Add undef.
+
+2023-10-12 Jakub Jelinek <jakub@redhat.com>
+
+ * wide-int.h (widest_int_storage <N>::write_val): If l is small
+ and there is space in u.val array, store a canary value at the
+ end when checking.
+ (widest_int_storage <N>::set_len): Check the canary hasn't been
+ overwritten.
+
+2023-10-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/102989
+ * wide-int.h: Adjust file comment.
+ (WIDE_INT_MAX_INL_ELTS): Define to former value of WIDE_INT_MAX_ELTS.
+ (WIDE_INT_MAX_INL_PRECISION): Define.
+ (WIDE_INT_MAX_ELTS): Change to 255. Assert that WIDE_INT_MAX_INL_ELTS
+ is smaller than WIDE_INT_MAX_ELTS.
+ (RWIDE_INT_MAX_ELTS, RWIDE_INT_MAX_PRECISION, WIDEST_INT_MAX_ELTS,
+ WIDEST_INT_MAX_PRECISION): Define.
+ (WI_BINARY_RESULT_VAR, WI_UNARY_RESULT_VAR): Change write_val callers
+ to pass 0 as a new argument.
+ (class widest_int_storage): Likewise.
+ (widest_int, widest2_int): Change typedefs to use widest_int_storage
+ rather than fixed_wide_int_storage.
+ (enum wi::precision_type): Add INL_CONST_PRECISION enumerator.
+ (struct binary_traits): Add partial specializations for
+ INL_CONST_PRECISION.
+ (generic_wide_int): Add needs_write_val_arg static data member.
+ (int_traits): Likewise.
+ (wide_int_storage): Replace val non-static data member with a union
+ u of it and HOST_WIDE_INT *valp. Declare copy constructor, copy
+ assignment operator and destructor. Add unsigned int argument to
+ write_val.
+ (wide_int_storage::wide_int_storage): Initialize precision to 0
+ in the default ctor. Remove unnecessary {}s around STATIC_ASSERTs.
+ Assert in non-default ctor T's precision_type is not
+ INL_CONST_PRECISION and allocate u.valp for large precision. Add
+ copy constructor.
+ (wide_int_storage::~wide_int_storage): New.
+ (wide_int_storage::operator=): Add copy assignment operator. In
+ assignment operator remove unnecessary {}s around STATIC_ASSERTs,
+ assert ctor T's precision_type is not INL_CONST_PRECISION and
+ if precision changes, deallocate and/or allocate u.valp.
+ (wide_int_storage::get_val): Return u.valp rather than u.val for
+ large precision.
+ (wide_int_storage::write_val): Likewise. Add an unused unsigned int
+ argument.
+ (wide_int_storage::set_len): Use write_val instead of writing val
+ directly.
+ (wide_int_storage::from, wide_int_storage::from_array): Adjust
+ write_val callers.
+ (wide_int_storage::create): Allocate u.valp for large precisions.
+ (wi::int_traits <wide_int_storage>::get_binary_precision): New.
+ (fixed_wide_int_storage::fixed_wide_int_storage): Make default
+ ctor defaulted.
+ (fixed_wide_int_storage::write_val): Add unused unsigned int argument.
+ (fixed_wide_int_storage::from, fixed_wide_int_storage::from_array):
+ Adjust write_val callers.
+ (wi::int_traits <fixed_wide_int_storage>::get_binary_precision): New.
+ (WIDEST_INT): Define.
+ (widest_int_storage): New template class.
+ (wi::int_traits <widest_int_storage>): New.
+ (trailing_wide_int_storage::write_val): Add unused unsigned int
+ argument.
+ (wi::get_binary_precision): Use
+ wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_precision
+ rather than get_precision on get_binary_result.
+ (wi::copy): Adjust write_val callers. Don't call set_len if
+ needs_write_val_arg.
+ (wi::bit_not): If result.needs_write_val_arg, call write_val
+ again with upper bound estimate of len.
+ (wi::sext, wi::zext, wi::set_bit): Likewise.
+ (wi::bit_and, wi::bit_and_not, wi::bit_or, wi::bit_or_not,
+ wi::bit_xor, wi::add, wi::sub, wi::mul, wi::mul_high, wi::div_trunc,
+ wi::div_floor, wi::div_ceil, wi::div_round, wi::divmod_trunc,
+ wi::mod_trunc, wi::mod_floor, wi::mod_ceil, wi::mod_round,
+ wi::lshift, wi::lrshift, wi::arshift): Likewise.
+ (wi::bswap, wi::bitreverse): Assert result.needs_write_val_arg
+ is false.
+ (gt_ggc_mx, gt_pch_nx): Remove generic template for all
+ generic_wide_int, instead add functions and templates for each
+ storage of generic_wide_int. Make functions for
+ generic_wide_int <wide_int_storage> and templates for
+ generic_wide_int <widest_int_storage <N>> deleted.
+ (wi::mask, wi::shifted_mask): Adjust write_val calls.
+ * wide-int.cc (zeros): Decrease array size to 1.
+ (BLOCKS_NEEDED): Use CEIL.
+ (canonize): Use HOST_WIDE_INT_M1.
+ (wi::from_buffer): Pass 0 to write_val.
+ (wi::to_mpz): Use CEIL.
+ (wi::from_mpz): Likewise. Pass 0 to write_val. Use
+ WIDE_INT_MAX_INL_ELTS instead of WIDE_INT_MAX_ELTS.
+ (wi::mul_internal): Use WIDE_INT_MAX_INL_PRECISION instead of
+ MAX_BITSIZE_MODE_ANY_INT in automatic array sizes, for prec
+ above WIDE_INT_MAX_INL_PRECISION estimate precision from
+ lengths of operands. Use XALLOCAVEC allocated buffers for
+ prec above WIDE_INT_MAX_INL_PRECISION.
+ (wi::divmod_internal): Likewise.
+ (wi::lshift_large): For len > WIDE_INT_MAX_INL_ELTS estimate
+ it from xlen and skip.
+ (rshift_large_common): Remove xprecision argument, add len
+ argument with len computed in caller. Don't return anything.
+ (wi::lrshift_large, wi::arshift_large): Compute len here
+ and pass it to rshift_large_common, for lengths above
+ WIDE_INT_MAX_INL_ELTS using estimations from xlen if possible.
+ (assert_deceq, assert_hexeq): For lengths above
+ WIDE_INT_MAX_INL_ELTS use XALLOCAVEC allocated buffer.
+ (test_printing): Use WIDE_INT_MAX_INL_PRECISION instead of
+ WIDE_INT_MAX_PRECISION.
+ * wide-int-print.h (WIDE_INT_PRINT_BUFFER_SIZE): Use
+ WIDE_INT_MAX_INL_PRECISION instead of WIDE_INT_MAX_PRECISION.
+ * wide-int-print.cc (print_decs, print_decu, print_hex): For
+ lengths above WIDE_INT_MAX_INL_ELTS use XALLOCAVEC allocated buffer.
+ * tree.h (wi::int_traits<extended_tree <N>>): Change precision_type
+ to INL_CONST_PRECISION for N == ADDR_MAX_PRECISION.
+ (widest_extended_tree): Use WIDEST_INT_MAX_PRECISION instead of
+ WIDE_INT_MAX_PRECISION.
+ (wi::ints_for): Use int_traits <extended_tree <N> >::precision_type
+ instead of hard coded CONST_PRECISION.
+ (widest2_int_cst): Use WIDEST_INT_MAX_PRECISION instead of
+ WIDE_INT_MAX_PRECISION.
+ (wi::extended_tree <N>::get_len): Use WIDEST_INT_MAX_PRECISION rather
+ than WIDE_INT_MAX_PRECISION.
+ (wi::ints_for::zero): Use
+ wi::int_traits <wi::extended_tree <N> >::precision_type instead of
+ wi::CONST_PRECISION.
+ * tree.cc (build_replicated_int_cst): Formatting fix. Use
+ WIDE_INT_MAX_INL_ELTS rather than WIDE_INT_MAX_ELTS.
+ * print-tree.cc (print_node): Don't print TREE_UNAVAILABLE on
+ INTEGER_CSTs, TREE_VECs or SSA_NAMEs.
+ * double-int.h (wi::int_traits <double_int>::precision_type): Change
+ to INL_CONST_PRECISION from CONST_PRECISION.
+ * poly-int.h (struct poly_coeff_traits): Add partial specialization
+ for wi::INL_CONST_PRECISION.
+ * cfgloop.h (bound_wide_int): New typedef.
+ (struct nb_iter_bound): Change bound type from widest_int to
+ bound_wide_int.
+ (struct loop): Change nb_iterations_upper_bound,
+ nb_iterations_likely_upper_bound and nb_iterations_estimate type from
+ widest_int to bound_wide_int.
+ * cfgloop.cc (record_niter_bound): Return early if wi::min_precision
+ of i_bound is too large for bound_wide_int. Adjustments for the
+ widest_int to bound_wide_int type change in non-static data members.
+ (get_estimated_loop_iterations, get_max_loop_iterations,
+ get_likely_max_loop_iterations): Adjustments for the widest_int to
+ bound_wide_int type change in non-static data members.
+ * tree-vect-loop.cc (vect_transform_loop): Likewise.
+ * tree-ssa-loop-niter.cc (do_warn_aggressive_loop_optimizations): Use
+ XALLOCAVEC allocated buffer for i_bound len above
+ WIDE_INT_MAX_INL_ELTS.
+ (record_estimate): Return early if wi::min_precision of i_bound is too
+ large for bound_wide_int. Adjustments for the widest_int to
+ bound_wide_int type change in non-static data members.
+ (wide_int_cmp): Use bound_wide_int instead of widest_int.
+ (bound_index): Use bound_wide_int instead of widest_int.
+ (discover_iteration_bound_by_body_walk): Likewise. Use
+ widest_int::from to convert it to widest_int when passed to
+ record_niter_bound.
+ (maybe_lower_iteration_bound): Use widest_int::from to convert it to
+ widest_int when passed to record_niter_bound.
+ (estimate_numbers_of_iteration): Don't record upper bound if
+ loop->nb_iterations has too large precision for bound_wide_int.
+ (n_of_executions_at_most): Use widest_int::from.
+ * tree-ssa-loop-ivcanon.cc (remove_redundant_iv_tests): Adjust for
+ the widest_int to bound_wide_int changes.
+ * match.pd (fold_sign_changed_comparison simplification): Use
+ wide_int::from on wi::to_wide instead of wi::to_widest.
+ * value-range.h (irange::maybe_resize): Avoid using memcpy on
+ non-trivially copyable elements.
+ * value-range.cc (irange_bitmask::dump): Use XALLOCAVEC allocated
+ buffer for mask or value len above WIDE_INT_PRINT_BUFFER_SIZE.
+ * fold-const.cc (fold_convert_const_int_from_int, fold_unary_loc):
+ Use wide_int::from on wi::to_wide instead of wi::to_widest.
+ * tree-ssa-ccp.cc (bit_value_binop): Zero extend r1max from width
+ before calling wi::udiv_trunc.
+ * lto-streamer-out.cc (output_cfg): Adjustments for the widest_int to
+ bound_wide_int type change in non-static data members.
+ * lto-streamer-in.cc (input_cfg): Likewise.
+ (lto_input_tree_1): Use WIDE_INT_MAX_INL_ELTS rather than
+ WIDE_INT_MAX_ELTS. For length above WIDE_INT_MAX_INL_ELTS use
+ XALLOCAVEC allocated buffer. Formatting fix.
+ * data-streamer-in.cc (streamer_read_wide_int,
+ streamer_read_widest_int): Likewise.
+ * tree-affine.cc (aff_combination_expand): Use placement new to
+ construct name_expansion.
+ (free_name_expansion): Destruct name_expansion.
+ * gimple-ssa-strength-reduction.cc (struct slsr_cand_d): Change
+ index type from widest_int to offset_int.
+ (class incr_info_d): Change incr type from widest_int to offset_int.
+ (alloc_cand_and_find_basis, backtrace_base_for_ref,
+ restructure_reference, slsr_process_ref, create_mul_ssa_cand,
+ create_mul_imm_cand, create_add_ssa_cand, create_add_imm_cand,
+ slsr_process_add, cand_abs_increment, replace_mult_candidate,
+ replace_unconditional_candidate, incr_vec_index,
+ create_add_on_incoming_edge, create_phi_basis_1,
+ replace_conditional_candidate, record_increment,
+ record_phi_increments_1, phi_incr_cost_1, phi_incr_cost,
+ lowest_cost_path, total_savings, ncd_with_phi, ncd_of_cand_and_phis,
+ nearest_common_dominator_for_cands, insert_initializers,
+ all_phi_incrs_profitable_1, replace_one_candidate,
+ replace_profitable_candidates): Use offset_int rather than widest_int
+ and wi::to_offset rather than wi::to_widest.
+ * real.cc (real_to_integer): Use WIDE_INT_MAX_INL_ELTS rather than
+ 2 * WIDE_INT_MAX_ELTS and for words above that use XALLOCAVEC
+ allocated buffer.
+ * tree-ssa-loop-ivopts.cc (niter_for_exit): Use placement new
+ to construct tree_niter_desc and destruct it on failure.
+ (free_tree_niter_desc): Destruct tree_niter_desc if value is non-NULL.
+ * gengtype.cc (main): Remove widest_int handling.
+ * graphite-isl-ast-to-gimple.cc (widest_int_from_isl_expr_int): Use
+ WIDEST_INT_MAX_ELTS instead of WIDE_INT_MAX_ELTS.
+ * gimple-ssa-warn-alloca.cc (pass_walloca::execute): Use
+ WIDE_INT_MAX_INL_PRECISION instead of WIDE_INT_MAX_PRECISION and
+ assert get_len () fits into it.
+ * value-range-pretty-print.cc (vrange_printer::print_irange_bitmasks):
+ For mask or value lengths above WIDE_INT_MAX_INL_ELTS use XALLOCAVEC
+ allocated buffer.
+ * gimple-ssa-sprintf.cc (adjust_range_for_overflow): Use
+ wide_int::from on wi::to_wide instead of wi::to_widest.
+ * omp-general.cc (score_wide_int): New typedef.
+ (omp_context_compute_score): Use score_wide_int instead of widest_int
+ and adjust for those changes.
+ (struct omp_declare_variant_entry): Change score and
+ score_in_declare_simd_clone non-static data member type from widest_int
+ to score_wide_int.
+ (omp_resolve_late_declare_variant, omp_resolve_declare_variant): Use
+ score_wide_int instead of widest_int and adjust for those changes.
+ (omp_lto_output_declare_variant_alt): Likewise.
+ (omp_lto_input_declare_variant_alt): Likewise.
+ * godump.cc (go_output_typedef): Assert get_len () is smaller than
+ WIDE_INT_MAX_INL_ELTS.
+
+2023-10-12 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md (lround<mode><v_i_l_ll_convert>2): New
+ pattern for lround/lroundf.
+ * config/riscv/riscv-protos.h (enum insn_type): New enum value.
+ (expand_vec_lround): New func decl for expanding lround.
+ * config/riscv/riscv-v.cc (expand_vec_lround): New func impl
+ for expanding lround.
+
+2023-10-12 Jakub Jelinek <jakub@redhat.com>
+
+ * dwarf2out.h (wide_int_ptr): Remove.
+ (dw_wide_int_ptr): New typedef.
+ (struct dw_val_node): Change type of val_wide from wide_int_ptr
+ to dw_wide_int_ptr.
+ (struct dw_wide_int): New type.
+ (dw_wide_int::elt): New method.
+ (dw_wide_int::operator ==): Likewise.
+ * dwarf2out.cc (get_full_len): Change argument type to
+ const dw_wide_int & from const wide_int &. Use CEIL. Call
+ get_precision method instead of calling wi::get_precision.
+ (alloc_dw_wide_int): New function.
+ (add_AT_wide): Change w argument type to const wide_int_ref &
+ from const wide_int &. Use alloc_dw_wide_int.
+ (mem_loc_descriptor, loc_descriptor): Use alloc_dw_wide_int.
+ (insert_wide_int): Change val argument type to const wide_int_ref &
+ from const wide_int &.
+ (add_const_value_attribute): Pass rtx_mode_t temporary directly to
+ add_AT_wide instead of using a temporary variable.
+
+2023-10-12 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111764
+ * tree-vect-loop.cc (check_reduction_path): Remove the attempt
+ to allow x + x via special-casing of assigns.
+
+2023-10-12 Hu, Lin1 <lin1.hu@intel.com>
+
+ * common/config/i386/cpuinfo.h (get_available_features):
+ Detect USER_MSR.
+ * common/config/i386/i386-common.cc (OPTION_MASK_ISA2_USER_MSR_SET): New.
+ (OPTION_MASK_ISA2_USER_MSR_UNSET): Ditto.
+ (ix86_handle_option): Handle -musermsr.
+ * common/config/i386/i386-cpuinfo.h (enum processor_features):
+ Add FEATURE_USER_MSR.
+ * common/config/i386/i386-isas.h: Add ISA_NAME_TABLE_ENTRY for usermsr.
+ * config.gcc: Add usermsrintrin.h.
+ * config/i386/cpuid.h (bit_USER_MSR): New.
+ * config/i386/i386-builtin-types.def:
+ Add DEF_FUNCTION_TYPE (VOID, UINT64, UINT64).
+ * config/i386/i386-builtins.cc (ix86_init_mmx_sse_builtins):
+ Add __builtin_urdmsr and __builtin_uwrmsr.
+ * config/i386/i386-builtins.h (ix86_builtins):
+ Add IX86_BUILTIN_URDMSR and IX86_BUILTIN_UWRMSR.
+ * config/i386/i386-c.cc (ix86_target_macros_internal):
+ Define __USER_MSR__.
+ * config/i386/i386-expand.cc (ix86_expand_builtin):
+ Handle new builtins.
+ * config/i386/i386-isa.def (USER_MSR): Add DEF_PTA(USER_MSR).
+ * config/i386/i386-options.cc (ix86_valid_target_attribute_inner_p):
+ Handle usermsr.
+ * config/i386/i386.md (urdmsr): New define_insn.
+ (uwrmsr): Ditto.
+ * config/i386/i386.opt: Add option -musermsr.
+ * config/i386/x86gprintrin.h: Include usermsrintrin.h.
+ * doc/extend.texi: Document usermsr.
+ * doc/invoke.texi: Document -musermsr.
+ * doc/sourcebuild.texi: Document target usermsr.
+ * config/i386/usermsrintrin.h: New file.
+
+2023-10-12 Yang Yujie <yangyujie@loongson.cn>
+
+ * config.gcc: Add loongarch-driver.h to tm_files.
+ * config/loongarch/loongarch.h: Do not include loongarch-driver.h.
+ * config/loongarch/t-loongarch: Append loongarch-multilib.h to $(GTM_H)
+ instead of $(TM_H) for building generator programs.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ PR target/111367
+ * config/rs6000/rs6000.md (stack_protect_setsi): Support prefixed
+ instruction emission and incorporate to stack_protect_set<mode>.
+ (stack_protect_setdi): Rename to ...
+ (stack_protect_set<mode>): ... this, adjust constraint.
+ (stack_protect_testsi): Support prefixed instruction emission and
+ incorporate to stack_protect_test<mode>.
+ (stack_protect_testdi): Rename to ...
+ (stack_protect_test<mode>): ... this, adjust constraint.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vectorizable_store): Consider generated
+ VEC_PERM_EXPR stmt for VMAT_CONTIGUOUS_REVERSE in costing as
+ vec_perm.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vect_model_store_cost): Remove.
+ (vectorizable_store): Adjust the costing for the remaining memory
+ access types VMAT_CONTIGUOUS{, _DOWN, _REVERSE}.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vect_model_store_cost): Assert it will never
+ get VMAT_CONTIGUOUS_PERMUTE and remove VMAT_CONTIGUOUS_PERMUTE related
+ handlings.
+ (vectorizable_store): Adjust the cost handling on
+ VMAT_CONTIGUOUS_PERMUTE without calling vect_model_store_cost.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vect_model_store_cost): Assert it will never
+ get VMAT_LOAD_STORE_LANES.
+ (vectorizable_store): Adjust the cost handling on VMAT_LOAD_STORE_LANES
+ without calling vect_model_store_cost. Factor out new lambda function
+ update_prologue_cost.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vect_model_store_cost): Assert it won't get
+ VMAT_ELEMENTWISE and VMAT_STRIDED_SLP any more, and remove their
+ related handlings.
+ (vectorizable_store): Adjust the cost handling on VMAT_ELEMENTWISE
+ and VMAT_STRIDED_SLP without calling vect_model_store_cost.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vectorizable_store): Adjust costing on
+ vectorizable_scan_store without calling vect_model_store_cost
+ any more.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vect_model_store_cost): Assert it won't get
+ VMAT_GATHER_SCATTER any more, remove VMAT_GATHER_SCATTER related
+ handlings and the related parameter gs_info.
+ (vect_build_scatter_store_calls): Add the handlings on costing with
+ one more argument cost_vec.
+ (vectorizable_store): Adjust the cost handling on VMAT_GATHER_SCATTER
+ without calling vect_model_store_cost any more.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vectorizable_store): Move and duplicate the call
+ to vect_model_store_cost down to some different transform paths
+ according to the handlings of different vect_memory_access_types
+ or some special handling need.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * tree-vect-stmts.cc (vectorizable_store): Ensure the generated
+ vector store for some case of VMAT_ELEMENTWISE is supported.
+
+2023-10-12 Mo, Zewei <zewei.mo@intel.com>
+ Hu Lin1 <lin1.hu@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+
+ * config/i386/i386.cc (gen_push2): New function to emit push2
+ and adjust cfa offset.
+ (ix86_pro_and_epilogue_can_use_push2_pop2): New function to
+ determine whether push2/pop2 can be used.
+ (ix86_compute_frame_layout): Adjust preferred stack boundary
+ and stack alignment needed for push2/pop2.
+ (ix86_emit_save_regs): Emit push2 when available.
+ (ix86_emit_restore_reg_using_pop2): New function to emit pop2
+ and adjust cfa info.
+ (ix86_emit_restore_regs_using_pop2): New function to loop
+ through the saved regs and call above.
+ (ix86_expand_epilogue): Call ix86_emit_restore_regs_using_pop2
+ when push2pop2 available.
+ * config/i386/i386.md (push2_di): New pattern for push2.
+ (pop2_di): Likewise for pop2.
+
+2023-10-12 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md (lrint<mode><vlconvert>2): Rename from.
+ (lrint<mode><v_i_l_ll_convert>2): Rename to.
+ * config/riscv/vector-iterators.md: Rename and remove TARGET_64BIT.
+
+2023-10-11 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv-opts.h (TARGET_MIN_VLEN_OPTS): New.
+
+2023-10-11 Jeff Law <jlaw@ventanamicro.com>
+
+ * config/riscv/riscv.md (jump): Adjust sequence to use a "jump"
+ pseudo op instead of a "call" pseudo op.
+
+2023-10-11 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv-subset.h (riscv_subset_list::parse_single_std_ext):
+ New.
+ (riscv_subset_list::parse_single_multiletter_ext): Ditto.
+ (riscv_subset_list::clone): Ditto.
+ (riscv_subset_list::parse_single_ext): Ditto.
+ (riscv_subset_list::set_loc): Ditto.
+ (riscv_set_arch_by_subset_list): Ditto.
+ * common/config/riscv/riscv-common.cc
+ (riscv_subset_list::parse_single_std_ext): New.
+ (riscv_subset_list::parse_single_multiletter_ext): Ditto.
+ (riscv_subset_list::clone): Ditto.
+ (riscv_subset_list::parse_single_ext): Ditto.
+ (riscv_subset_list::set_loc): Ditto.
+ (riscv_set_arch_by_subset_list): Ditto.
+
+2023-10-11 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv.cc (riscv_convert_vector_bits): Get setting
+ from argument rather than get setting from global setting.
+ (riscv_override_options_internal): New, split from
+ riscv_override_options, also take a gcc_options argument.
+ (riscv_option_override): Split most parts to
+ riscv_override_options_internal.
+
+2023-10-11 Kito Cheng <kito.cheng@sifive.com>
+
+ * doc/options.texi (Mask): Document TARGET_<NAME>_P and
+ TARGET_<NAME>_OPTS_P.
+ (InverseMask): Ditto.
+ * opth-gen.awk (Mask): Generate TARGET_<NAME>_P and
+ TARGET_<NAME>_OPTS_P macro.
+ (InverseMask): Ditto.
+
+2023-10-11 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/111282
+ * match.pd (`a & ~(a ^ b)`, `a & (a == b)`,
+ `a & ((~a) ^ b)`): New patterns.
+
+2023-10-11 Mary Bennett <mary.bennett@embecosm.com>
+
+ * common/config/riscv/riscv-common.cc: Add the XCValu
+ extension.
+ * config/riscv/constraints.md: Add builtins for the XCValu
+ extension.
+ * config/riscv/predicates.md (immediate_register_operand):
+ Likewise.
+ * config/riscv/corev.def: Likewise.
+ * config/riscv/corev.md: Likewise.
+ * config/riscv/riscv-builtins.cc (AVAIL): Likewise.
+ (RISCV_ATYPE_UHI): Likewise.
+ * config/riscv/riscv-ftypes.def: Likewise.
+ * config/riscv/riscv.opt: Likewise.
+ * config/riscv/riscv.cc (riscv_print_operand): Likewise.
+ * doc/extend.texi: Add XCValu documentation.
+ * doc/sourcebuild.texi: Likewise.
+
+2023-10-11 Mary Bennett <mary.bennett@embecosm.com>
+
+ * common/config/riscv/riscv-common.cc: Add XCVmac.
+ * config/riscv/riscv-ftypes.def: Add XCVmac builtins.
+ * config/riscv/riscv-builtins.cc: Likewise.
+ * config/riscv/riscv.md: Likewise.
+ * config/riscv/riscv.opt: Likewise.
+ * doc/extend.texi: Add XCVmac builtin documentation.
+ * doc/sourcebuild.texi: Likewise.
+ * config/riscv/corev.def: New file.
+ * config/riscv/corev.md: New file.
+
+2023-10-11 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/autovec.md: Fix index bug.
+ * config/riscv/riscv-protos.h (gather_scatter_valid_offset_mode_p): New function.
+ * config/riscv/riscv-v.cc (expand_gather_scatter): Fix index bug.
+ (gather_scatter_valid_offset_mode_p): New function.
+
+2023-10-11 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md (lrint<mode><vlconvert>2): New pattern
+ for lrint/lrintf.
+ * config/riscv/riscv-protos.h (expand_vec_lrint): New func decl
+ for expanding lrint.
+ * config/riscv/riscv-v.cc (emit_vec_cvt_x_f): New helper func impl
+ for vfcvt.x.f.v.
+ (expand_vec_lrint): New function impl for expanding lrint.
+ * config/riscv/vector-iterators.md: New mode attr and iterator.
+
+2023-10-11 Richard Biener <rguenther@suse.de>
+ Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/111519
+ * tree-ssa-strlen.cc (strlen_pass::count_nonzero_bytes): Add vuse
+ argument and pass it through to recursive calls and
+ count_nonzero_bytes_addr calls. Don't shadow the stmt argument, but
+ change stmt for gimple_assign_single_p statements for which we don't
+ immediately punt.
+ (strlen_pass::count_nonzero_bytes_addr): Add vuse argument and pass
+ it through to recursive calls and count_nonzero_bytes calls. Don't
+ use get_strinfo if gimple_vuse (stmt) is different from vuse. Don't
+ shadow the stmt argument.
+
+2023-10-11 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR middle-end/101955
+ PR tree-optimization/106245
+ * simplify-rtx.cc (simplify_relational_operation_1): Simplify
+ the RTL (ne:SI (subreg:QI (ashift:SI x 7) 0) 0) to (and:SI x 1).
+
+2023-10-11 liuhongt <hongtao.liu@intel.com>
+
+ PR target/111745
+ * config/i386/mmx.md (divv4hf3): Refine predicate of
+ operands[2] with register_operand.
+
+2023-10-10 Andrew Waterman <andrew@sifive.com>
+ Philipp Tomsich <philipp.tomsich@vrull.eu>
+ Jeff Law <jlaw@ventanamicro.com>
+
+ * config/riscv/riscv.cc (struct machine_function): Track if a
+ far-branch/jump is used within a function (and $ra needs to be
+ saved).
+ (riscv_print_operand): Implement 'N' (inverse integer branch).
+ (riscv_far_jump_used_p): Implement.
+ (riscv_save_return_addr_reg_p): New function.
+ (riscv_save_reg_p): Use riscv_save_return_addr_reg_p.
+ * config/riscv/riscv.h (FIXED_REGISTERS): Update $ra.
+ (CALL_USED_REGISTERS): Update $ra.
+ * config/riscv/riscv.md: Add new types "ret" and "jalr".
+ (length attribute): Handle long conditional and unconditional
+ branches.
+ (conditional branch pattern): Handle case where jump can not
+ reach the intended target.
+ (indirect_jump, tablejump): Use new "jalr" type.
+ (simple_return): Use new "ret" type.
+ (simple_return_internal, eh_return_internal): Likewise.
+ (gpr_restore_return, riscv_mret): Likewise.
+ (riscv_uret, riscv_sret): Likewise.
+ * config/riscv/generic.md (generic_branch): Also recognize jalr & ret
+ types.
+ * config/riscv/sifive-7.md (sifive_7_jump): Likewise.
+
+2023-10-10 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/111679
+ * match.pd (`a | ((~a) ^ b)`): New pattern.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/111751
+ * config/riscv/autovec.md: Add VLS BOOL modes.
+
+2023-10-10 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111751
+ * fold-const.cc (fold_view_convert_expr): Up the buffer size
+ to 128 bytes.
+ * tree-ssa-sccvn.cc (visit_reference_op_load): Special case
+ constants, giving up when re-interpretation to the target type
+ fails.
+
+2023-10-10 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111751
+ * tree-ssa-sccvn.cc (visit_reference_op_load): Exempt
+ BLKmode result from the padding bits check.
+
+2023-10-10 Claudiu Zissulescu <claziss@gmail.com>
+
+ * config/arc/arc.cc (arc_select_cc_mode): Match NEG code with
+ the first operand.
+ * config/arc/arc.md (addsi_compare): Make pattern canonical.
+	(addsi_compare_2): Fix indentation, constraint letters.
+ (addsi_compare_3): Likewise.
+
+2023-10-09 Eugene Rozenfeld <erozen@microsoft.com>
+
+	* auto-profile.cc (afdo_calculate_branch_prob): Fix count comparisons.
+	* tree-vect-loop-manip.cc (vect_do_peeling): Guard against zero count
+	when scaling loop profile.
+
+2023-10-09 Andrew MacLeod <amacleod@redhat.com>
+
+ PR tree-optimization/111694
+ * gimple-range-cache.cc (ranger_cache::fill_block_cache): Adjust
+ equivalence range.
+ * value-relation.cc (adjust_equivalence_range): New.
+ * value-relation.h (adjust_equivalence_range): New prototype.
+
+2023-10-09 Andrew MacLeod <amacleod@redhat.com>
+
+ * gimple-range-gori.cc (gori_compute::compute_operand1_range): Do
+ not call get_identity_relation.
+ (gori_compute::compute_operand2_range): Ditto.
+ * value-relation.cc (get_identity_relation): Remove.
+	* value-relation.h (get_identity_relation): Remove prototype.
+
+2023-10-09 Robin Dapp <rdapp@ventanamicro.com>
+
+ * config/riscv/riscv-cores.def (RISCV_TUNE): Add parameter.
+ * config/riscv/riscv-opts.h (enum riscv_microarchitecture_type):
+ Add generic_ooo.
+ * config/riscv/riscv.cc (riscv_sched_adjust_cost): Implement
+ scheduler hook.
+ (TARGET_SCHED_ADJUST_COST): Define.
+ * config/riscv/riscv.md (no,yes"): Include generic-ooo.md
+ * config/riscv/riscv.opt: Add -madjust-lmul-cost.
+ * config/riscv/generic-ooo.md: New file.
+ * config/riscv/vector.md: Add vsetvl_pre.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/riscv-opts.h (TARGET_VECTOR_MISALIGN_SUPPORTED): New macro.
+ * config/riscv/riscv.cc (riscv_support_vector_misalignment): Depend on movmisalign pattern.
+ * config/riscv/vector.md (movmisalign<mode>): New pattern.
+
+2023-10-09 Xianmiao Qu <cooper.qu@linux.alibaba.com>
+
+ * config/riscv/thead.cc (th_mempair_save_regs): Fix missing CFI
+ directives for store-pair instruction.
+
+2023-10-09 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111715
+ * alias.cc (reference_alias_ptr_type_1): When we have
+ a type-punning ref at the base search for the access
+ path part that's still semantically valid.
+
+2023-10-09 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/riscv-v.cc (shuffle_bswap_pattern): New func impl
+ for shuffle bswap.
+ (expand_vec_perm_const_1): Add handling for shuffle bswap pattern.
+
+2023-10-09 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/i386/i386-expand.cc (ix86_split_ashr): Split shifts by
+ one into ashr[sd]i3_carry followed by rcr[sd]i2, if TARGET_USE_RCR
+ or -Oz.
+ (ix86_split_lshr): Likewise, split shifts by one bit into
+ lshr[sd]i3_carry followed by rcr[sd]i2, if TARGET_USE_RCR or -Oz.
+ * config/i386/i386.h (TARGET_USE_RCR): New backend macro.
+ * config/i386/i386.md (rcrsi2): New define_insn for rcrl.
+ (rcrdi2): New define_insn for rcrq.
+ (<anyshiftrt><mode>3_carry): New define_insn for right shifts that
+ set the carry flag from the least significant bit, modelled using
+ UNSPEC_CC_NE.
+ * config/i386/x86-tune.def (X86_TUNE_USE_RCR): New tuning parameter
+ controlling use of rcr 1 vs. shrd, which is significantly faster on
+ AMD processors.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386.opt: Allow -mno-evex512.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+ Hu, Lin1 <lin1.hu@intel.com>
+
+ * config/i386/sse.md (V48H_AVX512VL): Add TARGET_EVEX512.
+ (VFH): Ditto.
+ (VF2H): Ditto.
+ (VFH_AVX512VL): Ditto.
+ (VHFBF): Ditto.
+ (VHF_AVX512VL): Ditto.
+ (VI2H_AVX512VL): Ditto.
+ (VI2F_256_512): Ditto.
+ (VF48_I1248): Remove unused iterator.
+ (VF48H_AVX512VL): Add TARGET_EVEX512.
+ (VF_AVX512): Remove unused iterator.
+ (REDUC_PLUS_MODE): Add TARGET_EVEX512.
+ (REDUC_SMINMAX_MODE): Ditto.
+ (FMAMODEM): Ditto.
+ (VFH_SF_AVX512VL): Ditto.
+ (VEC_PERM_AVX2): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+ Hu, Lin1 <lin1.hu@intel.com>
+
+ * config/i386/sse.md (VI1_AVX512VL): Add TARGET_EVEX512.
+ (VI8_FVL): Ditto.
+ (VI1_AVX512F): Ditto.
+ (VI1_AVX512VNNI): Ditto.
+ (VI1_AVX512VL_F): Ditto.
+ (VI12_VI48F_AVX512VL): Ditto.
+ (*avx512f_permvar_truncv32hiv32qi_1): Ditto.
+ (sdot_prod<mode>): Ditto.
+ (VEC_PERM_AVX2): Ditto.
+ (VPERMI2): Ditto.
+ (VPERMI2I): Ditto.
+ (vpmadd52<vpmadd52type>v8di): Ditto.
+ (usdot_prod<mode>): Ditto.
+ (vpdpbusd_v16si): Ditto.
+ (vpdpbusds_v16si): Ditto.
+ (vpdpwssd_v16si): Ditto.
+ (vpdpwssds_v16si): Ditto.
+ (VI48_AVX512VP2VL): Ditto.
+ (avx512vp2intersect_2intersectv16si): Ditto.
+ (VF_AVX512BF16VL): Ditto.
+ (VF1_AVX512_256): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-expand.cc (ix86_expand_vector_init_duplicate):
+ Make sure there is EVEX512 enabled.
+ (ix86_expand_vecop_qihi2): Refuse V32QI->V32HI when no EVEX512.
+ * config/i386/i386.cc (ix86_hard_regno_mode_ok): Disable 64 bit mask
+ when !TARGET_EVEX512.
+ * config/i386/i386.md (avx512bw_512): New.
+ (SWI1248_AVX512BWDQ_64): Add TARGET_EVEX512.
+ (*zero_extendsidi2): Change isa to avx512bw_512.
+ (kmov_isa): Ditto.
+ (*anddi_1): Ditto.
+ (*andn<mode>_1): Change isa to kmov_isa.
+ (*<code><mode>_1): Ditto.
+ (*notxor<mode>_1): Ditto.
+ (*one_cmpl<mode>2_1): Ditto.
+ (*one_cmplsi2_1_zext): Change isa to avx512bw_512.
+ (*ashl<mode>3_1): Change isa to kmov_isa.
+ (*lshr<mode>3_1): Ditto.
+ * config/i386/sse.md (VI12HFBF_AVX512VL): Add TARGET_EVEX512.
+ (VI1248_AVX512VLBW): Ditto.
+ (VHFBF_AVX512VL): Ditto.
+ (VI): Ditto.
+ (VIHFBF): Ditto.
+ (VI_AVX2): Ditto.
+ (VI1_AVX512): Ditto.
+ (VI12_256_512_AVX512VL): Ditto.
+ (VI2_AVX2_AVX512BW): Ditto.
+ (VI2_AVX512VNNIBW): Ditto.
+ (VI2_AVX512VL): Ditto.
+ (VI2HFBF_AVX512VL): Ditto.
+ (VI8_AVX2_AVX512BW): Ditto.
+ (VIMAX_AVX2_AVX512BW): Ditto.
+ (VIMAX_AVX512VL): Ditto.
+ (VI12_AVX2_AVX512BW): Ditto.
+ (VI124_AVX2_24_AVX512F_1_AVX512BW): Ditto.
+ (VI248_AVX512VL): Ditto.
+ (VI248_AVX512VLBW): Ditto.
+ (VI248_AVX2_8_AVX512F_24_AVX512BW): Ditto.
+ (VI248_AVX512BW): Ditto.
+ (VI248_AVX512BW_AVX512VL): Ditto.
+ (VI248_512): Ditto.
+ (VI124_256_AVX512F_AVX512BW): Ditto.
+ (VI_AVX512BW): Ditto.
+ (VIHFBF_AVX512BW): Ditto.
+ (SWI1248_AVX512BWDQ): Ditto.
+ (SWI1248_AVX512BW): Ditto.
+ (SWI1248_AVX512BWDQ2): Ditto.
+ (*knotsi_1_zext): Ditto.
+ (define_split for zero_extend + not): Ditto.
+ (kunpckdi): Ditto.
+ (REDUC_SMINMAX_MODE): Ditto.
+ (VEC_EXTRACT_MODE): Ditto.
+ (*avx512bw_permvar_truncv16siv16hi_1): Ditto.
+ (*avx512bw_permvar_truncv16siv16hi_1_hf): Ditto.
+ (truncv32hiv32qi2): Ditto.
+ (avx512bw_<code>v32hiv32qi2): Ditto.
+ (avx512bw_<code>v32hiv32qi2_mask): Ditto.
+ (avx512bw_<code>v32hiv32qi2_mask_store): Ditto.
+ (usadv64qi): Ditto.
+ (VEC_PERM_AVX2): Ditto.
+ (AVX512ZEXTMASK): Ditto.
+ (SWI24_MASK): New.
+ (vec_pack_trunc_<mode>): Change iterator to SWI24_MASK.
+ (avx512bw_packsswb<mask_name>): Add TARGET_EVEX512.
+ (avx512bw_packssdw<mask_name>): Ditto.
+ (avx512bw_interleave_highv64qi<mask_name>): Ditto.
+ (avx512bw_interleave_lowv64qi<mask_name>): Ditto.
+ (<mask_codefor>avx512bw_pshuflwv32hi<mask_name>): Ditto.
+ (<mask_codefor>avx512bw_pshufhwv32hi<mask_name>): Ditto.
+ (vec_unpacks_lo_di): Ditto.
+ (SWI48x_MASK): New.
+ (vec_unpacks_hi_<mode>): Change iterator to SWI48x_MASK.
+ (avx512bw_umulhrswv32hi3<mask_name>): Add TARGET_EVEX512.
+ (VI1248_AVX512VL_AVX512BW): Ditto.
+ (avx512bw_<code>v32qiv32hi2<mask_name>): Ditto.
+ (*avx512bw_zero_extendv32qiv32hi2_1): Ditto.
+ (*avx512bw_zero_extendv32qiv32hi2_2): Ditto.
+ (<insn>v32qiv32hi2): Ditto.
+ (pbroadcast_evex_isa): Change isa attribute to avx512bw_512.
+ (VPERMI2): Add TARGET_EVEX512.
+ (VPERMI2I): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-expand.cc (ix86_expand_sse2_mulvxdi3):
+ Add TARGET_EVEX512 for 512 bit usage.
+ * config/i386/i386.cc (standard_sse_constant_opcode): Ditto.
+ * config/i386/sse.md (VF1_VF2_AVX512DQ): Ditto.
+ (VF1_128_256VL): Ditto.
+ (VF2_AVX512VL): Ditto.
+ (VI8_256_512): Ditto.
+ (<mask_codefor>fixuns_trunc<mode><sseintvecmodelower>2<mask_name>):
+ Ditto.
+ (AVX512_VEC): Ditto.
+ (AVX512_VEC_2): Ditto.
+ (VI4F_BRCST32x2): Ditto.
+ (VI8F_BRCST64x2): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-builtins.cc
+ (ix86_vectorize_builtin_gather): Disable 512 bit gather
+ when !TARGET_EVEX512.
+ * config/i386/i386-expand.cc (ix86_valid_mask_cmp_mode):
+ Add TARGET_EVEX512.
+ (ix86_expand_int_sse_cmp): Ditto.
+ (ix86_expand_vector_init_one_nonzero): Disable subroutine
+ when !TARGET_EVEX512.
+ (ix86_emit_swsqrtsf): Add TARGET_EVEX512.
+ (ix86_vectorize_vec_perm_const): Disable subroutine when
+ !TARGET_EVEX512.
+ * config/i386/i386.cc
+ (standard_sse_constant_p): Add TARGET_EVEX512.
+ (standard_sse_constant_opcode): Ditto.
+ (ix86_get_ssemov): Ditto.
+ (ix86_legitimate_constant_p): Ditto.
+	(ix86_vectorize_builtin_scatter): Disable 512 bit scatter
+	when !TARGET_EVEX512.
+ * config/i386/i386.md (avx512f_512): New.
+ (movxi): Add TARGET_EVEX512.
+ (*movxi_internal_avx512f): Ditto.
+ (*movdi_internal): Change alternative 12 to ?Yv. Adjust mode
+ for alternative 13.
+ (*movsi_internal): Change alternative 8 to ?Yv. Adjust mode for
+ alternative 9.
+ (*movhi_internal): Change alternative 11 to *Yv.
+ (*movdf_internal): Change alternative 12 to Yv.
+ (*movsf_internal): Change alternative 5 to Yv. Adjust mode for
+ alternative 5 and 6.
+ (*mov<mode>_internal): Change alternative 4 to Yv.
+ (define_split for convert SF to DF): Add TARGET_EVEX512.
+ (extendbfsf2_1): Ditto.
+ * config/i386/predicates.md (bcst_mem_operand): Disable predicate
+ for 512 bit when !TARGET_EVEX512.
+ * config/i386/sse.md (VMOVE): Add TARGET_EVEX512.
+ (V48_AVX512VL): Ditto.
+ (V48_256_512_AVX512VL): Ditto.
+ (V48H_AVX512VL): Ditto.
+ (VI12_AVX512VL): Ditto.
+ (V): Ditto.
+ (V_512): Ditto.
+ (V_256_512): Ditto.
+ (VF): Ditto.
+ (VF1_VF2_AVX512DQ): Ditto.
+ (VFH): Ditto.
+ (VFB): Ditto.
+ (VF1): Ditto.
+ (VF1_AVX2): Ditto.
+ (VF2): Ditto.
+ (VF2H): Ditto.
+ (VF2_512_256): Ditto.
+ (VF2_512_256VL): Ditto.
+ (VF_512): Ditto.
+ (VFB_512): Ditto.
+ (VI48_AVX512VL): Ditto.
+ (VI1248_AVX512VLBW): Ditto.
+ (VF_AVX512VL): Ditto.
+ (VFH_AVX512VL): Ditto.
+ (VF1_AVX512VL): Ditto.
+ (VI): Ditto.
+ (VIHFBF): Ditto.
+ (VI_AVX2): Ditto.
+ (VI8): Ditto.
+ (VI8_AVX512VL): Ditto.
+ (VI2_AVX512F): Ditto.
+ (VI4_AVX512F): Ditto.
+ (VI4_AVX512VL): Ditto.
+ (VI48_AVX512F_AVX512VL): Ditto.
+ (VI8_AVX2_AVX512F): Ditto.
+ (VI8_AVX_AVX512F): Ditto.
+ (V8FI): Ditto.
+ (V16FI): Ditto.
+ (VI124_AVX2_24_AVX512F_1_AVX512BW): Ditto.
+ (VI248_AVX512VLBW): Ditto.
+ (VI248_AVX2_8_AVX512F_24_AVX512BW): Ditto.
+ (VI248_AVX512BW): Ditto.
+ (VI248_AVX512BW_AVX512VL): Ditto.
+ (VI48_AVX512F): Ditto.
+ (VI48_AVX_AVX512F): Ditto.
+ (VI12_AVX_AVX512F): Ditto.
+ (VI148_512): Ditto.
+ (VI124_256_AVX512F_AVX512BW): Ditto.
+ (VI48_512): Ditto.
+ (VI_AVX512BW): Ditto.
+ (VIHFBF_AVX512BW): Ditto.
+ (VI4F_256_512): Ditto.
+ (VI48F_256_512): Ditto.
+ (VI48F): Ditto.
+ (VI12_VI48F_AVX512VL): Ditto.
+ (V32_512): Ditto.
+ (AVX512MODE2P): Ditto.
+ (STORENT_MODE): Ditto.
+ (REDUC_PLUS_MODE): Ditto.
+ (REDUC_SMINMAX_MODE): Ditto.
+ (*andnot<mode>3): Change isa attribute to avx512f_512.
+ (*andnot<mode>3): Ditto.
+ (<code><mode>3): Ditto.
+ (<code>tf3): Ditto.
+ (FMAMODEM): Add TARGET_EVEX512.
+ (FMAMODE_AVX512): Ditto.
+ (VFH_SF_AVX512VL): Ditto.
+ (avx512f_fix_notruncv16sfv16si<mask_name><round_name>): Ditto.
+ (fix<fixunssuffix>_truncv16sfv16si2<mask_name><round_saeonly_name>):
+ Ditto.
+ (avx512f_cvtdq2pd512_2): Ditto.
+ (avx512f_cvtpd2dq512<mask_name><round_name>): Ditto.
+ (fix<fixunssuffix>_truncv8dfv8si2<mask_name><round_saeonly_name>):
+ Ditto.
+ (<mask_codefor>avx512f_cvtpd2ps512<mask_name><round_name>): Ditto.
+ (vec_unpacks_lo_v16sf): Ditto.
+ (vec_unpacks_hi_v16sf): Ditto.
+ (vec_unpacks_float_hi_v16si): Ditto.
+ (vec_unpacks_float_lo_v16si): Ditto.
+ (vec_unpacku_float_hi_v16si): Ditto.
+ (vec_unpacku_float_lo_v16si): Ditto.
+ (vec_pack_sfix_trunc_v8df): Ditto.
+ (avx512f_vec_pack_sfix_v8df): Ditto.
+ (<mask_codefor>avx512f_unpckhps512<mask_name>): Ditto.
+ (<mask_codefor>avx512f_unpcklps512<mask_name>): Ditto.
+ (<mask_codefor>avx512f_movshdup512<mask_name>): Ditto.
+ (<mask_codefor>avx512f_movsldup512<mask_name>): Ditto.
+ (AVX512_VEC): Ditto.
+ (AVX512_VEC_2): Ditto.
+ (vec_extract_lo_v64qi): Ditto.
+ (vec_extract_hi_v64qi): Ditto.
+ (VEC_EXTRACT_MODE): Ditto.
+ (<mask_codefor>avx512f_unpckhpd512<mask_name>): Ditto.
+ (avx512f_movddup512<mask_name>): Ditto.
+ (avx512f_unpcklpd512<mask_name>): Ditto.
+ (*<avx512>_vternlog<mode>_all): Ditto.
+ (*<avx512>_vpternlog<mode>_1): Ditto.
+ (*<avx512>_vpternlog<mode>_2): Ditto.
+ (*<avx512>_vpternlog<mode>_3): Ditto.
+ (avx512f_shufps512_mask): Ditto.
+ (avx512f_shufps512_1<mask_name>): Ditto.
+ (avx512f_shufpd512_mask): Ditto.
+ (avx512f_shufpd512_1<mask_name>): Ditto.
+ (<mask_codefor>avx512f_interleave_highv8di<mask_name>): Ditto.
+ (<mask_codefor>avx512f_interleave_lowv8di<mask_name>): Ditto.
+ (vec_dupv2df<mask_name>): Ditto.
+ (trunc<pmov_src_lower><mode>2): Ditto.
+ (*avx512f_<code><pmov_src_lower><mode>2): Ditto.
+ (*avx512f_vpermvar_truncv8div8si_1): Ditto.
+ (avx512f_<code><pmov_src_lower><mode>2_mask): Ditto.
+ (avx512f_<code><pmov_src_lower><mode>2_mask_store): Ditto.
+ (truncv8div8qi2): Ditto.
+ (avx512f_<code>v8div16qi2): Ditto.
+ (*avx512f_<code>v8div16qi2_store_1): Ditto.
+ (*avx512f_<code>v8div16qi2_store_2): Ditto.
+ (avx512f_<code>v8div16qi2_mask): Ditto.
+ (*avx512f_<code>v8div16qi2_mask_1): Ditto.
+ (*avx512f_<code>v8div16qi2_mask_store_1): Ditto.
+ (avx512f_<code>v8div16qi2_mask_store_2): Ditto.
+ (vec_widen_umult_even_v16si<mask_name>): Ditto.
+ (*vec_widen_umult_even_v16si<mask_name>): Ditto.
+ (vec_widen_smult_even_v16si<mask_name>): Ditto.
+ (*vec_widen_smult_even_v16si<mask_name>): Ditto.
+ (VEC_PERM_AVX2): Ditto.
+ (one_cmpl<mode>2): Ditto.
+ (<mask_codefor>one_cmpl<mode>2<mask_name>): Ditto.
+ (*one_cmpl<mode>2_pternlog_false_dep): Ditto.
+ (define_split to xor): Ditto.
+ (*andnot<mode>3): Ditto.
+ (define_split for ior): Ditto.
+ (*iornot<mode>3): Ditto.
+ (*xnor<mode>3): Ditto.
+ (*<nlogic><mode>3): Ditto.
+ (<mask_codefor>avx512f_interleave_highv16si<mask_name>): Ditto.
+ (<mask_codefor>avx512f_interleave_lowv16si<mask_name>): Ditto.
+ (avx512f_pshufdv3_mask): Ditto.
+ (avx512f_pshufd_1<mask_name>): Ditto.
+ (*vec_extractv4ti): Ditto.
+ (VEXTRACTI128_MODE): Ditto.
+ (define_split to vec_extract): Ditto.
+ (VI1248_AVX512VL_AVX512BW): Ditto.
+ (<mask_codefor>avx512f_<code>v16qiv16si2<mask_name>): Ditto.
+ (<insn>v16qiv16si2): Ditto.
+ (avx512f_<code>v16hiv16si2<mask_name>): Ditto.
+ (<insn>v16hiv16si2): Ditto.
+ (avx512f_zero_extendv16hiv16si2_1): Ditto.
+ (avx512f_<code>v8qiv8di2<mask_name>): Ditto.
+ (*avx512f_<code>v8qiv8di2<mask_name>_1): Ditto.
+ (*avx512f_<code>v8qiv8di2<mask_name>_2): Ditto.
+ (<insn>v8qiv8di2): Ditto.
+ (avx512f_<code>v8hiv8di2<mask_name>): Ditto.
+ (<insn>v8hiv8di2): Ditto.
+ (avx512f_<code>v8siv8di2<mask_name>): Ditto.
+ (*avx512f_zero_extendv8siv8di2_1): Ditto.
+ (*avx512f_zero_extendv8siv8di2_2): Ditto.
+ (<insn>v8siv8di2): Ditto.
+ (avx512f_roundps512_sfix): Ditto.
+ (vashrv8di3): Ditto.
+ (vashrv16si3): Ditto.
+ (pbroadcast_evex_isa): Change isa attribute to avx512f_512.
+ (vec_dupv4sf): Add TARGET_EVEX512.
+ (*vec_dupv4si): Ditto.
+ (*vec_dupv2di): Ditto.
+ (vec_dup<mode>): Change isa attribute to avx512f_512.
+ (VPERMI2): Add TARGET_EVEX512.
+ (VPERMI2I): Ditto.
+ (VEC_INIT_MODE): Ditto.
+ (VEC_INIT_HALF_MODE): Ditto.
+ (<mask_codefor>avx512f_vcvtph2ps512<mask_name><round_saeonly_name>):
+ Ditto.
+ (avx512f_vcvtps2ph512_mask_sae): Ditto.
+ (<mask_codefor>avx512f_vcvtps2ph512<mask_name><round_saeonly_name>):
+ Ditto.
+ (*avx512f_vcvtps2ph512<merge_mask_name>): Ditto.
+ (INT_BROADCAST_MODE): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-expand.cc (ix86_broadcast_from_constant):
+ Disable zmm broadcast for !TARGET_EVEX512.
+ * config/i386/i386-options.cc (ix86_option_override_internal):
+ Do not use PVW_512 when no-evex512.
+ (ix86_simd_clone_adjust): Add evex512 target into string.
+ * config/i386/i386.cc (type_natural_mode): Report ABI warning
+ when using zmm register w/o evex512.
+ (ix86_return_in_memory): Do not allow zmm when !TARGET_EVEX512.
+ (ix86_hard_regno_mode_ok): Ditto.
+ (ix86_set_reg_reg_cost): Ditto.
+ (ix86_rtx_costs): Ditto.
+ (ix86_vector_mode_supported_p): Ditto.
+ (ix86_preferred_simd_mode): Ditto.
+ (ix86_get_mask_mode): Ditto.
+ (ix86_simd_clone_compute_vecsize_and_simdlen): Disable 512 bit
+ libmvec call when !TARGET_EVEX512.
+ (ix86_simd_clone_usable): Ditto.
+ * config/i386/i386.h (BIGGEST_ALIGNMENT): Disable 512 alignment
+ when !TARGET_EVEX512
+ (MOVE_MAX): Do not use PVW_512 when !TARGET_EVEX512.
+ (STORE_MAX_PIECES): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-builtin.def (BDESC): Add
+ OPTION_MASK_ISA2_EVEX512.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-builtin.def (BDESC): Add
+ OPTION_MASK_ISA2_EVEX512.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-builtin.def (BDESC): Add
+ OPTION_MASK_ISA2_EVEX512.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-builtin.def (BDESC): Add
+ OPTION_MASK_ISA2_EVEX512.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/i386-builtin.def (BDESC): Add
+ OPTION_MASK_ISA2_EVEX512.
+ * config/i386/i386-builtins.cc
+ (ix86_init_mmx_sse_builtins): Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+ Hu, Lin1 <lin1.hu@intel.com>
+
+ * config/i386/avx512fp16intrin.h: Add evex512 target for 512 bit
+ intrins.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config.gcc: Add avx512bitalgvlintrin.h.
+ * config/i386/avx5124fmapsintrin.h: Add evex512 target for 512 bit
+ intrins.
+ * config/i386/avx5124vnniwintrin.h: Ditto.
+ * config/i386/avx512bf16intrin.h: Ditto.
+ * config/i386/avx512bitalgintrin.h: Add evex512 target for 512 bit
+ intrins. Split 128/256 bit intrins to avx512bitalgvlintrin.h.
+	* config/i386/avx512erintrin.h: Add evex512 target for 512 bit
+	intrins.
+	* config/i386/avx512ifmaintrin.h: Ditto.
+	* config/i386/avx512pfintrin.h: Ditto.
+ * config/i386/avx512vbmi2intrin.h: Ditto.
+ * config/i386/avx512vbmiintrin.h: Ditto.
+ * config/i386/avx512vnniintrin.h: Ditto.
+ * config/i386/avx512vp2intersectintrin.h: Ditto.
+ * config/i386/avx512vpopcntdqintrin.h: Ditto.
+ * config/i386/gfniintrin.h: Ditto.
+ * config/i386/immintrin.h: Add avx512bitalgvlintrin.h.
+ * config/i386/vaesintrin.h: Add evex512 target for 512 bit intrins.
+ * config/i386/vpclmulqdqintrin.h: Ditto.
+ * config/i386/avx512bitalgvlintrin.h: New.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/avx512bwintrin.h: Add evex512 target for 512 bit
+ intrins.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/avx512dqintrin.h: Add evex512 target for 512 bit
+ intrins.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * config/i386/avx512fintrin.h: Add evex512 target for 512 bit intrins.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * common/config/i386/i386-common.cc
+ (OPTION_MASK_ISA2_EVEX512_SET): New.
+ (OPTION_MASK_ISA2_EVEX512_UNSET): Ditto.
+ (ix86_handle_option): Handle EVEX512.
+ * config/i386/i386-c.cc
+ (ix86_target_macros_internal): Handle EVEX512. Add __EVEX256__
+ when AVX512VL is set.
+ * config/i386/i386-options.cc: (isa2_opts): Handle EVEX512.
+ (ix86_valid_target_attribute_inner_p): Ditto.
+ (ix86_option_override_internal): Set EVEX512 target if it is not
+ explicitly set when AVX512 is enabled. Disable
+	AVX512{PF,ER,4VNNIW,4FMAPS} for -mno-evex512.
+	* config/i386/i386.opt: Add mevex512. Temporarily RejectNegative.
+
+2023-10-09 Haochen Gui <guihaoc@gcc.gnu.org>
+
+ PR target/88558
+ * config/rs6000/rs6000.md (lrint<mode>di2): Remove TARGET_FPRND
+ from insn condition.
+ (lrint<mode>si2): New insn pattern for 32bit lrint.
+
+2023-10-09 Haochen Gui <guihaoc@gcc.gnu.org>
+
+ PR target/88558
+ * config/rs6000/rs6000.cc (rs6000_hard_regno_mode_ok_uncached):
+ Enable SImode on FP registers for P7.
+ * config/rs6000/rs6000.md (*movsi_internal1): Add fmr for SImode
+ move between FP registers. Set attribute isa of stfiwx to "*"
+ and attribute of stxsiwx to "p7".
+
+2023-10-09 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/s390.md: Make use of new copysign RTL.
+
+2023-10-09 Hongyu Wang <hongyu.wang@intel.com>
+
+ * config/i386/sse.md (vec_concatv2di): Replace constraint "m"
+ with "jm" for alternative 0 and 1 of operand 2.
+ (sse4_1_<code><mode>3<mask_name>): Replace constraint "Bm" with
+ "ja" for alternative 0 and 1 of operand2.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/111155
+ * text-art/table.cc (table::maybe_set_cell_span): New.
+ (table::add_other_table): New.
+ * text-art/table.h (class table::cell_placement): Add class table
+ as a friend.
+ (table::add_rows): New.
+ (table::add_row): Reimplement in terms of add_rows.
+ (table::maybe_set_cell_span): New decl.
+ (table::add_other_table): New decl.
+ * text-art/types.h (operator+): New operator for rect + coord.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ * genmatch.cc (main): Update for "m_" prefix of some fields of
+ line_maps.
+ * input.cc (make_location): Update for removal of
+ COMBINE_LOCATION_DATA.
+ (dump_line_table_statistics): Update for "m_" prefix of some
+ fields of line_maps.
+ (location_with_discriminator): Update for removal of
+ COMBINE_LOCATION_DATA.
+ (line_table_test::line_table_test): Update for "m_" prefix of some
+ fields of line_maps.
+ * toplev.cc (general_init): Likewise.
+ * tree.cc (set_block): Update for removal of
+ COMBINE_LOCATION_DATA.
+ (set_source_range): Likewise.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ * input.cc (make_location): Move implementation to
+ line_maps::make_location.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR driver/111700
+ * input.cc (file_cache::add_file): Update leading comment to
+ clarify that it can fail.
+ (file_cache::lookup_or_add_file): Likewise.
+ (file_cache::get_source_file_content): Gracefully handle
+ lookup_or_add_file failing.
+
+2023-10-08 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/i386.cc (ix86_build_const_vector): Handle V2HF
+ and V4HFmode.
+ (ix86_build_signbit_mask): Ditto.
+ * config/i386/mmx.md (mmxintvecmode): Ditto.
+ (<code><mode>2): New define_expand.
+ (*mmx_<code><mode>): New define_insn_and_split.
+ (*mmx_nabs<mode>2): Ditto.
+ (*mmx_andnot<mode>3): New define_insn.
+ (<code><mode>3): Ditto.
+ (copysign<mode>3): New define_expand.
+ (xorsign<mode>3): Ditto.
+ (signbit<mode>2): Ditto.
+
+2023-10-08 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/mmx.md (VHF_32_64): New mode iterator.
+ (<insn><mode>3): New define_expand, merged from ..
+ (<insn>v4hf3): .. this and
+ (<insn>v2hf3): .. this.
+	(movd_v2hf_to_sse_reg): New define_expand, split from ..
+ (movd_v2hf_to_sse): .. this.
+ (<code><mode>3): New define_expand.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * config/rs6000/rs6000.cc (can_be_built_by_li_and_rldic): New function.
+ (rs6000_emit_set_long_const): Call can_be_built_by_li_and_rldic.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * config/rs6000/rs6000.cc (can_be_built_by_li_lis_and_rldicl): New
+ function.
+ (can_be_built_by_li_lis_and_rldicr): New function.
+ (rs6000_emit_set_long_const): Call can_be_built_by_li_lis_and_rldicr and
+ can_be_built_by_li_lis_and_rldicl.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * config/rs6000/rs6000.cc (can_be_rotated_to_negative_lis): New
+ function.
+ (can_be_built_by_li_and_rotldi): Rename to ...
+ (can_be_built_by_li_lis_and_rotldi): ... this function.
+ (rs6000_emit_set_long_const): Call can_be_built_by_li_lis_and_rotldi.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * config/rs6000/rs6000.cc (can_be_built_by_li_and_rotldi): New function.
+ (rs6000_emit_set_long_const): Call can_be_built_by_li_and_rotldi.
+
+2023-10-08 Yanzhang Wang <yanzhang.wang@intel.com>
+
+ * config/riscv/linux.h: Pass the static-pie specific options to
+ the linker.
+
+2023-10-07 Saurabh Jha <saurabh.jha@arm.com>
+
+ * config/aarch64/aarch64-cores.def (AARCH64_CORE): Add support for
+ cortex-x4 core.
+ * config/aarch64/aarch64-tune.md: Regenerated.
+ * doc/invoke.texi: Add command-line option for cortex-x4 core.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/constraints.md (jb): New constraint for vsib memory
+ that does not allow gpr32.
+ * config/i386/i386.md: (setcc_<mode>_sse): Replace m to jm for avx
+ alternative and set attr_gpr32 to 0.
+ (movmsk_df): Split avx/noavx alternatives and replace "r" to "jr" for
+ avx alternative.
+ (<sse>_rcp<mode>2): Split avx/noavx alternatives and replace
+ "m/Bm" to "jm/ja" for avx alternative, set its gpr32 attr to 0.
+ (*rsqrtsf2_sse): Likewise.
+ * config/i386/mmx.md (mmx_pmovmskb): Split alternative 1 to
+ avx/noavx and assign jr/r constraint to dest.
+ * config/i386/sse.md (<sse>_movmsk<ssemodesuffix><avxsizesuffix>):
+ Split avx/noavx alternatives and replace "r" to "jr" for avx alternative.
+ (*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_<u>ext): Likewise.
+ (*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_lt): Likewise.
+ (*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_<u>ext_lt): Likewise.
+ (*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_shift): Likewise.
+ (*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_<u>ext_shift): Likewise.
+ (<sse2_avx2>_pmovmskb): Likewise.
+ (*<sse2_avx2>_pmovmskb_zext): Likewise.
+ (*sse2_pmovmskb_ext): Likewise.
+ (*<sse2_avx2>_pmovmskb_lt): Likewise.
+ (*<sse2_avx2>_pmovmskb_zext_lt): Likewise.
+ (*sse2_pmovmskb_ext_lt): Likewise.
+ (<sse>_rcp<mode>2): Split avx/noavx alternatives and replace
+ "m/Bm" to "jm/ja" for avx alternative, set its attr_gpr32 to 0.
+ (sse_vmrcpv4sf2): Likewise.
+ (*sse_vmrcpv4sf2): Likewise.
+ (rsqrt<mode>2): Likewise.
+ (sse_vmrsqrtv4sf2): Likewise.
+ (*sse_vmrsqrtv4sf2): Likewise.
+ (avx_h<insn>v4df3): Likewise.
+ (sse3_hsubv2df3): Likewise.
+ (avx_h<insn>v8sf3): Likewise.
+ (sse3_h<insn>v4sf3): Likewise.
+ (<sse3>_lddqu<avxsizesuffix>): Likewise.
+ (avx_cmp<mode>3): Likewise.
+ (avx_vmcmp<mode>3): Likewise.
+ (*sse2_gt<mode>3): Likewise.
+ (sse_ldmxcsr): Likewise.
+ (sse_stmxcsr): Likewise.
+ (avx_vtest<ssemodesuffix><avxsizesuffix>): Replace m to jm for
+ avx alternative and set attr_gpr32 to 0.
+ (avx2_permv2ti): Likewise.
+ (*avx_vperm2f128<mode>_full): Likewise.
+ (*avx_vperm2f128<mode>_nozero): Likewise.
+ (vec_set_lo_v32qi): Likewise.
+ (<avx_avx2>_maskload<ssemodesuffix><avxsizesuffix>): Likewise.
+	(<avx_avx2>_maskstore<ssemodesuffix><avxsizesuffix>): Likewise.
+ (avx_cmp<mode>3): Likewise.
+ (avx_vmcmp<mode>3): Likewise.
+ (*<sse>_maskcmp<mode>3_comm): Likewise.
+ (*avx2_gathersi<VEC_GATHER_MODE:mode>): Replace Tv to jb and set
+ attr_gpr32 to 0.
+ (*avx2_gathersi<VEC_GATHER_MODE:mode>_2): Likewise.
+ (*avx2_gatherdi<VEC_GATHER_MODE:mode>): Likewise.
+ (*avx2_gatherdi<VEC_GATHER_MODE:mode>_2): Likewise.
+ (*avx2_gatherdi<VI4F_256:mode>_3): Likewise.
+ (*avx2_gatherdi<VI4F_256:mode>_4): Likewise.
+ (avx_vbroadcastf128_<mode>): Restrict non-egpr alternative to
+ noavx512vl, set its constraint to jm and set attr_gpr32 to 0.
+ (vec_set_lo_<mode><mask_name>): Likewise.
+ (vec_set_lo_<mode><mask_name>): Likewise for SF/SI modes.
+ (vec_set_hi_<mode><mask_name>): Likewise.
+ (vec_set_hi_<mode><mask_name>): Likewise for SF/SI modes.
+ (vec_set_hi_<mode>): Likewise.
+ (vec_set_lo_<mode>): Likewise.
+ (avx2_set_hi_v32qi): Likewise.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/i386.md (*movhi_internal): Split out non-gpr
+ supported pextrw with mem constraint to avx/noavx alternatives,
+ set jm and attr gpr32 0 to the noavx alternative.
+ (*mov<mode>_internal): Likewise.
+ * config/i386/mmx.md (mmx_pshufbv8qi3): Change "r/m/Bm" to
+ "jr/jm/ja" and set_attr gpr32 0 for noavx alternative.
+ (mmx_pshufbv4qi3): Likewise.
+ (*mmx_pinsrd): Likewise.
+ (*mmx_pinsrb): Likewise.
+ (*pinsrb): Likewise.
+ (mmx_pshufbv8qi3): Likewise.
+ (mmx_pshufbv4qi3): Likewise.
+ (@sse4_1_insertps_<mode>): Likewise.
+	(*mmx_pextrw): Split alternatives and map non-EGPR
+ constraints, attr_gpr32 and attr_isa to noavx mnemonics.
+ (*movv2qi_internal): Likewise.
+ (*pextrw): Likewise.
+ (*mmx_pextrb): Likewise.
+ (*mmx_pextrb_zext): Likewise.
+ (*pextrb): Likewise.
+ (*pextrb_zext): Likewise.
+ (vec_extractv2si_1): Likewise.
+ (vec_extractv2si_1_zext): Likewise.
+ * config/i386/sse.md: (vi128_h_r): New mode attr for
+ pinsr{bw}/pextr{bw} with reg operand.
+	(*abs<mode>2): Split alternatives and %v in mnemonics, map
+ non-EGPR constraints, gpr32 and isa attrs to noavx mnemonics.
+ (*vec_extract<mode>): Likewise.
+ (*vec_extract<mode>): Likewise for HFBF pattern.
+ (*vec_extract<PEXTR_MODE12:mode>_zext): Likewise.
+ (*vec_extractv4si_1): Likewise.
+ (*vec_extractv4si_zext): Likewise.
+ (*vec_extractv2di_1): Likewise.
+ (*vec_concatv2si_sse4_1): Likewise.
+ (<sse2p4_1>_pinsr<ssemodesuffix>): Likewise.
+ (vec_concatv2di): Likewise.
+ (*sse4_1_<code>v2qiv2di2<mask_name>_1): Likewise.
+	(<ssse3_avx2>_pshufb<mode>3<mask_name>): Change "r/m/Bm" to
+ "jr/jm/ja" and set_attr gpr32 0 for noavx alternative, split
+ %v for avx/noavx alternatives if necessary.
+ (*vec_concatv2sf_sse4_1): Likewise.
+ (*sse4_1_extractps): Likewise.
+ (vec_set<mode>_0): Likewise for VI4F_128.
+ (*vec_setv4sf_sse4_1): Likewise.
+ (@sse4_1_insertps<mode>): Likewise.
+ (ssse3_pmaddubsw128): Likewise.
+ (*<ssse3_avx2>_pmulhrsw<mode>3<mask_name>): Likewise.
+ (<sse4_1_avx2>_packusdw<mask_name>): Likewise.
+ (<ssse3_avx2>_palignr<mode>): Likewise.
+ (<vi8_sse4_1_avx2_avx512>_movntdqa): Likewise.
+ (<sse4_1_avx2>_mpsadbw): Likewise.
+ (*sse4_1_mulv2siv2di3<mask_name>): Likewise.
+ (*<sse4_1_avx2>_mul<mode>3<mask_name>): Likewise.
+ (*sse4_1_<code><mode>3<mask_name>): Likewise.
+ (*<code>v8hi3): Likewise.
+ (*<code>v16qi3): Likewise.
+ (*sse4_1_<code>v8qiv8hi2<mask_name>_1): Likewise.
+ (*sse4_1_zero_extendv8qiv8hi2_3): Likewise.
+ (*sse4_1_zero_extendv8qiv8hi2_4): Likewise.
+ (*sse4_1_<code>v4qiv4si2<mask_name>_1): Likewise.
+ (*sse4_1_<code>v4hiv4si2<mask_name>_1): Likewise.
+ (*sse4_1_zero_extendv4hiv4si2_3): Likewise.
+ (*sse4_1_zero_extendv4hiv4si2_4): Likewise.
+ (*sse4_1_<code>v2hiv2di2<mask_name>_1): Likewise.
+ (*sse4_1_<code>v2siv2di2<mask_name>_1): Likewise.
+ (*sse4_1_zero_extendv2siv2di2_3): Likewise.
+ (*sse4_1_zero_extendv2siv2di2_4): Likewise.
+ (aesdec): Likewise.
+ (aesdeclast): Likewise.
+ (aesenc): Likewise.
+ (aesenclast): Likewise.
+ (pclmulqdq): Likewise.
+ (vgf2p8affineinvqb_<mode><mask_name>): Likewise.
+ (vgf2p8affineqb_<mode><mask_name>): Likewise.
+ (vgf2p8mulb_<mode><mask_name>): Likewise.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/i386-protos.h (x86_evex_reg_mentioned_p): New
+ prototype.
+ * config/i386/i386.cc (x86_evex_reg_mentioned_p): New
+ function.
+ * config/i386/i386.md (sse4_1_round<mode>2): Set attr gpr32 0
+ and constraint jm to all non-evex alternatives, adjust
+ alternative outputs if evex reg is mentioned.
+ * config/i386/sse.md (<sse4_1>_ptest<mode>): Set attr gpr32 0
+ and constraint jm/ja to all non-evex alternatives.
+ (ptesttf2): Likewise.
+	(<sse4_1>_round<ssemodesuffix><avxsizesuffix>): Likewise.
+ (sse4_1_round<ssescalarmodesuffix>): Likewise.
+ (sse4_2_pcmpestri): Likewise.
+ (sse4_2_pcmpestrm): Likewise.
+ (sse4_2_pcmpestr_cconly): Likewise.
+ (sse4_2_pcmpistr): Likewise.
+ (sse4_2_pcmpistri): Likewise.
+ (sse4_2_pcmpistrm): Likewise.
+ (sse4_2_pcmpistr_cconly): Likewise.
+ (aesimc): Likewise.
+ (aeskeygenassist): Likewise.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/sse.md (avx2_ph<plusminus_mnemonic>wv16hi3): Set
+ attr gpr32 0 and constraint jm/ja to all mem alternatives.
+ (ssse3_ph<plusminus_mnemonic>wv8hi3): Likewise.
+ (ssse3_ph<plusminus_mnemonic>wv4hi3): Likewise.
+ (avx2_ph<plusminus_mnemonic>dv8si3): Likewise.
+ (ssse3_ph<plusminus_mnemonic>dv4si3): Likewise.
+ (ssse3_ph<plusminus_mnemonic>dv2si3): Likewise.
+ (<ssse3_avx2>_psign<mode>3): Likewise.
+ (ssse3_psign<mode>3): Likewise.
+	(<sse4_1>_blend<ssemodesuffix><avxsizesuffix>): Likewise.
+	(<sse4_1>_blendv<ssemodesuffix><avxsizesuffix>): Likewise.
+ (*<sse4_1>_blendv<ssemodesuffix><avxsizesuffix>_lt): Likewise.
+	(*<sse4_1>_blendv<ssefltmodesuffix><avxsizesuffix>_not_ltint): Likewise.
+ (<sse4_1>_dp<ssemodesuffix><avxsizesuffix>): Likewise.
+ (<sse4_1_avx2>_mpsadbw): Likewise.
+ (<sse4_1_avx2>_pblendvb): Likewise.
+ (*<sse4_1_avx2>_pblendvb_lt): Likewise.
+ (sse4_1_pblend<ssemodesuffix>): Likewise.
+ (*avx2_pblend<ssemodesuffix>): Likewise.
+ (avx2_permv2ti): Likewise.
+ (*avx_vperm2f128<mode>_nozero): Likewise.
+ (*avx2_eq<mode>3): Likewise.
+ (*sse4_1_eqv2di3): Likewise.
+ (sse4_2_gtv2di3): Likewise.
+ (avx2_gt<mode>3): Likewise.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/i386.md (<xsave>): Set attr gpr32 0 and constraint
+ jm.
+ (<xsave>_rex64): Likewise.
+ (<xrstor>_rex64): Likewise.
+ (<xrstor>64): Likewise.
+ (fxsave64): Likewise.
+	(fxrstor64): Likewise.
+
+2023-10-07 Hongyu Wang <hongyu.wang@intel.com>
+ Kong Lingling <lingling.kong@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/i386.cc (ix86_get_ssemov): Check if egpr is used,
+	adjust mnemonic for vmovdqu/vmovdqa.
+ * config/i386/sse.md (*<extract_type>_vinsert<shuffletype><extract_suf>_0):
+ Check if egpr is used, adjust mnemonic for vmovdqu/vmovdqa.
+ (avx_vec_concat<mode>): Likewise, and separate alternative 0 to
+ avx_noavx512f.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+	* config/i386/i386.cc (map_egpr_constraints): New function to
+ map common constraints to EGPR prohibited constraints.
+ (ix86_md_asm_adjust): Calls map_egpr_constraints.
+ * config/i386/i386.opt: Add option mapx-inline-asm-use-gpr32.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/i386-protos.h (ix86_insn_base_reg_class): New
+ prototype.
+ (ix86_regno_ok_for_insn_base_p): Likewise.
+ (ix86_insn_index_reg_class): Likewise.
+ * config/i386/i386.cc (ix86_memory_address_use_extended_reg_class_p):
+ New helper function to scan the insn.
+ (ix86_insn_base_reg_class): New function to choose BASE_REG_CLASS.
+ (ix86_regno_ok_for_insn_base_p): Likewise for base regno.
+ (ix86_insn_index_reg_class): Likewise for INDEX_REG_CLASS.
+ * config/i386/i386.h (INSN_BASE_REG_CLASS): Define.
+ (REGNO_OK_FOR_INSN_BASE_P): Likewise.
+ (INSN_INDEX_REG_CLASS): Likewise.
+ (enum reg_class): Add INDEX_GPR16.
+ (GENERAL_GPR16_REGNO_P): Define.
+ * config/i386/i386.md (gpr32): New attribute.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/constraints.md (jr): New register constraint
+ that prohibits EGPR.
+ (jR): Constraint that force usage of EGPR.
+ (jm): New memory constraint that prohibits EGPR.
+ (ja): Likewise for Bm constraint.
+ (jb): Likewise for Tv constraint.
+ (j<): New auto-dec memory constraint that prohibits EGPR.
+ (j>): Likewise for ">" constraint.
+ (jo): Likewise for "o" constraint.
+ (jv): Likewise for "V" constraint.
+ (jp): Likewise for "p" constraint.
+ * config/i386/i386.h (enum reg_class): Add new reg class
+ GENERAL_GPR16.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * config/i386/i386-protos.h (x86_extended_rex2reg_mentioned_p):
+ New function prototype.
+ * config/i386/i386.cc (regclass_map): Add mapping for 16 new
+ general registers.
+ (debugger64_register_map): Likewise.
+ (ix86_conditional_register_usage): Clear REX2 register when APX
+ disabled.
+ (ix86_code_end): Add handling for REX2 reg.
+ (print_reg): Likewise.
+ (ix86_output_jmp_thunk_or_indirect): Likewise.
+ (ix86_output_indirect_branch_via_reg): Likewise.
+ (ix86_attr_length_vex_default): Likewise.
+ (ix86_emit_save_regs): Adjust to allow saving r31.
+ (ix86_register_priority): Set REX2 reg priority same as REX.
+ (x86_extended_reg_mentioned_p): Add check for REX2 regs.
+ (x86_extended_rex2reg_mentioned_p): New function.
+ * config/i386/i386.h (CALL_USED_REGISTERS): Add new extended
+ registers.
+ (REG_ALLOC_ORDER): Likewise.
+ (FIRST_REX2_INT_REG): Define.
+ (LAST_REX2_INT_REG): Ditto.
+ (GENERAL_REGS): Add 16 new registers.
+ (INT_SSE_REGS): Likewise.
+ (FLOAT_INT_REGS): Likewise.
+ (FLOAT_INT_SSE_REGS): Likewise.
+ (INT_MASK_REGS): Likewise.
+	(ALL_REGS): Likewise.
+ (REX2_INT_REG_P): Define.
+ (REX2_INT_REGNO_P): Ditto.
+ (GENERAL_REGNO_P): Add REX2_INT_REGNO_P.
+ (REGNO_OK_FOR_INDEX_P): Ditto.
+ (REG_OK_FOR_INDEX_NONSTRICT_P): Add new extended registers.
+ * config/i386/i386.md: Add 16 new integer general
+ registers.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * common/config/i386/cpuinfo.h (XSTATE_APX_F): New macro.
+ (XCR_APX_F_ENABLED_MASK): Likewise.
+	(get_available_features): Detect APX_F.
+ * common/config/i386/i386-common.cc (OPTION_MASK_ISA2_APX_F_SET): New.
+ (OPTION_MASK_ISA2_APX_F_UNSET): Likewise.
+ (ix86_handle_option): Handle -mapxf.
+ * common/config/i386/i386-cpuinfo.h (FEATURE_APX_F): New.
+ * common/config/i386/i386-isas.h: Add entry for APX_F.
+ * config/i386/cpuid.h (bit_APX_F): New.
+	* config/i386/i386.h (TARGET_APX_EGPR, TARGET_APX_PUSH2POP2,
+	TARGET_APX_NDD): New define.
+ * config/i386/i386-opts.h (enum apx_features): New enum.
+ * config/i386/i386-isa.def (APX_F): New DEF_PTA.
+ * config/i386/i386-options.cc (ix86_function_specific_save):
+ Save ix86_apx_features.
+ (ix86_function_specific_restore): Restore it.
+ (ix86_valid_target_attribute_inner_p): Add mapxf.
+ (ix86_option_override_internal): Set ix86_apx_features for PTA
+ and TARGET_APX_F. Also reports error when APX_F is set but not
+ having TARGET_64BIT.
+	* config/i386/i386.opt (-mapxf): New ISA flag option.
+ (-mapx=): New enumeration option.
+ (apx_features): New enum type.
+ (apx_none): New enum value.
+ (apx_egpr): Likewise.
+ (apx_push2pop2): Likewise.
+ (apx_ndd): Likewise.
+ (apx_all): Likewise.
+ * doc/invoke.texi: Document mapxf.
+
+2023-10-07 Hongyu Wang <hongyu.wang@intel.com>
+ Kong Lingling <lingling.kong@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * addresses.h (index_reg_class): New wrapper function like
+ base_reg_class.
+ * doc/tm.texi: Document INSN_INDEX_REG_CLASS.
+ * doc/tm.texi.in: Ditto.
+ * lra-constraints.cc (index_part_to_reg): Pass index_class.
+ (process_address_1): Calls index_reg_class with curr_insn and
+ replace INDEX_REG_CLASS with its return value index_cl.
+ * reload.cc (find_reloads_address): Likewise.
+ (find_reloads_address_1): Likewise.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * addresses.h (base_reg_class): Add insn argument and new macro
+ INSN_BASE_REG_CLASS.
+ (regno_ok_for_base_p_1): Add insn argument and new macro
+ REGNO_OK_FOR_INSN_BASE_P.
+ (regno_ok_for_base_p): Add insn argument and parse to ok_for_base_p_1.
+ * doc/tm.texi: Document INSN_BASE_REG_CLASS and
+ REGNO_OK_FOR_INSN_BASE_P.
+ * doc/tm.texi.in: Ditto.
+ * lra-constraints.cc (process_address_1): Pass insn to
+ base_reg_class.
+ (curr_insn_transform): Ditto.
+ * reload.cc (find_reloads): Ditto.
+ (find_reloads_address): Ditto.
+ (find_reloads_address_1): Ditto.
+ (find_reloads_subreg_address): Ditto.
+ * reload1.cc (maybe_fix_stack_asms): Ditto.
+
+2023-10-07 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/108338
+ * config/rs6000/rs6000.md (movsf_from_si): Update to generate mtvsrws
+ for P9.
+
+2023-10-07 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/108338
+ * config/rs6000/predicates.md (lowpart_subreg_operator): New
+ define_predicate.
+ * config/rs6000/rs6000.md (any_rshift): New code_iterator.
+ (movsf_from_si2): Rename to ...
+ (movsf_from_si2_<code>): ... this.
+
+2023-10-07 Pan Li <pan2.li@intel.com>
+
+ PR target/111634
+ * config/riscv/riscv.cc (riscv_legitimize_address): Ensure
+	object is a REG before extracting its REGNO.
+
+2023-10-06 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/i386/i386-expand.cc (ix86_split_ashl): Split shifts by
+ one into add3_cc_overflow_1 followed by add3_carry.
+ * config/i386/i386.md (@add<mode>3_cc_overflow_1): Renamed from
+ "*add<mode>3_cc_overflow_1" to provide generator function.
+
+2023-10-06 Roger Sayle <roger@nextmovesoftware.com>
+ Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.cc (ix86_avoid_lea_for_addr): Split LEAs used
+ to perform left shifts into shorter instructions with -Oz.
+
+2023-10-06 Vineet Gupta <vineetg@rivosinc.com>
+
+ * config/riscv/riscv.md (mvconst_internal): Add !ira_in_progress.
+
+2023-10-06 Sandra Loosemore <sandra@codesourcery.com>
+
+ * doc/extend.texi (Function Attributes): Mention standard attribute
+ syntax.
+ (Variable Attributes): Likewise.
+ (Type Attributes): Likewise.
+ (Attribute Syntax): Likewise.
+
+2023-10-06 Andrew Stubbs <ams@codesourcery.com>
+
+ * config/gcn/gcn-valu.md (*mov<mode>): Convert to compact syntax.
+ (mov<mode>_exec): Likewise.
+ (mov<mode>_sgprbase): Likewise.
+ * config/gcn/gcn.md (*mov<mode>_insn): Likewise.
+ (*movti_insn): Likewise.
+
+2023-10-06 Andrew Stubbs <ams@codesourcery.com>
+
+ * config/gcn/gcn.cc (print_operand): Adjust xcode type to fix warning.
+
+2023-10-06 Andrew Pinski <pinskia@gmail.com>
+
+ PR middle-end/111699
+ * match.pd ((c ? a : b) op d, (c ? a : b) op (c ? d : e),
+ (v ? w : 0) ? a : b, c1 ? c2 ? a : b : b): Enable only for GIMPLE.
+
+2023-10-06 Jakub Jelinek <jakub@redhat.com>
+
+ * ipa-prop.h (ipa_bits): Remove.
+ (struct ipa_jump_func): Remove bits member.
+ (struct ipcp_transformation): Remove bits member, adjust
+ ctor and dtor.
+ (ipa_get_ipa_bits_for_value): Remove.
+ * ipa-prop.cc (struct ipa_bit_ggc_hash_traits): Remove.
+ (ipa_bits_hash_table): Remove.
+ (ipa_print_node_jump_functions_for_edge): Don't print bits.
+ (ipa_get_ipa_bits_for_value): Remove.
+ (ipa_set_jfunc_bits): Remove.
+ (ipa_compute_jump_functions_for_edge): For pointers query
+ pointer alignment before ipa_set_jfunc_vr and update_bitmask
+ in there. For integral types, just rely on bitmask already
+ being handled in value ranges.
+ (ipa_check_create_edge_args): Don't create ipa_bits_hash_table.
+ (ipcp_transformation_initialize): Neither here.
+ (ipcp_transformation_t::duplicate): Don't copy bits vector.
+ (ipa_write_jump_function): Don't stream bits here.
+ (ipa_read_jump_function): Neither here.
+ (useful_ipcp_transformation_info_p): Don't test bits vec.
+ (write_ipcp_transformation_info): Don't stream bits here.
+ (read_ipcp_transformation_info): Neither here.
+ (ipcp_get_parm_bits): Get mask and value from m_vr rather
+ than bits.
+ (ipcp_update_bits): Remove.
+ (ipcp_update_vr): For pointers, set_ptr_info_alignment from
+ bitmask stored in value range.
+ (ipcp_transform_function): Don't test bits vector, don't call
+ ipcp_update_bits.
+ * ipa-cp.cc (propagate_bits_across_jump_function): Don't use
+ jfunc->bits, instead get mask and value from jfunc->m_vr.
+ (ipcp_store_bits_results): Remove.
+ (ipcp_store_vr_results): Incorporate parts of
+ ipcp_store_bits_results here, merge the bitmasks with value
+ range if both are supplied.
+ (ipcp_driver): Don't call ipcp_store_bits_results.
+ * ipa-sra.cc (zap_useless_ipcp_results): Remove *ts->bits
+ clearing.
+
+2023-10-06 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md: Update comments.
+
+2023-10-05 John David Anglin <danglin@gcc.gnu.org>
+
+ * config/pa/pa32-linux.h (MALLOC_ABI_ALIGNMENT): Delete.
+
+2023-10-05 Andrew MacLeod <amacleod@redhat.com>
+
+ * timevar.def (TV_TREE_FAST_VRP): New.
+ * tree-pass.h (make_pass_fast_vrp): New prototype.
+ * tree-vrp.cc (class fvrp_folder): New.
+ (fvrp_folder::fvrp_folder): New.
+ (fvrp_folder::~fvrp_folder): New.
+ (fvrp_folder::value_of_expr): New.
+ (fvrp_folder::value_on_edge): New.
+ (fvrp_folder::value_of_stmt): New.
+ (fvrp_folder::pre_fold_bb): New.
+ (fvrp_folder::post_fold_bb): New.
+ (fvrp_folder::pre_fold_stmt): New.
+ (fvrp_folder::fold_stmt): New.
+ (execute_fast_vrp): New.
+ (pass_data_fast_vrp): New.
+	(pass_vrp::execute): Check for fast VRP pass.
+ (make_pass_fast_vrp): New.
+
+2023-10-05 Andrew MacLeod <amacleod@redhat.com>
+
+ * gimple-range.cc (dom_ranger::dom_ranger): New.
+ (dom_ranger::~dom_ranger): New.
+ (dom_ranger::range_of_expr): New.
+ (dom_ranger::edge_range): New.
+ (dom_ranger::range_on_edge): New.
+ (dom_ranger::range_in_bb): New.
+ (dom_ranger::range_of_stmt): New.
+ (dom_ranger::maybe_push_edge): New.
+ (dom_ranger::pre_bb): New.
+ (dom_ranger::post_bb): New.
+ * gimple-range.h (class dom_ranger): New.
+
+2023-10-05 Andrew MacLeod <amacleod@redhat.com>
+
+ * gimple-range-gori.cc (gori_stmt_info::gori_stmt_info): New.
+ (gori_calc_operands): New.
+ (gori_on_edge): New.
+ (gori_name_helper): New.
+ (gori_name_on_edge): New.
+ * gimple-range-gori.h (gori_on_edge): New prototype.
+ (gori_name_on_edge): New prototype.
+
+2023-10-05 Sergei Trofimovich <siarheit@google.com>
+
+ PR ipa/111283
+ PR gcov-profile/111559
+ * ipa-utils.cc (ipa_merge_profiles): Avoid producing
+ uninitialized probabilities when merging counters with zero
+ denominators.
+
+2023-10-05 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/111657
+ * config/i386/i386-expand.cc (alg_usable_p): Reject libcall
+ strategy for non-default address spaces.
+ (decide_alg): Use loop strategy as a fallback strategy for
+ non-default address spaces.
+
+2023-10-05 Jakub Jelinek <jakub@redhat.com>
+
+ * sreal.cc (verify_aritmetics): Rename to ...
+ (verify_arithmetics): ... this.
+ (sreal_verify_arithmetics): Adjust caller.
+
+2023-10-05 Martin Jambor <mjambor@suse.cz>
+
+ Revert:
+ 2023-10-03 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/108007
+ * cgraph.h (cgraph_edge): Add a parameter to
+ redirect_call_stmt_to_callee.
+ * ipa-param-manipulation.h (ipa_param_adjustments): Add a
+ parameter to modify_call.
+ * cgraph.cc (cgraph_edge::redirect_call_stmt_to_callee): New
+ parameter killed_ssas, pass it to padjs->modify_call.
+ * ipa-param-manipulation.cc (purge_transitive_uses): New function.
+ (ipa_param_adjustments::modify_call): New parameter killed_ssas.
+ Instead of substituting uses, invoke purge_transitive_uses. If
+ hash of killed SSAs has not been provided, create a temporary one
+ and release SSAs that have been added to it.
+ * tree-inline.cc (redirect_all_calls): Create
+ id->killed_new_ssa_names earlier, pass it to edge redirection,
+ adjust a comment.
+ (copy_body): Release SSAs in id->killed_new_ssa_names.
+
+2023-10-05 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/autovec.md (@vec_series<mode>): Remove @.
+ (vec_series<mode>): Ditto.
+ * config/riscv/riscv-v.cc (expand_const_vector): Ditto.
+ (shuffle_decompress_patterns): Ditto.
+
+2023-10-05 Claudiu Zissulescu <claziss@gmail.com>
+
+ * config/arc/arc-passes.def: Remove arc_ifcvt pass.
+ * config/arc/arc-protos.h (arc_ccfsm_branch_deleted_p): Remove.
+ (arc_ccfsm_record_branch_deleted): Likewise.
+ (arc_ccfsm_cond_exec_p): Likewise.
+ (arc_ccfsm): Likewise.
+ (arc_ccfsm_record_condition): Likewise.
+ (make_pass_arc_ifcvt): Likewise.
+ * config/arc/arc.cc (arc_ccfsm): Remove.
+ (arc_ccfsm_current): Likewise.
+ (ARC_CCFSM_BRANCH_DELETED_P): Likewise.
+ (ARC_CCFSM_RECORD_BRANCH_DELETED): Likewise.
+ (ARC_CCFSM_COND_EXEC_P): Likewise.
+ (CCFSM_ISCOMPACT): Likewise.
+ (CCFSM_DBR_ISCOMPACT): Likewise.
+ (machine_function): Remove ccfsm related fields.
+ (arc_ifcvt): Remove pass.
+ (arc_print_operand): Remove `#` punct operand and other ccfsm
+ related code.
+ (arc_ccfsm_advance): Remove.
+ (arc_ccfsm_at_label): Likewise.
+ (arc_ccfsm_record_condition): Likewise.
+ (arc_ccfsm_post_advance): Likewise.
+ (arc_ccfsm_branch_deleted_p): Likewise.
+ (arc_ccfsm_record_branch_deleted): Likewise.
+ (arc_ccfsm_cond_exec_p): Likewise.
+ (arc_get_ccfsm_cond): Likewise.
+ (arc_final_prescan_insn): Remove ccfsm references.
+ (arc_internal_label): Likewise.
+ (arc_reorg): Likewise.
+ (arc_output_libcall): Likewise.
+ * config/arc/arc.md: Remove ccfsm references and update related
+ instruction patterns.
+
+2023-10-05 Claudiu Zissulescu <claziss@gmail.com>
+
+ * config/arc/arc.cc (arc_init): Remove '^' punct char.
+ (arc_print_operand): Remove related code.
+ * config/arc/arc.md: Update patterns which uses '%&'.
+
+2023-10-05 Claudiu Zissulescu <claziss@gmail.com>
+
+ * config/arc/arc-protos.h (arc_clear_unalign): Remove.
+ (arc_toggle_unalign): Likewise.
+	* config/arc/arc.cc (machine_function): Remove unalign.
+ (arc_init): Remove `&` punct character.
+ (arc_print_operand): Remove `&` related functions.
+ (arc_verify_short): Update function's number of parameters.
+ (output_short_suffix): Update function.
+ (arc_short_long): Likewise.
+ (arc_clear_unalign): Remove.
+ (arc_toggle_unalign): Likewise.
+ * config/arc/arc.h (ASM_OUTPUT_CASE_END): Remove.
+ (ASM_OUTPUT_ALIGN): Update.
+ * config/arc/arc.md: Remove all `%&` references.
+ * config/arc/arc.opt (mannotate-align): Ignore option.
+ * doc/invoke.texi (mannotate-align): Update description.
+
+2023-10-05 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-slp.cc (vect_build_slp_tree_1): Do not
+ ask for internal_fn_p (CFN_LAST).
+
+2023-10-05 Richard Biener <rguenther@suse.de>
+
+ * tree-ssa-sccvn.cc (rpo_elim::eliminate_avail): Not
+ visited value numbers are available itself.
+
+2023-10-05 Richard Biener <rguenther@suse.de>
+
+ PR ipa/111643
+ * doc/extend.texi (attribute flatten): Clarify.
+
+2023-10-04 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/arc/arc-protos.h (emit_shift): Delete prototype.
+ (arc_pre_reload_split): New function prototype.
+ * config/arc/arc.cc (emit_shift): Delete function.
+ (arc_pre_reload_split): New predicate function, copied from i386,
+ to schedule define_insn_and_split splitters to the split1 pass.
+ * config/arc/arc.md (ashlsi3): Expand RTL template unconditionally.
+ (ashrsi3): Likewise.
+ (lshrsi3): Likewise.
+ (shift_si3): Move after other shift patterns, and disable when
+ operands[2] is one (which is handled by its own define_insn).
+ Use shiftr4_operator, instead of shift4_operator, as this is no
+ longer used for left shifts.
+ (shift_si3_loop): Likewise. Additionally remove match_scratch.
+ (*ashlsi3_nobs): New pre-reload define_insn_and_split.
+ (*ashrsi3_nobs): Likewise.
+ (*lshrsi3_nobs): Likewise.
+ (rotrsi3_cnt1): Rename define_insn from *rotrsi3_cnt1.
+ (add_shift): Rename define_insn from *add_shift.
+ * config/arc/predicates.md (shiftl4_operator): Delete.
+ (shift4_operator): Delete.
+
+2023-10-04 Roger Sayle <roger@nextmovesoftware.com>
+
+	* config/arc/arc.md (ashlsi3_cnt1): Rename define_insn from *ashlsi3_cnt1.
+ Change type attribute to "unary", as this doesn't have operands[2].
+ Change length attribute to "*,4" to allow compact representation.
+ (lshrsi3_cnt1): Rename define_insn from *lshrsi3_cnt1. Change
+ insn type attribute to "unary", as this doesn't have operands[2].
+ (ashrsi3_cnt1): Rename define_insn from *ashrsi3_cnt1. Change
+ insn type attribute to "unary", as this doesn't have operands[2].
+
+2023-10-04 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR rtl-optimization/110701
+ * combine.cc (record_dead_and_set_regs_1): Split comment into
+ pieces placed before the relevant clauses. When the SET_DEST
+ is a partial_subreg_p, mark the bits outside of the updated
+ portion of the destination as undefined.
+
+2023-10-04 Kito Cheng <kito.cheng@sifive.com>
+
+ PR bootstrap/111664
+ * opt-read.awk: Drop multidimensional arrays.
+ * opth-gen.awk: Ditto.
+
+2023-10-04 Xi Ruoyao <xry111@xry111.site>
+
+ * config/loongarch/loongarch.md (UNSPEC_FCOPYSIGN): Delete.
+ (copysign<mode>3): Use copysign RTL instead of UNSPEC.
+
+2023-10-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/111369
+ * match.pd (x == cstN ? cst4 : cst3): Use
+ build_nonstandard_integer_type only if type1 is BOOLEAN_TYPE.
+ Fix comment typo. Formatting fix.
+ (a?~t:t -> (-(a))^t): Always convert to type rather
+ than using build_nonstandard_integer_type. Perform negation
+ only if type has precision > 1 and is not signed BOOLEAN_TYPE.
+
+2023-10-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/111668
+ * match.pd (a ? CST1 : CST2): Handle the a ? -1 : 0 and
+ a ? 0 : -1 cases before the powerof2cst cases and differentiate
+ between 1-bit precision types, larger precision boolean types
+ and other integral types. Fix comment pastos and formatting.
+
+2023-10-03 Andrew MacLeod <amacleod@redhat.com>
+
+ * tree-ssanames.cc (set_range_info): Use get_ptr_info for
+ pointers rather than range_info_get_range.
+
+2023-10-03 Martin Jambor <mjambor@suse.cz>
+
+ * ipa-modref.h (modref_summary::dump): Make const.
+ * ipa-modref.cc (modref_summary::dump): Likewise.
+ (dump_lto_records): Dump to out instead of dump_file.
+
+2023-10-03 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/110378
+ * ipa-param-manipulation.cc
+ (ipa_param_body_adjustments::mark_dead_statements): Verify that any
+ return uses of PARAM will be removed.
+ (ipa_param_body_adjustments::mark_clobbers_dead): Likewise.
+ * ipa-sra.cc (isra_param_desc): New fields
+ remove_only_when_retval_removed and split_only_when_retval_removed.
+ (struct gensum_param_desc): Likewise. Fix comment long line.
+ (ipa_sra_function_summaries::duplicate): Copy the new flags.
+ (dump_gensum_param_descriptor): Dump the new flags.
+ (dump_isra_param_descriptor): Likewise.
+ (isra_track_scalar_value_uses): New parameter desc. Set its flag
+ remove_only_when_retval_removed when encountering a simple return.
+ (isra_track_scalar_param_local_uses): Replace parameter call_uses_p
+ with desc. Pass it to isra_track_scalar_value_uses and set its
+ call_uses.
+ (ptr_parm_has_nonarg_uses): Accept parameter descriptor as a
+	parameter. If there is a direct return use, mark any split as
+	valid only when the return value is removed (TODO: confirm exact
+	wording against the upstream commit).
+ (create_parameter_descriptors): Pass the whole parameter descriptor to
+ isra_track_scalar_param_local_uses and ptr_parm_has_nonarg_uses.
+ (process_scan_results): Copy the new flags.
+ (isra_write_node_summary): Stream the new flags.
+ (isra_read_node_info): Likewise.
+ (adjust_parameter_descriptions): Check that transformations
+	requiring return removal only happen when return value is removed.
+ Restructure main loop. Adjust dump message.
+
+2023-10-03 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/108007
+ * cgraph.h (cgraph_edge): Add a parameter to
+ redirect_call_stmt_to_callee.
+ * ipa-param-manipulation.h (ipa_param_adjustments): Add a
+ parameter to modify_call.
+ * cgraph.cc (cgraph_edge::redirect_call_stmt_to_callee): New
+ parameter killed_ssas, pass it to padjs->modify_call.
+ * ipa-param-manipulation.cc (purge_transitive_uses): New function.
+ (ipa_param_adjustments::modify_call): New parameter killed_ssas.
+ Instead of substituting uses, invoke purge_transitive_uses. If
+ hash of killed SSAs has not been provided, create a temporary one
+ and release SSAs that have been added to it.
+ * tree-inline.cc (redirect_all_calls): Create
+ id->killed_new_ssa_names earlier, pass it to edge redirection,
+ adjust a comment.
+ (copy_body): Release SSAs in id->killed_new_ssa_names.
+
+2023-10-03 Andrew MacLeod <amacleod@redhat.com>
+
+ * passes.def (pass_vrp): Pass "final pass" flag as parameter.
+ * tree-vrp.cc (vrp_pass_num): Remove.
+ (pass_vrp::my_pass): Remove.
+ (pass_vrp::pass_vrp): Add warn_p as a parameter.
+ (pass_vrp::final_p): New.
+ (pass_vrp::set_pass_param): Set final_p param.
+ (pass_vrp::execute): Call execute_range_vrp with no conditions.
+ (make_pass_vrp): Pass additional parameter.
+ (make_pass_early_vrp): Ditto.
+
+2023-10-03 Andrew MacLeod <amacleod@redhat.com>
+
+ * tree-ssanames.cc (set_range_info): Return true only if the
+ current value changes.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic.cc (diagnostic_set_info_translated): Update for "m_"
+ prefixes to text_info fields.
+ (diagnostic_report_diagnostic): Likewise.
+ (verbatim): Use text_info ctor.
+ (simple_diagnostic_path::add_event): Likewise.
+ (simple_diagnostic_path::add_thread_event): Likewise.
+ * dumpfile.cc (dump_pretty_printer::decode_format): Update for
+ "m_" prefixes to text_info fields.
+ (dump_context::dump_printf_va): Use text_info ctor.
+ * graphviz.cc (graphviz_out::graphviz_out): Use text_info ctor.
+ (graphviz_out::print): Likewise.
+ * opt-problem.cc (opt_problem::opt_problem): Likewise.
+ * pretty-print.cc (pp_format): Update for "m_" prefixes to
+ text_info fields.
+ (pp_printf): Use text_info ctor.
+ (pp_verbatim): Likewise.
+ (assert_pp_format_va): Likewise.
+ * pretty-print.h (struct text_info): Add ctors. Add "m_" prefix
+ to all fields.
+ * text-art/styled-string.cc (styled_string::from_fmt_va): Use
+ text_info ctor.
+ * tree-diagnostic.cc (default_tree_printer): Update for "m_"
+ prefixes to text_info fields.
+ * tree-pretty-print.h (pp_ti_abstract_origin): Likewise.
+
+2023-10-03 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/arc/arc.md (CC_ltu): New mode iterator for CC and CC_C.
+ (scc_ltu_<mode>): New define_insn to handle LTU form of scc_insn.
+ (*scc_insn): Don't split to a conditional move sequence for LTU.
+
+2023-10-03 Andrea Corallo <andrea.corallo@arm.com>
+
+ * config/aarch64/aarch64.md (@ccmp<CC_ONLY:mode><GPI:mode>)
+ (@ccmp<CC_ONLY:mode><GPI:mode>_rev, *call_insn, *call_value_insn)
+ (*mov<mode>_aarch64, load_pair_sw_<SX:mode><SX2:mode>)
+ (load_pair_dw_<DX:mode><DX2:mode>)
+ (store_pair_sw_<SX:mode><SX2:mode>)
+ (store_pair_dw_<DX:mode><DX2:mode>, *extendsidi2_aarch64)
+ (*zero_extendsidi2_aarch64, *load_pair_zero_extendsidi2_aarch64)
+ (*extend<SHORT:mode><GPI:mode>2_aarch64)
+ (*zero_extend<SHORT:mode><GPI:mode>2_aarch64)
+ (*extendqihi2_aarch64, *zero_extendqihi2_aarch64)
+ (*add<mode>3_aarch64, *addsi3_aarch64_uxtw, *add<mode>3_poly_1)
+ (add<mode>3_compare0, *addsi3_compare0_uxtw)
+ (*add<mode>3_compareC_cconly, add<mode>3_compareC)
+ (*add<mode>3_compareV_cconly_imm, add<mode>3_compareV_imm)
+ (*add<mode>3nr_compare0, subdi3, subv<GPI:mode>_imm)
+ (*cmpv<GPI:mode>_insn, sub<mode>3_compare1_imm, neg<mode>2)
+ (cmp<mode>, fcmp<mode>, fcmpe<mode>, *cmov<mode>_insn)
+ (*cmovsi_insn_uxtw, <optab><mode>3, *<optab>si3_uxtw)
+ (*and<mode>3_compare0, *andsi3_compare0_uxtw, one_cmpl<mode>2)
+ (*<NLOGICAL:optab>_one_cmpl<mode>3, *and<mode>3nr_compare0)
+ (*aarch64_ashl_sisd_or_int_<mode>3)
+ (*aarch64_lshr_sisd_or_int_<mode>3)
+ (*aarch64_ashr_sisd_or_int_<mode>3, *ror<mode>3_insn)
+ (*<optab>si3_insn_uxtw, <optab>_trunc<fcvt_target><GPI:mode>2)
+ (<optab><fcvt_target><GPF:mode>2)
+ (<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3)
+ (<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3)
+ (*aarch64_<optab><mode>3_cssc, copysign<GPF:mode>3_insn): Update
+ to new syntax.
+ * config/aarch64/aarch64-sve2.md (@aarch64_scatter_stnt<mode>)
+ (@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>)
+ (*aarch64_mul_unpredicated_<mode>)
+ (@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>_2)
+ (*cond_<sve_int_op><mode>_3, *cond_<sve_int_op><mode>_any)
+ (*cond_<sve_int_op><mode>_z, @aarch64_pred_<sve_int_op><mode>)
+ (*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_3)
+ (*cond_<sve_int_op><mode>_any, @aarch64_sve_<sve_int_op><mode>)
+ (@aarch64_sve_<sve_int_op>_lane_<mode>)
+ (@aarch64_sve_add_mul_lane_<mode>)
+ (@aarch64_sve_sub_mul_lane_<mode>, @aarch64_sve2_xar<mode>)
+ (*aarch64_sve2_bcax<mode>, @aarch64_sve2_eor3<mode>)
+ (*aarch64_sve2_nor<mode>, *aarch64_sve2_nand<mode>)
+ (*aarch64_sve2_bsl<mode>, *aarch64_sve2_nbsl<mode>)
+ (*aarch64_sve2_bsl1n<mode>, *aarch64_sve2_bsl2n<mode>)
+ (*aarch64_sve2_sra<mode>, @aarch64_sve_add_<sve_int_op><mode>)
+ (*aarch64_sve2_<su>aba<mode>, @aarch64_sve_add_<sve_int_op><mode>)
+ (@aarch64_sve_add_<sve_int_op>_lane_<mode>)
+ (@aarch64_sve_qadd_<sve_int_op><mode>)
+ (@aarch64_sve_qadd_<sve_int_op>_lane_<mode>)
+ (@aarch64_sve_sub_<sve_int_op><mode>)
+ (@aarch64_sve_sub_<sve_int_op>_lane_<mode>)
+ (@aarch64_sve_qsub_<sve_int_op><mode>)
+ (@aarch64_sve_qsub_<sve_int_op>_lane_<mode>)
+ (@aarch64_sve_<sve_fp_op><mode>, @aarch64_<sve_fp_op>_lane_<mode>)
+ (@aarch64_pred_<sve_int_op><mode>)
+ (@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_int_op><mode>_2)
+ (*cond_<sve_int_op><mode>_z, @aarch64_sve_<optab><mode>)
+ (@aarch64_<optab>_lane_<mode>, @aarch64_sve_<optab><mode>)
+ (@aarch64_<optab>_lane_<mode>, @aarch64_pred_<sve_fp_op><mode>)
+ (*cond_<sve_fp_op><mode>_any_relaxed)
+ (*cond_<sve_fp_op><mode>_any_strict)
+ (@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>)
+ (@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_fp_op><mode>)
+ (*cond_<sve_fp_op><mode>_strict): Update to new syntax.
+ * config/aarch64/aarch64-sve.md (*aarch64_sve_mov<mode>_ldr_str)
+ (*aarch64_sve_mov<mode>_no_ldr_str, @aarch64_pred_mov<mode>)
+ (*aarch64_sve_mov<mode>, aarch64_wrffr)
+ (mask_scatter_store<mode><v_int_container>)
+ (*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked)
+ (*mask_scatter_store<mode><v_int_container>_sxtw)
+ (*mask_scatter_store<mode><v_int_container>_uxtw)
+ (@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>)
+ (@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>)
+ (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw)
+ (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw)
+ (*vec_duplicate<mode>_reg, vec_shl_insert_<mode>)
+ (vec_series<mode>, @extract_<last_op>_<mode>)
+ (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2)
+ (*cond_<optab><mode>_any, @aarch64_pred_<optab><mode>)
+ (@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>)
+ (@cond_<optab><mode>)
+ (*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2)
+ (@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>)
+ (@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>)
+ (*cond_uxt<mode>_2, *cond_uxt<mode>_any, *cnot<mode>)
+ (*cond_cnot<mode>_2, *cond_cnot<mode>_any)
+ (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed)
+ (*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed)
+ (*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>)
+ (*cond_<optab><mode>_2, *cond_<optab><mode>_3)
+ (*cond_<optab><mode>_any, add<mode>3, sub<mode>3)
+ (@aarch64_pred_<su>abd<mode>, *aarch64_cond_<su>abd<mode>_2)
+ (*aarch64_cond_<su>abd<mode>_3, *aarch64_cond_<su>abd<mode>_any)
+ (@aarch64_sve_<optab><mode>, @aarch64_pred_<optab><mode>)
+ (*cond_<optab><mode>_2, *cond_<optab><mode>_z)
+ (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2)
+ (*cond_<optab><mode>_3, *cond_<optab><mode>_any, <optab><mode>3)
+ (*cond_bic<mode>_2, *cond_bic<mode>_any)
+ (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_const)
+ (*cond_<optab><mode>_any_const, *cond_<sve_int_op><mode>_m)
+ (*cond_<sve_int_op><mode>_z, *sdiv_pow2<mode>3)
+ (*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_any)
+ (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed)
+ (*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed)
+ (*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>)
+ (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
+ (*cond_<optab><mode>_2_const_relaxed)
+ (*cond_<optab><mode>_2_const_strict)
+ (*cond_<optab><mode>_3_relaxed, *cond_<optab><mode>_3_strict)
+ (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
+ (*cond_<optab><mode>_any_const_relaxed)
+ (*cond_<optab><mode>_any_const_strict)
+ (@aarch64_pred_<optab><mode>, *cond_add<mode>_2_const_relaxed)
+ (*cond_add<mode>_2_const_strict)
+ (*cond_add<mode>_any_const_relaxed)
+ (*cond_add<mode>_any_const_strict, @aarch64_pred_<optab><mode>)
+ (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
+ (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
+ (@aarch64_pred_<optab><mode>, *cond_sub<mode>_3_const_relaxed)
+ (*cond_sub<mode>_3_const_strict, *cond_sub<mode>_const_relaxed)
+ (*cond_sub<mode>_const_strict, *aarch64_pred_abd<mode>_relaxed)
+ (*aarch64_pred_abd<mode>_strict)
+ (*aarch64_cond_abd<mode>_2_relaxed)
+ (*aarch64_cond_abd<mode>_2_strict)
+ (*aarch64_cond_abd<mode>_3_relaxed)
+ (*aarch64_cond_abd<mode>_3_strict)
+ (*aarch64_cond_abd<mode>_any_relaxed)
+ (*aarch64_cond_abd<mode>_any_strict, @aarch64_pred_<optab><mode>)
+ (@aarch64_pred_fma<mode>, *cond_fma<mode>_2, *cond_fma<mode>_4)
+ (*cond_fma<mode>_any, @aarch64_pred_fnma<mode>)
+ (*cond_fnma<mode>_2, *cond_fnma<mode>_4, *cond_fnma<mode>_any)
+ (<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>)
+ (@<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>)
+ (@aarch64_sve_add_<optab><vsi2qi>, @aarch64_pred_<optab><mode>)
+ (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
+ (*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict)
+ (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
+ (@aarch64_<optab>_lane_<mode>, @aarch64_pred_<optab><mode>)
+ (*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict)
+ (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
+ (@aarch64_<optab>_lane_<mode>, @aarch64_sve_tmad<mode>)
+ (@aarch64_sve_<sve_fp_op>vnx4sf)
+ (@aarch64_sve_<sve_fp_op>_lanevnx4sf)
+ (@aarch64_sve_<sve_fp_op><mode>, *vcond_mask_<mode><vpred>)
+ (@aarch64_sel_dup<mode>, @aarch64_pred_cmp<cmp_op><mode>)
+ (*cmp<cmp_op><mode>_cc, *cmp<cmp_op><mode>_ptest)
+ (@aarch64_pred_fcm<cmp_op><mode>, @fold_extract_<last_op>_<mode>)
+ (@aarch64_fold_extract_vector_<last_op>_<mode>)
+ (@aarch64_sve_splice<mode>)
+ (@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>)
+ (@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>)
+ (*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed)
+ (*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict)
+ (*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>)
+ (@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>)
+ (@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>)
+ (*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed)
+ (*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict)
+ (*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>)
+ (@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>)
+ (*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>)
+ (@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>)
+ (*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>)
+ (@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>)
+ (*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>)
+ (@aarch64_brk<brk_op>, *aarch64_sve_<inc_dec><mode>_cntp): Update
+ to new syntax.
+ * config/aarch64/aarch64-simd.md (aarch64_simd_dup<mode>)
+ (load_pair<DREG:mode><DREG2:mode>)
+ (vec_store_pair<DREG:mode><DREG2:mode>, aarch64_simd_stp<mode>)
+ (aarch64_simd_mov_from_<mode>low)
+ (aarch64_simd_mov_from_<mode>high, and<mode>3<vczle><vczbe>)
+ (ior<mode>3<vczle><vczbe>, aarch64_simd_ashr<mode><vczle><vczbe>)
+ (aarch64_simd_bsl<mode>_internal<vczle><vczbe>)
+ (*aarch64_simd_bsl<mode>_alt<vczle><vczbe>)
+ (aarch64_simd_bsldi_internal, aarch64_simd_bsldi_alt)
+ (store_pair_lanes<mode>, *aarch64_combine_internal<mode>)
+ (*aarch64_combine_internal_be<mode>, *aarch64_combinez<mode>)
+ (*aarch64_combinez_be<mode>)
+ (aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_cm<optab>di)
+ (aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_mov<mode>)
+ (*aarch64_be_mov<mode>, *aarch64_be_movoi): Update to new syntax.
+
+2023-10-03 Andrea Corallo <andrea.corallo@arm.com>
+
+ * gensupport.cc (convert_syntax): Skip spaces before "cons:"
+ in new compact pattern syntax.
+
+2023-10-03 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gensupport.cc (convert_syntax): Updated to support unordered
+ constraints in compact syntax.
+
+2023-10-02 Michael Meissner <meissner@linux.ibm.com>
+
+ * config/rs6000/rs6000.md (UNSPEC_COPYSIGN): Delete.
+ (copysign<mode>3_fcpsg): Use copysign RTL instead of UNSPEC.
+ (copysign<mode>3_hard): Likewise.
+ (copysign<mode>3_soft): Likewise.
+ * config/rs6000/vector.md (vector_copysign<mode>3): Use copysign RTL
+ instead of UNSPEC.
+ * config/rs6000/vsx.md (vsx_copysign<mode>3): Use copysign RTL instead
+ of UNSPEC.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-format-json.cc (toplevel_array): Remove global in
+ favor of json_output_format::m_top_level_array.
+ (cur_group): Likewise, for json_output_format::m_cur_group.
+ (cur_children_array): Likewise, for
+ json_output_format::m_cur_children_array.
+ (class json_output_format): New.
+ (json_begin_diagnostic): Remove, in favor of
+ json_output_format::on_begin_diagnostic.
+ (json_end_diagnostic): Convert to...
+ (json_output_format::on_end_diagnostic): ...this.
+ (json_begin_group): Remove, in favor of
+ json_output_format::on_begin_group.
+ (json_end_group): Remove, in favor of
+ json_output_format::on_end_group.
+ (json_flush_to_file): Remove, in favor of
+ json_output_format::flush_to_file.
+ (json_stderr_final_cb): Remove, in favor of json_output_format
+ dtor.
+ (json_output_base_file_name): Remove global.
+ (class json_stderr_output_format): New.
+ (json_file_final_cb): Remove.
+ (class json_file_output_format): New.
+ (json_emit_diagram): Remove.
+ (diagnostic_output_format_init_json): Update.
+ (diagnostic_output_format_init_json_file): Update.
+ * diagnostic-format-sarif.cc (the_builder): Remove this global,
+ moving to a field of the sarif_output_format.
+ (sarif_builder::maybe_make_artifact_content_object): Use the
+ context's m_file_cache.
+ (get_source_lines): Convert to...
+ (sarif_builder::get_source_lines): ...this, using context's
+ m_file_cache.
+ (sarif_begin_diagnostic): Remove, in favor of
+ sarif_output_format::on_begin_diagnostic.
+ (sarif_end_diagnostic): Remove, in favor of
+ sarif_output_format::on_end_diagnostic.
+ (sarif_begin_group): Remove, in favor of
+ sarif_output_format::on_begin_group.
+ (sarif_end_group): Remove, in favor of
+ sarif_output_format::on_end_group.
+ (sarif_flush_to_file): Delete.
+ (sarif_stderr_final_cb): Delete.
+ (sarif_output_base_file_name): Delete.
+ (sarif_file_final_cb): Delete.
+ (class sarif_output_format): New.
+ (sarif_emit_diagram): Delete.
+ (class sarif_stream_output_format): New.
+ (class sarif_file_output_format): New.
+ (diagnostic_output_format_init_sarif): Update.
+ (diagnostic_output_format_init_sarif_stderr): Update.
+ (diagnostic_output_format_init_sarif_file): Update.
+ (diagnostic_output_format_init_sarif_stream): Update.
+ * diagnostic-show-locus.cc (diagnostic_show_locus): Update.
+ * diagnostic.cc (default_diagnostic_final_cb): Delete, moving to
+ diagnostic_text_output_format's dtor.
+ (diagnostic_initialize): Update, making a new instance of
+ diagnostic_text_output_format.
+ (diagnostic_finish): Delete m_output_format, rather than calling
+ final_cb.
+ (diagnostic_report_diagnostic): Assert that m_output_format is
+ non-NULL. Replace call to begin_group_cb with call to
+ m_output_format->on_begin_group. Replace call to
+ diagnostic_starter with call to
+ m_output_format->on_begin_diagnostic. Replace call to
+ diagnostic_finalizer with call to
+ m_output_format->on_end_diagnostic.
+ (diagnostic_emit_diagram): Replace both optional call to
+ m_diagrams.m_emission_cb and default implementation with call to
+ m_output_format->on_diagram. Move default implementation to
+ diagnostic_text_output_format::on_diagram.
+ (auto_diagnostic_group::~auto_diagnostic_group): Replace call to
+ end_group_cb with call to m_output_format->on_end_group.
+ (diagnostic_text_output_format::~diagnostic_text_output_format):
+ New, based on default_diagnostic_final_cb.
+ (diagnostic_text_output_format::on_begin_diagnostic): New, based
+ on code from diagnostic_report_diagnostic.
+ (diagnostic_text_output_format::on_end_diagnostic): Likewise.
+ (diagnostic_text_output_format::on_diagram): New, based on code
+ from diagnostic_emit_diagram.
+ * diagnostic.h (class diagnostic_output_format): New.
+ (class diagnostic_text_output_format): New.
+ (diagnostic_context::begin_diagnostic): Move to...
+ (diagnostic_context::m_text_callbacks::begin_diagnostic): ...here.
+ (diagnostic_context::start_span): Move to...
+ (diagnostic_context::m_text_callbacks::start_span): ...here.
+ (diagnostic_context::end_diagnostic): Move to...
+ (diagnostic_context::m_text_callbacks::end_diagnostic): ...here.
+ (diagnostic_context::begin_group_cb): Remove, in favor of
+ m_output_format->on_begin_group.
+ (diagnostic_context::end_group_cb): Remove, in favor of
+ m_output_format->on_end_group.
+ (diagnostic_context::final_cb): Remove, in favor of
+ m_output_format's dtor.
+ (diagnostic_context::m_output_format): New field.
+ (diagnostic_context::m_diagrams.m_emission_cb): Remove, in favor
+ of m_output_format->on_diagram.
+ (diagnostic_starter): Update.
+ (diagnostic_finalizer): Update.
+ (diagnostic_output_format_init_sarif_stream): New.
+ * input.cc (location_get_source_line): Move implementation apart from
+ call to diagnostic_file_cache_init to...
+ (file_cache::get_source_line): ...this new function...
+ (location_get_source_line): ...and reintroduce, rewritten in terms of
+ file_cache::get_source_line.
+ (get_source_file_content): Likewise, refactor into...
+ (file_cache::get_source_file_content): ...this new function.
+ * input.h (file_cache::get_source_line): New decl.
+ (file_cache::get_source_file_content): New decl.
+ * selftest-diagnostic.cc
+ (test_diagnostic_context::test_diagnostic_context): Update.
+ * tree-diagnostic-path.cc (event_range::print): Update for
+ change to diagnostic_context's start_span callback.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-show-locus.cc: Update for reorganization of
+ source-printing fields of diagnostic_context.
+ * diagnostic.cc (diagnostic_set_caret_max_width): Likewise.
+ (diagnostic_initialize): Likewise.
+ * diagnostic.h (diagnostic_context::show_caret): Move to...
+ (diagnostic_context::m_source_printing::enabled): ...here.
+ (diagnostic_context::caret_max_width): Move to...
+ (diagnostic_context::m_source_printing::max_width): ...here.
+ (diagnostic_context::caret_chars): Move to...
+ (diagnostic_context::m_source_printing::caret_chars): ...here.
+ (diagnostic_context::colorize_source_p): Move to...
+ (diagnostic_context::m_source_printing::colorize_source_p): ...here.
+ (diagnostic_context::show_labels_p): Move to...
+ (diagnostic_context::m_source_printing::show_labels_p): ...here.
+ (diagnostic_context::show_line_numbers_p): Move to...
+ (diagnostic_context::m_source_printing::show_line_numbers_p): ...here.
+ (diagnostic_context::min_margin_width): Move to...
+ (diagnostic_context::m_source_printing::min_margin_width): ...here.
+ (diagnostic_context::show_ruler_p): Move to...
+ (diagnostic_context::m_source_printing::show_ruler_p): ...here.
+ (diagnostic_same_line): Update for above changes.
+ * opts.cc (common_handle_option): Update for reorganization of
+ source-printing fields of diagnostic_context.
+ * selftest-diagnostic.cc
+ (test_diagnostic_context::test_diagnostic_context): Likewise.
+ * toplev.cc (general_init): Likewise.
+ * tree-diagnostic-path.cc (struct event_range): Likewise.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic.cc (diagnostic_initialize): Initialize
+ set_locations_cb to nullptr.
+
+2023-10-02 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ PR target/111235
+ * config/arm/constraints.md: Remove Pf constraint.
+ * config/arm/sync.md (arm_atomic_load<mode>): Add new pattern.
+ (arm_atomic_load_acquire<mode>): Likewise.
+ (arm_atomic_store<mode>): Likewise.
+ (arm_atomic_store_release<mode>): Likewise.
+ (atomic_load<mode>): Switch patterns to define_expand.
+ (atomic_store<mode>): Likewise.
+ (arm_atomic_loaddi2_ldrd): Remove predication.
+ (arm_load_exclusive<mode>): Likewise.
+ (arm_load_acquire_exclusive<mode>): Likewise.
+ (arm_load_exclusivesi): Likewise.
+ (arm_load_acquire_exclusivesi): Likewise.
+ (arm_load_exclusivedi): Likewise.
+ (arm_load_acquire_exclusivedi): Likewise.
+ (arm_store_exclusive<mode>): Likewise.
+ (arm_store_release_exclusivedi): Likewise.
+ (arm_store_release_exclusive<mode>): Likewise.
+ * config/arm/unspecs.md: Add VUNSPEC_LDR and VUNSPEC_STR.
+
+2023-10-02 Tamar Christina <tamar.christina@arm.com>
+
+ Revert:
+ 2023-10-02 Tamar Christina <tamar.christina@arm.com>
+
+ PR tree-optimization/109154
+ * tree-if-conv.cc (INCLUDE_ALGORITHM): Remove.
+ (cmp_arg_entry): New.
+ (predicate_scalar_phi): Use it.
+
+2023-10-02 Tamar Christina <tamar.christina@arm.com>
+
+ 	* config/aarch64/aarch64-simd.md (xorsign<mode>3): Renamed to...
+ (@xorsign<mode>3): ...This.
+ * config/aarch64/aarch64.md (xorsign<mode>3): Renamed to...
+ 	(@xorsign<mode>3): ...This and emit vectors directly.
+ * config/aarch64/iterators.md (VCONQ): Add SF and DF.
+
+2023-10-02 Tamar Christina <tamar.christina@arm.com>
+
+ * emit-rtl.cc (validate_subreg): Relax subreg rule.
+
+2023-10-02 Tamar Christina <tamar.christina@arm.com>
+
+ PR tree-optimization/109154
+ * tree-if-conv.cc (INCLUDE_ALGORITHM): Remove.
+ (cmp_arg_entry): New.
+ (predicate_scalar_phi): Use it.
+
+2023-10-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR bootstrap/111642
+ * rtl-tests.cc (const_poly_int_tests<N>::run): Use a local
+ poly_int64 typedef.
+ * simplify-rtx.cc (simplify_const_poly_int_tests<N>::run): Likewise.
+
+2023-10-02 Joern Rennecke <joern.rennecke@embecosm.com>
+ Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/riscv-protos.h (riscv_vector::expand_block_move):
+ Declare.
+ * config/riscv/riscv-v.cc (riscv_vector::expand_block_move):
+ New function.
+ * config/riscv/riscv.md (cpymemsi): Use riscv_vector::expand_block_move.
+ Change to ..
+ (cpymem<P:mode>) .. this.
+
+2023-10-01 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * combine.cc (simplify_compare_const): Properly handle unsigned
+ constants while narrowing comparison of memory and constants.
+
+2023-10-01 Feng Wang <wangfeng@eswincomputing.com>
+
+ * config/riscv/riscv-opts.h (MASK_ZICSR): Delete.
+ 	(MASK_ZIFENCEI): Delete.
+ (MASK_ZIHINTNTL): Ditto.
+ (MASK_ZIHINTPAUSE): Ditto.
+ (TARGET_ZICSR): Ditto.
+ (TARGET_ZIFENCEI): Ditto.
+ (TARGET_ZIHINTNTL): Ditto.
+ (TARGET_ZIHINTPAUSE): Ditto.
+ (MASK_ZAWRS): Ditto.
+ (TARGET_ZAWRS): Ditto.
+ (MASK_ZBA): Ditto.
+ (MASK_ZBB): Ditto.
+ (MASK_ZBC): Ditto.
+ (MASK_ZBS): Ditto.
+ (TARGET_ZBA): Ditto.
+ (TARGET_ZBB): Ditto.
+ (TARGET_ZBC): Ditto.
+ (TARGET_ZBS): Ditto.
+ (MASK_ZFINX): Ditto.
+ (MASK_ZDINX): Ditto.
+ (MASK_ZHINX): Ditto.
+ (MASK_ZHINXMIN): Ditto.
+ (TARGET_ZFINX): Ditto.
+ (TARGET_ZDINX): Ditto.
+ (TARGET_ZHINX): Ditto.
+ (TARGET_ZHINXMIN): Ditto.
+ (MASK_ZBKB): Ditto.
+ (MASK_ZBKC): Ditto.
+ (MASK_ZBKX): Ditto.
+ (MASK_ZKNE): Ditto.
+ (MASK_ZKND): Ditto.
+ (MASK_ZKNH): Ditto.
+ (MASK_ZKR): Ditto.
+ (MASK_ZKSED): Ditto.
+ (MASK_ZKSH): Ditto.
+ (MASK_ZKT): Ditto.
+ (TARGET_ZBKB): Ditto.
+ (TARGET_ZBKC): Ditto.
+ (TARGET_ZBKX): Ditto.
+ (TARGET_ZKNE): Ditto.
+ (TARGET_ZKND): Ditto.
+ (TARGET_ZKNH): Ditto.
+ (TARGET_ZKR): Ditto.
+ (TARGET_ZKSED): Ditto.
+ (TARGET_ZKSH): Ditto.
+ (TARGET_ZKT): Ditto.
+ (MASK_ZTSO): Ditto.
+ (TARGET_ZTSO): Ditto.
+ (MASK_VECTOR_ELEN_32): Ditto.
+ (MASK_VECTOR_ELEN_64): Ditto.
+ (MASK_VECTOR_ELEN_FP_32): Ditto.
+ (MASK_VECTOR_ELEN_FP_64): Ditto.
+ (MASK_VECTOR_ELEN_FP_16): Ditto.
+ (TARGET_VECTOR_ELEN_32): Ditto.
+ (TARGET_VECTOR_ELEN_64): Ditto.
+ (TARGET_VECTOR_ELEN_FP_32): Ditto.
+ (TARGET_VECTOR_ELEN_FP_64): Ditto.
+ (TARGET_VECTOR_ELEN_FP_16): Ditto.
+ (MASK_ZVBB): Ditto.
+ (MASK_ZVBC): Ditto.
+ (TARGET_ZVBB): Ditto.
+ (TARGET_ZVBC): Ditto.
+ (MASK_ZVKG): Ditto.
+ (MASK_ZVKNED): Ditto.
+ (MASK_ZVKNHA): Ditto.
+ (MASK_ZVKNHB): Ditto.
+ (MASK_ZVKSED): Ditto.
+ (MASK_ZVKSH): Ditto.
+ (MASK_ZVKN): Ditto.
+ (MASK_ZVKNC): Ditto.
+ (MASK_ZVKNG): Ditto.
+ (MASK_ZVKS): Ditto.
+ (MASK_ZVKSC): Ditto.
+ (MASK_ZVKSG): Ditto.
+ (MASK_ZVKT): Ditto.
+ (TARGET_ZVKG): Ditto.
+ (TARGET_ZVKNED): Ditto.
+ (TARGET_ZVKNHA): Ditto.
+ (TARGET_ZVKNHB): Ditto.
+ (TARGET_ZVKSED): Ditto.
+ (TARGET_ZVKSH): Ditto.
+ (TARGET_ZVKN): Ditto.
+ (TARGET_ZVKNC): Ditto.
+ (TARGET_ZVKNG): Ditto.
+ (TARGET_ZVKS): Ditto.
+ (TARGET_ZVKSC): Ditto.
+ (TARGET_ZVKSG): Ditto.
+ (TARGET_ZVKT): Ditto.
+ (MASK_ZVL32B): Ditto.
+ (MASK_ZVL64B): Ditto.
+ (MASK_ZVL128B): Ditto.
+ (MASK_ZVL256B): Ditto.
+ (MASK_ZVL512B): Ditto.
+ (MASK_ZVL1024B): Ditto.
+ (MASK_ZVL2048B): Ditto.
+ (MASK_ZVL4096B): Ditto.
+ (MASK_ZVL8192B): Ditto.
+ (MASK_ZVL16384B): Ditto.
+ (MASK_ZVL32768B): Ditto.
+ (MASK_ZVL65536B): Ditto.
+ (TARGET_ZVL32B): Ditto.
+ (TARGET_ZVL64B): Ditto.
+ (TARGET_ZVL128B): Ditto.
+ (TARGET_ZVL256B): Ditto.
+ (TARGET_ZVL512B): Ditto.
+ (TARGET_ZVL1024B): Ditto.
+ (TARGET_ZVL2048B): Ditto.
+ (TARGET_ZVL4096B): Ditto.
+ (TARGET_ZVL8192B): Ditto.
+ (TARGET_ZVL16384B): Ditto.
+ (TARGET_ZVL32768B): Ditto.
+ (TARGET_ZVL65536B): Ditto.
+ (MASK_ZICBOZ): Ditto.
+ (MASK_ZICBOM): Ditto.
+ (MASK_ZICBOP): Ditto.
+ (TARGET_ZICBOZ): Ditto.
+ (TARGET_ZICBOM): Ditto.
+ (TARGET_ZICBOP): Ditto.
+ (MASK_ZICOND): Ditto.
+ (TARGET_ZICOND): Ditto.
+ (MASK_ZFA): Ditto.
+ (TARGET_ZFA): Ditto.
+ (MASK_ZFHMIN): Ditto.
+ (MASK_ZFH): Ditto.
+ (MASK_ZVFHMIN): Ditto.
+ (MASK_ZVFH): Ditto.
+ (TARGET_ZFHMIN): Ditto.
+ (TARGET_ZFH): Ditto.
+ (TARGET_ZVFHMIN): Ditto.
+ (TARGET_ZVFH): Ditto.
+ (MASK_ZMMUL): Ditto.
+ (TARGET_ZMMUL): Ditto.
+ (MASK_ZCA): Ditto.
+ (MASK_ZCB): Ditto.
+ (MASK_ZCE): Ditto.
+ (MASK_ZCF): Ditto.
+ (MASK_ZCD): Ditto.
+ (MASK_ZCMP): Ditto.
+ (MASK_ZCMT): Ditto.
+ (TARGET_ZCA): Ditto.
+ (TARGET_ZCB): Ditto.
+ (TARGET_ZCE): Ditto.
+ (TARGET_ZCF): Ditto.
+ (TARGET_ZCD): Ditto.
+ (TARGET_ZCMP): Ditto.
+ (TARGET_ZCMT): Ditto.
+ (MASK_SVINVAL): Ditto.
+ (MASK_SVNAPOT): Ditto.
+ (TARGET_SVINVAL): Ditto.
+ (TARGET_SVNAPOT): Ditto.
+ (MASK_XTHEADBA): Ditto.
+ (MASK_XTHEADBB): Ditto.
+ (MASK_XTHEADBS): Ditto.
+ (MASK_XTHEADCMO): Ditto.
+ (MASK_XTHEADCONDMOV): Ditto.
+ (MASK_XTHEADFMEMIDX): Ditto.
+ (MASK_XTHEADFMV): Ditto.
+ (MASK_XTHEADINT): Ditto.
+ (MASK_XTHEADMAC): Ditto.
+ (MASK_XTHEADMEMIDX): Ditto.
+ (MASK_XTHEADMEMPAIR): Ditto.
+ (MASK_XTHEADSYNC): Ditto.
+ (TARGET_XTHEADBA): Ditto.
+ (TARGET_XTHEADBB): Ditto.
+ (TARGET_XTHEADBS): Ditto.
+ (TARGET_XTHEADCMO): Ditto.
+ (TARGET_XTHEADCONDMOV): Ditto.
+ (TARGET_XTHEADFMEMIDX): Ditto.
+ (TARGET_XTHEADFMV): Ditto.
+ (TARGET_XTHEADINT): Ditto.
+ (TARGET_XTHEADMAC): Ditto.
+ (TARGET_XTHEADMEMIDX): Ditto.
+ (TARGET_XTHEADMEMPAIR): Ditto.
+ (TARGET_XTHEADSYNC): Ditto.
+ (MASK_XVENTANACONDOPS): Ditto.
+ (TARGET_XVENTANACONDOPS): Ditto.
+ 	* config/riscv/riscv.opt: Add new Mask definition.
+ * doc/options.texi: Add explanation for this new usage.
+ * opt-functions.awk: Add new function to find the index
+ of target variable from extra_target_vars.
+ * opt-read.awk: Add new function to store the Mask flags.
+ 	* opth-gen.awk: Add new function to output the definition of
+ 	Mask Macro and Target Macro.
+
+2023-10-01 Joern Rennecke <joern.rennecke@embecosm.com>
+ Juzhe-Zhong <juzhe.zhong@rivai.ai>
+ Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/111566
+ * config/riscv/riscv-protos.h (riscv_vector::legitimize_move):
+ Change second parameter to rtx *.
+ 	* config/riscv/riscv-v.cc (riscv_vector::legitimize_move): Likewise.
+ * config/riscv/vector.md: Changed callers of
+ riscv_vector::legitimize_move.
+ (*mov<mode>_mem_to_mem): Remove.
+
+2023-09-30 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/111649
+ * config/riscv/riscv-vsetvl.cc (vector_infos_manager::vector_infos_manager):
+ Replace safe_grow with safe_grow_cleared.
+
+2023-09-30 Jakub Jelinek <jakub@redhat.com>
+
+ * gimple-match-head.cc (gimple_bitwise_inverted_equal_p): Fix a pasto
+ in function comment.
+
+2023-09-30 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/111625
+ PR middle-end/111637
+ * gimple-lower-bitint.cc (range_to_prec): Use prec or -prec if
+ r.undefined_p ().
+ (bitint_large_huge::handle_operand_addr): For uninitialized operands
+ use limb_prec or -limb_prec precision.
+
+2023-09-30 Jakub Jelinek <jakub@redhat.com>
+
+ * vec.h (quick_grow): Uncomment static_assert.
+
+2023-09-30 Jivan Hakobyan <jivanhakobyan9@gmail.com>
+
+ * config/riscv/bitmanip.md (*<optab>_not_const<mode>): Added type attribute
+
+2023-09-29 Xiao Zeng <zengxiao@eswincomputing.com>
+
+ * config/riscv/riscv.cc (riscv_rtx_costs): Better handle costing
+ SETs when the outer code is INSN.
+
+2023-09-29 Jivan Hakobyan <jivanhakobyan9@gmail.com>
+
+ * config/riscv/bitmanip.md (*<optab>_not_const<mode>): New split
+ pattern.
+
+2023-09-29 Richard Sandiford <richard.sandiford@arm.com>
+
+ * poly-int.h (poly_int_pod): Delete.
+ (poly_coeff_traits::init_cast): New type.
+ (poly_int_full, poly_int_hungry, poly_int_fullness): New structures.
+ (poly_int): Replace constructors that take 1 and 2 coefficients with
+ a general one that takes an arbitrary number of coefficients.
+ Delegate initialization to two new private constructors, one of
+ which uses the coefficients as-is and one of which adds an extra
+ zero of the appropriate type (and precision, where applicable).
+ (gt_ggc_mx, gt_pch_nx): Operate on poly_ints rather than poly_int_pods.
+ * poly-int-types.h (poly_uint16_pod, poly_int64_pod, poly_uint64_pod)
+ (poly_offset_int_pod, poly_wide_int_pod, poly_widest_int_pod): Delete.
+ * gengtype.cc (main): Don't register poly_int64_pod.
+ * calls.cc (initialize_argument_information): Use poly_int rather
+ than poly_int_pod.
+ (combine_pending_stack_adjustment_and_call): Likewise.
+ * config/aarch64/aarch64.cc (pure_scalable_type_info): Likewise.
+ * data-streamer.h (bp_unpack_poly_value): Likewise.
+ * dwarf2cfi.cc (struct dw_trace_info): Likewise.
+ (struct queued_reg_save): Likewise.
+ * dwarf2out.h (struct dw_cfa_location): Likewise.
+ * emit-rtl.h (struct incoming_args): Likewise.
+ (struct rtl_data): Likewise.
+ * expr.cc (get_bit_range): Likewise.
+ (get_inner_reference): Likewise.
+ * expr.h (get_bit_range): Likewise.
+ * fold-const.cc (split_address_to_core_and_offset): Likewise.
+ (ptr_difference_const): Likewise.
+ * fold-const.h (ptr_difference_const): Likewise.
+ * function.cc (try_fit_stack_local): Likewise.
+ (instantiate_new_reg): Likewise.
+ * function.h (struct expr_status): Likewise.
+ (struct args_size): Likewise.
+ * genmodes.cc (ZERO_COEFFS): Likewise.
+ (mode_size_inline): Likewise.
+ (mode_nunits_inline): Likewise.
+ (emit_mode_precision): Likewise.
+ (emit_mode_size): Likewise.
+ (emit_mode_nunits): Likewise.
+ * gimple-fold.cc (get_base_constructor): Likewise.
+ * gimple-ssa-store-merging.cc (struct symbolic_number): Likewise.
+ * inchash.h (class hash): Likewise.
+ * ipa-modref-tree.cc (modref_access_node::dump): Likewise.
+ * ipa-modref.cc (modref_access_analysis::merge_call_side_effects):
+ Likewise.
+ * ira-int.h (ira_spilled_reg_stack_slot): Likewise.
+ * lra-eliminations.cc (self_elim_offsets): Likewise.
+ * machmode.h (mode_size, mode_precision, mode_nunits): Likewise.
+ * omp-low.cc (omplow_simd_context): Likewise.
+ * pretty-print.cc (pp_wide_integer): Likewise.
+ * pretty-print.h (pp_wide_integer): Likewise.
+ * reload.cc (struct decomposition): Likewise.
+ * reload.h (struct reload): Likewise.
+ * reload1.cc (spill_stack_slot_width): Likewise.
+ (struct elim_table): Likewise.
+ (offsets_at): Likewise.
+ (init_eliminable_invariants): Likewise.
+ * rtl.h (union rtunion): Likewise.
+ (poly_int_rtx_p): Likewise.
+ (strip_offset): Likewise.
+ (strip_offset_and_add): Likewise.
+ * rtlanal.cc (strip_offset): Likewise.
+ * tree-dfa.cc (get_ref_base_and_extent): Likewise.
+ (get_addr_base_and_unit_offset_1): Likewise.
+ (get_addr_base_and_unit_offset): Likewise.
+ * tree-dfa.h (get_ref_base_and_extent): Likewise.
+ (get_addr_base_and_unit_offset_1): Likewise.
+ (get_addr_base_and_unit_offset): Likewise.
+ * tree-ssa-loop-ivopts.cc (struct iv_use): Likewise.
+ (strip_offset): Likewise.
+ * tree-ssa-sccvn.h (struct vn_reference_op_struct): Likewise.
+ * tree.cc (ptrdiff_tree_p): Likewise.
+ * tree.h (poly_int_tree_p): Likewise.
+ (ptrdiff_tree_p): Likewise.
+ (get_inner_reference): Likewise.
+
+2023-09-29 John David Anglin <danglin@gcc.gnu.org>
+
+ * config/pa/pa.md (memory_barrier): Revise comment.
+ (memory_barrier_64, memory_barrier_32): Use ldcw,co on PA 2.0.
+ * config/pa/pa.opt (coherent-ldcw): Change default to disabled.
+
+2023-09-29 Jakub Jelinek <jakub@redhat.com>
+
+ * vec.h (quick_insert, ordered_remove, unordered_remove,
+ block_remove, qsort, sort, stablesort, quick_grow): Guard
+ std::is_trivially_{copyable,default_constructible} and
+ vec_detail::is_trivially_copyable_or_pair static assertions
+ with GCC_VERSION >= 5000.
+ (vec_detail::is_trivially_copyable_or_pair): Guard definition
+ with GCC_VERSION >= 5000.
+
+2023-09-29 Manos Anagnostakis <manos.anagnostakis@vrull.eu>
+
+ * config/aarch64/aarch64-opts.h (enum aarch64_ldp_policy): Removed.
+ (enum aarch64_ldp_stp_policy): Merged enums aarch64_ldp_policy
+ and aarch64_stp_policy to aarch64_ldp_stp_policy.
+ (enum aarch64_stp_policy): Removed.
+ * config/aarch64/aarch64-protos.h (struct tune_params): Removed
+ aarch64_ldp_policy_model and aarch64_stp_policy_model enum types
+ and left only the definitions to the aarch64-opts one.
+ * config/aarch64/aarch64.cc (aarch64_parse_ldp_policy): Removed.
+ (aarch64_parse_stp_policy): Removed.
+ (aarch64_override_options_internal): Removed calls to parsing
+ functions and added obvious direct assignments.
+ (aarch64_mem_ok_with_ldpstp_policy_model): Improved
+ code quality based on the new changes.
+ * config/aarch64/aarch64.opt: Use single enum type
+ aarch64_ldp_stp_policy for both ldp and stp options.
+
+2023-09-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111583
+ * tree-loop-distribution.cc (find_single_drs): Ensure the
+ load/store are always executed.
+
+2023-09-29 Jakub Jelinek <jakub@redhat.com>
+
+ * tree-vect-patterns.cc (vect_recog_over_widening_pattern): Use
+ quick_grow_cleared method on unprom rather than quick_grow.
+
+2023-09-29 Sergei Trofimovich <siarheit@google.com>
+
+ PR middle-end/111505
+ * ggc-common.cc (ggc_zero_out_root_pointers, ggc_common_finalize):
+ Add new helper. Use helper instead of memset() to wipe out pointers.
+
+2023-09-29 Richard Sandiford <richard.sandiford@arm.com>
+
+ * builtins.h (c_readstr): Take a fixed_size_mode rather than a
+ scalar_int_mode.
+ * builtins.cc (c_readstr): Likewise. Build a local array of
+ bytes and use native_decode_rtx to get the rtx image.
+ (builtin_memcpy_read_str): Simplify accordingly.
+ (builtin_strncpy_read_str): Likewise.
+ (builtin_memset_read_str): Likewise.
+ (builtin_memset_gen_str): Likewise.
+ * expr.cc (string_cst_read_str): Likewise.
+
+2023-09-29 Jakub Jelinek <jakub@redhat.com>
+
+ * tree-ssa-loop-im.cc (tree_ssa_lim_initialize): Use quick_grow_cleared
+ instead of quick_grow on vec<bitmap_head> members.
+ * cfganal.cc (control_dependences::control_dependences): Likewise.
+ * rtl-ssa/blocks.cc (function_info::build_info::build_info): Likewise.
+ (function_info::place_phis): Use safe_grow_cleared instead of safe_grow
+ on auto_vec<bitmap_head> vars.
+ * tree-ssa-live.cc (compute_live_vars): Use quick_grow_cleared instead
+ of quick_grow on vec<bitmap_head> var.
+
+2023-09-28 Vladimir N. Makarov <vmakarov@redhat.com>
+
+ Revert:
+ 2023-09-14 Vladimir N. Makarov <vmakarov@redhat.com>
+
+ * ira-costs.cc (find_costs_and_classes): Decrease memory cost
+ by equiv savings.
+
+2023-09-28 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ PR target/111121
+ * config/aarch64/aarch64.md (aarch64_movmemdi): Add new expander.
+ (movmemdi): Call aarch64_expand_cpymem_mops for correct expansion.
+ * config/aarch64/aarch64.cc (aarch64_expand_cpymem_mops): Add support
+ for memmove.
+ * config/aarch64/aarch64-protos.h (aarch64_expand_cpymem_mops): Add new
+ function.
+
+2023-09-28 Pan Li <pan2.li@intel.com>
+
+ PR target/111506
+ * config/riscv/autovec.md (<float_cvt><mode><vnnconvert>2):
+ New pattern.
+ * config/riscv/vector-iterators.md: New iterator.
+
+2023-09-28 Vladimir N. Makarov <vmakarov@redhat.com>
+
+ * rtl.h (lra_in_progress): Change type to bool.
+ (ira_in_progress): Add new extern.
+ * ira.cc (ira_in_progress): New global.
+ (pass_ira::execute): Set up ira_in_progress.
+ * lra.cc: (lra_in_progress): Change type to bool and initialize.
+ (lra): Use bool values for lra_in_progress.
+ * lra-eliminations.cc (init_elim_table): Ditto.
+
+2023-09-28 Richard Biener <rguenther@suse.de>
+
+ PR target/111600
+ * gimple-ssa-warn-access.cc (pass_waccess::check_dangling_stores):
+ Use a heap allocated worklist for CFG traversal instead of
+ recursion.
+
+2023-09-28 Jakub Jelinek <jakub@redhat.com>
+ Jonathan Wakely <jwakely@redhat.com>
+
+ * vec.h: Mention in file comment limited support for non-POD types
+ in some operations.
+ (vec_destruct): New function template.
+ (release): Use it for non-trivially destructible T.
+ (truncate): Likewise.
+ (quick_push): Perform a placement new into slot
+ instead of assignment.
+ (pop): For non-trivially destructible T return void
+ rather than T & and destruct the popped element.
+ (quick_insert, ordered_remove): Note that they aren't suitable
+ for non-trivially copyable types. Add static_asserts for that.
+ (block_remove): Assert T is trivially copyable.
+ (vec_detail::is_trivially_copyable_or_pair): New trait.
+ (qsort, sort, stablesort): Assert T is trivially copyable or
+ 	std::pair with both trivially copyable types.
+ (quick_grow): Add assert T is trivially default constructible,
+ for now commented out.
+ (quick_grow_cleared): Don't call quick_grow, instead inline it
+ by hand except for the new static_assert.
+ 	(gt_ggc_mx): Assert T is trivially destructible.
+ (auto_vec::operator=): Formatting fixes.
+ (auto_vec::auto_vec): Likewise.
+ (vec_safe_grow_cleared): Don't call vec_safe_grow, instead inline
+ it manually and call quick_grow_cleared method rather than quick_grow.
+ (safe_grow_cleared): Likewise.
+ * edit-context.cc (class line_event): Move definition earlier.
+ * tree-ssa-loop-im.cc (seq_entry::seq_entry): Make default ctor
+ defaulted.
+ * ipa-fnsummary.cc (evaluate_properties_for_edge): Use
+ safe_grow_cleared instead of safe_grow followed by placement new
+ constructing the elements.
+
+2023-09-28 Richard Sandiford <richard.sandiford@arm.com>
+
+ * dwarf2out.cc (mem_loc_descriptor): Remove unused variables.
+ * tree-affine.cc (expr_to_aff_combination): Likewise.
+
2023-09-28 Richard Biener <rguenther@suse.de>
PR tree-optimization/111614
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 7cf5e25..304d626 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20230928
+20231018
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 9cc1626..747f749 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1443,6 +1443,7 @@ OBJS = \
fixed-value.o \
fold-const.o \
fold-const-call.o \
+ fold-mem-offsets.o \
function.o \
function-abi.o \
function-tests.o \
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index f549aa2..6cf1a63 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,68 @@
+2023-10-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.cc (inline_status_for_subprog): Minor tweak.
+ (gnat_to_gnu_field): Try harder to get a packable form of the type
+ for a bitfield.
+
+2023-10-10 Ronan Desplanques <desplanques@adacore.com>
+
+ * libgnat/a-direct.adb (Start_Search_Internal): Tweak subprogram
+ body.
+
+2023-10-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * sem_util.ads (Set_Scope_Is_Transient): Delete.
+ * sem_util.adb (Set_Scope_Is_Transient): Likewise.
+ * exp_ch7.adb (Create_Transient_Scope): Set Is_Transient directly.
+
+2023-10-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_aggr.adb (Is_Build_In_Place_Aggregate_Return): Return true
+ if the aggregate is a dependent expression of a conditional
+ expression being returned from a build-in-place function.
+
+2023-10-10 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR ada/111434
+ * sem_ch10.adb (Replace): New procedure to replace an entity with
+ another on the homonym chain.
+ (Install_Limited_With_Clause): Rename Non_Lim_View to Typ for the
+ sake of consistency. Call Replace to do the replacements and split
+	the code into the regular and the special cases. Add debugging
+ output controlled by -gnatdi.
+ (Install_With_Clause): Print the Parent_With and Implicit_With flags
+ in the debugging output controlled by -gnatdi.
+ (Remove_Limited_With_Unit.Restore_Chain_For_Shadow (Shadow)): Rewrite
+ using a direct replacement of E4 by E2. Call Replace to do the
+	replacements. Add debugging output controlled by -gnatdi.
+
+2023-10-10 Ronan Desplanques <desplanques@adacore.com>
+
+ * libgnat/a-direct.adb: Fix filesystem entry filtering.
+
+2023-10-10 Ronan Desplanques <desplanques@adacore.com>
+
+ * atree.ads, nlists.ads, types.ads: Remove references to extended
+ nodes. Fix typo.
+ * sinfo.ads: Likewise and fix position of
+ Comes_From_Check_Or_Contract description.
+
+2023-10-10 Javier Miranda <miranda@adacore.com>
+
+ * sem_attr.adb (Analyze_Attribute): Protect the frontend against
+ replacing 'Size by its static value if 'Size is not known at
+ compile time and we are processing pragmas Compile_Time_Warning or
+ Compile_Time_Errors.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc-interface/misc.cc: Use text_info ctor.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc-interface/misc.cc (gnat_post_options): Update for renaming
+ of diagnostic_context's show_caret to m_source_printing.enabled.
+
2023-09-26 Eric Botcazou <ebotcazou@adacore.com>
* exp_ch7.adb (Build_Finalizer.Process_Declarations): Remove call
diff --git a/gcc/ada/atree.ads b/gcc/ada/atree.ads
index abe5cc5..2ff65d2 100644
--- a/gcc/ada/atree.ads
+++ b/gcc/ada/atree.ads
@@ -252,7 +252,7 @@ package Atree is
-- The usual approach is to build a new node using this function and
-- then, using the value returned, use the Set_xxx functions to set
-- fields of the node as required. New_Node can only be used for
- -- non-entity nodes, i.e. it never generates an extended node.
+ -- non-entity nodes.
--
-- If we are currently parsing, as indicated by a previous call to
-- Set_Comes_From_Source_Default (True), then this call also resets
@@ -308,8 +308,7 @@ package Atree is
-- returns Empty, and New_Copy (Error) returns Error. Note that, unlike
-- Copy_Separate_Tree, New_Copy does not recursively copy any descendants,
-- so in general parent pointers are not set correctly for the descendants
- -- of the copied node. Both normal and extended nodes (entities) may be
- -- copied using New_Copy.
+ -- of the copied node.
function Relocate_Node (Source : Node_Id) return Node_Id;
-- Source is a non-entity node that is to be relocated. A new node is
@@ -359,7 +358,7 @@ package Atree is
-- caller, according to context.
procedure Extend_Node (Source : Node_Id);
- -- This turns a node into an entity; it function is used only by Sinfo.CN.
+ -- This turns a node into an entity; it is only used by Sinfo.CN.
type Ignored_Ghost_Record_Proc is access procedure (N : Node_Or_Entity_Id);
@@ -540,7 +539,7 @@ package Atree is
-- newly constructed replacement subtree. The actual mechanism is to swap
-- the contents of these two nodes fixing up the parent pointers of the
-- replaced node (we do not attempt to preserve parent pointers for the
- -- original node). Neither Old_Node nor New_Node can be extended nodes.
+ -- original node).
-- ??? The above explanation is incorrect, instead Copy_Node is called.
--
-- Note: New_Node may not contain references to Old_Node, for example as
diff --git a/gcc/ada/exp_aggr.adb b/gcc/ada/exp_aggr.adb
index 165f517..e5f3632 100644
--- a/gcc/ada/exp_aggr.adb
+++ b/gcc/ada/exp_aggr.adb
@@ -173,8 +173,11 @@ package body Exp_Aggr is
------------------------------------------------------
function Is_Build_In_Place_Aggregate_Return (N : Node_Id) return Boolean;
- -- True if N is an aggregate (possibly qualified or converted) that is
- -- being returned from a build-in-place function.
+ -- True if N is an aggregate (possibly qualified or a dependent expression
+ -- of a conditional expression, and possibly recursively so) that is being
+ -- returned from a build-in-place function. Such qualified and conditional
+ -- expressions are transparent for this purpose because an enclosing return
+ -- is propagated resp. distributed into these expressions by the expander.
function Build_Record_Aggr_Code
(N : Node_Id;
@@ -8463,7 +8466,11 @@ package body Exp_Aggr is
P : Node_Id := Parent (N);
begin
- while Nkind (P) = N_Qualified_Expression loop
+ while Nkind (P) in N_Case_Expression
+ | N_Case_Expression_Alternative
+ | N_If_Expression
+ | N_Qualified_Expression
+ loop
P := Parent (P);
end loop;
diff --git a/gcc/ada/exp_ch7.adb b/gcc/ada/exp_ch7.adb
index 5049de54..00b7692 100644
--- a/gcc/ada/exp_ch7.adb
+++ b/gcc/ada/exp_ch7.adb
@@ -4529,7 +4529,7 @@ package body Exp_Ch7 is
Push_Scope (Trans_Scop);
Scope_Stack.Table (Scope_Stack.Last).Node_To_Be_Wrapped := Context;
- Set_Scope_Is_Transient;
+ Scope_Stack.Table (Scope_Stack.Last).Is_Transient := True;
-- The transient scope must also manage the secondary stack
diff --git a/gcc/ada/gcc-interface/decl.cc b/gcc/ada/gcc-interface/decl.cc
index 5e16b56..20ab185 100644
--- a/gcc/ada/gcc-interface/decl.cc
+++ b/gcc/ada/gcc-interface/decl.cc
@@ -5114,7 +5114,7 @@ inline_status_for_subprog (Entity_Id subprog)
tree gnu_type;
/* This is a kludge to work around a pass ordering issue: for small
- record types with many components, i.e. typically bit-fields, the
+ record types with many components, i.e. typically bitfields, the
initialization routine can contain many assignments that will be
merged by the GIMPLE store merging pass. But this pass runs very
late in the pipeline, in particular after the inlining decisions
@@ -7702,6 +7702,18 @@ gnat_to_gnu_field (Entity_Id gnat_field, tree gnu_record_type, int packed,
gnu_field_type = maybe_pad_type (gnu_field_type, gnu_size, 0, gnat_field,
false, definition, true);
+ /* For a bitfield, if the type still has BLKmode, try again to change it
+ to an integral mode form. This may be necessary on strict-alignment
+ platforms with a size clause that is much larger than the field type,
+ because maybe_pad_type has preserved the alignment of the field type,
+ which may be too low for the new size. */
+ if (!needs_strict_alignment
+ && RECORD_OR_UNION_TYPE_P (gnu_field_type)
+ && !TYPE_FAT_POINTER_P (gnu_field_type)
+ && TYPE_MODE (gnu_field_type) == BLKmode
+ && is_bitfield)
+ gnu_field_type = make_packable_type (gnu_field_type, true, 1);
+
/* If a padding record was made, declare it now since it will never be
declared otherwise. This is necessary to ensure that its subtrees
are properly marked. */
diff --git a/gcc/ada/gcc-interface/misc.cc b/gcc/ada/gcc-interface/misc.cc
index 3b21bf5..453ae80 100644
--- a/gcc/ada/gcc-interface/misc.cc
+++ b/gcc/ada/gcc-interface/misc.cc
@@ -269,7 +269,7 @@ gnat_post_options (const char **pfilename ATTRIBUTE_UNUSED)
/* No caret by default for Ada. */
if (!OPTION_SET_P (flag_diagnostics_show_caret))
- global_dc->show_caret = false;
+ global_dc->m_source_printing.enabled = false;
/* Copy global settings to local versions. */
gnat_encodings = global_options.x_gnat_encodings;
@@ -293,7 +293,6 @@ static void
internal_error_function (diagnostic_context *context, const char *msgid,
va_list *ap)
{
- text_info tinfo;
char *buffer, *p, *loc;
String_Template temp, temp_loc;
String_Pointer sp, sp_loc;
@@ -309,9 +308,7 @@ internal_error_function (diagnostic_context *context, const char *msgid,
pp_clear_output_area (context->printer);
/* Format the message into the pretty-printer. */
- tinfo.format_spec = msgid;
- tinfo.args_ptr = ap;
- tinfo.err_no = errno;
+ text_info tinfo (msgid, ap, errno);
pp_format_verbatim (context->printer, &tinfo);
/* Extract a (writable) pointer to the formatted text. */
diff --git a/gcc/ada/libgnat/a-direct.adb b/gcc/ada/libgnat/a-direct.adb
index 4b08d41..594971c 100644
--- a/gcc/ada/libgnat/a-direct.adb
+++ b/gcc/ada/libgnat/a-direct.adb
@@ -1379,13 +1379,21 @@ package body Ada.Directories is
Compose (Directory, File_Name) & ASCII.NUL;
Path : String renames
Path_C (Path_C'First .. Path_C'Last - 1);
- Found : Boolean := False;
Attr : aliased File_Attributes;
Exists : Integer;
Error : Integer;
- Kind : File_Kind;
- Size : File_Size;
+ type Result (Found : Boolean := False) is record
+ case Found is
+ when True =>
+ Kind : File_Kind;
+ Size : File_Size;
+ when False =>
+ null;
+ end case;
+ end record;
+
+ Res : Result := (Found => False);
begin
-- Get the file attributes for the directory item
@@ -1414,32 +1422,30 @@ package body Ada.Directories is
elsif Exists = 1 then
if Is_Regular_File_Attr (Path_C'Address, Attr'Access) = 1
- and then Filter (Ordinary_File)
then
- Found := True;
- Kind := Ordinary_File;
- Size :=
- File_Size
- (File_Length_Attr
- (-1, Path_C'Address, Attr'Access));
+ if Filter (Ordinary_File) then
+ Res := (Found => True,
+ Kind => Ordinary_File,
+ Size => File_Size
+ (File_Length_Attr
+ (-1, Path_C'Address, Attr'Access)));
+ end if;
elsif Is_Directory_Attr (Path_C'Address, Attr'Access) = 1
- and then Filter (File_Kind'First)
then
- Found := True;
- Kind := File_Kind'First;
- -- File_Kind'First is used instead of Directory due
- -- to a name overload issue with the procedure
- -- parameter Directory.
- Size := 0;
+ if Filter (File_Kind'First) then
+ Res := (Found => True,
+ Kind => File_Kind'First,
+ Size => 0);
+ end if;
elsif Filter (Special_File) then
- Found := True;
- Kind := Special_File;
- Size := 0;
+ Res := (Found => True,
+ Kind => Special_File,
+ Size => 0);
end if;
- if Found then
+ if Res.Found then
Search.State.Dir_Contents.Append
(Directory_Entry_Type'
(Valid => True,
@@ -1447,9 +1453,9 @@ package body Ada.Directories is
To_Unbounded_String (File_Name),
Full_Name => To_Unbounded_String (Path),
Attr_Error_Code => 0,
- Kind => Kind,
+ Kind => Res.Kind,
Modification_Time => Modification_Time (Path),
- Size => Size));
+ Size => Res.Size));
end if;
end if;
end;
diff --git a/gcc/ada/nlists.ads b/gcc/ada/nlists.ads
index 5e88032..7afe80f 100644
--- a/gcc/ada/nlists.ads
+++ b/gcc/ada/nlists.ads
@@ -43,9 +43,6 @@ package Nlists is
-- this header, which may be used to access the nodes in the list using
-- the set of routines that define this interface.
- -- Note: node lists can contain either nodes or entities (extended nodes)
- -- or a mixture of nodes and extended nodes.
-
function In_Same_List (N1, N2 : Node_Or_Entity_Id) return Boolean;
pragma Inline (In_Same_List);
-- Equivalent to List_Containing (N1) = List_Containing (N2)
diff --git a/gcc/ada/sem_attr.adb b/gcc/ada/sem_attr.adb
index d03761b..3eba3a2 100644
--- a/gcc/ada/sem_attr.adb
+++ b/gcc/ada/sem_attr.adb
@@ -6457,17 +6457,30 @@ package body Sem_Attr is
or else Size_Known_At_Compile_Time (Entity (P)))
then
declare
- Siz : Uint;
+ Prefix_E : Entity_Id := Entity (P);
+ Siz : Uint;
begin
- if Known_Static_RM_Size (Entity (P)) then
- Siz := RM_Size (Entity (P));
+ -- Handle private and incomplete types
+
+ if Present (Underlying_Type (Prefix_E)) then
+ Prefix_E := Underlying_Type (Prefix_E);
+ end if;
+
+ if Known_Static_RM_Size (Prefix_E) then
+ Siz := RM_Size (Prefix_E);
else
- Siz := Esize (Entity (P));
+ Siz := Esize (Prefix_E);
end if;
- Rewrite (N, Make_Integer_Literal (Sloc (N), Siz));
- Analyze (N);
+ -- Protect the frontend against cases where the attribute
+ -- Size_Known_At_Compile_Time is set, but the Esize value
+ -- is not available (see Einfo.ads).
+
+ if Present (Siz) then
+ Rewrite (N, Make_Integer_Literal (Sloc (N), Siz));
+ Analyze (N);
+ end if;
end;
end if;
diff --git a/gcc/ada/sem_ch10.adb b/gcc/ada/sem_ch10.adb
index a6cbe46..ba4beae 100644
--- a/gcc/ada/sem_ch10.adb
+++ b/gcc/ada/sem_ch10.adb
@@ -238,6 +238,9 @@ package body Sem_Ch10 is
-- Reset all visibility flags on unit after compiling it, either as a main
-- unit or as a unit in the context.
+ procedure Replace (Old_E, New_E : Entity_Id);
+ -- Replace Old_E by New_E on visibility list
+
procedure Unchain (E : Entity_Id);
-- Remove single entity from visibility list
@@ -5310,15 +5313,12 @@ package body Sem_Ch10 is
and then not Is_Child_Unit (Lim_Typ)
then
declare
- Non_Lim_View : constant Entity_Id :=
- Non_Limited_View (Lim_Typ);
+ Typ : constant Entity_Id := Non_Limited_View (Lim_Typ);
Prev : Entity_Id;
begin
- Prev := Current_Entity (Lim_Typ);
-
- -- Replace Non_Lim_View in the homonyms list, so that the
+ -- Replace Typ by Lim_Typ in the homonyms list, so that the
-- limited view becomes available.
-- If the nonlimited view is a record with an anonymous
@@ -5350,38 +5350,47 @@ package body Sem_Ch10 is
--
-- [*] denotes the visible entity (Current_Entity)
- if Prev = Non_Lim_View
- or else
- (Ekind (Prev) = E_Incomplete_Type
- and then Full_View (Prev) = Non_Lim_View)
- or else
- (Ekind (Prev) = E_Incomplete_Type
- and then From_Limited_With (Prev)
- and then
- Ekind (Non_Limited_View (Prev)) = E_Incomplete_Type
- and then
- Full_View (Non_Limited_View (Prev)) = Non_Lim_View)
- then
- Set_Current_Entity (Lim_Typ);
+ Prev := Current_Entity (Lim_Typ);
- else
- while Present (Homonym (Prev))
- and then Homonym (Prev) /= Non_Lim_View
- loop
- Prev := Homonym (Prev);
- end loop;
+ while Present (Prev) loop
+ -- This is a regular replacement
- Set_Homonym (Prev, Lim_Typ);
- end if;
+ if Prev = Typ
+ or else (Ekind (Prev) = E_Incomplete_Type
+ and then Full_View (Prev) = Typ)
+ then
+ Replace (Prev, Lim_Typ);
- Set_Homonym (Lim_Typ, Homonym (Non_Lim_View));
- end;
+ if Debug_Flag_I then
+ Write_Str (" (homonym) replace ");
+ Write_Name (Chars (Typ));
+ Write_Eol;
+ end if;
- if Debug_Flag_I then
- Write_Str (" (homonym) chain ");
- Write_Name (Chars (Lim_Typ));
- Write_Eol;
- end if;
+ exit;
+
+ -- This is where E1 is replaced with E4
+
+ elsif Ekind (Prev) = E_Incomplete_Type
+ and then From_Limited_With (Prev)
+ and then
+ Ekind (Non_Limited_View (Prev)) = E_Incomplete_Type
+ and then Full_View (Non_Limited_View (Prev)) = Typ
+ then
+ Replace (Prev, Lim_Typ);
+
+ if Debug_Flag_I then
+ Write_Str (" (homonym) E1 -> E4 ");
+ Write_Name (Chars (Typ));
+ Write_Eol;
+ end if;
+
+ exit;
+ end if;
+
+ Prev := Homonym (Prev);
+ end loop;
+ end;
end if;
Next_Entity (Lim_Typ);
@@ -5474,6 +5483,10 @@ package body Sem_Ch10 is
if Debug_Flag_I then
if Private_Present (With_Clause) then
Write_Str ("install private withed unit ");
+ elsif Parent_With (With_Clause) then
+ Write_Str ("install parent withed unit ");
+ elsif Implicit_With (With_Clause) then
+ Write_Str ("install implicit withed unit ");
else
Write_Str ("install withed unit ");
end if;
@@ -6816,9 +6829,10 @@ package body Sem_Ch10 is
------------------------------
procedure Restore_Chain_For_Shadow (Shadow : Entity_Id) is
- Is_E3 : Boolean;
+ Typ : constant Entity_Id := Non_Limited_View (Shadow);
+ pragma Assert (not In_Chain (Typ));
+
Prev : Entity_Id;
- Typ : Entity_Id;
begin
-- If the package has incomplete types, the limited view of the
@@ -6827,9 +6841,8 @@ package body Sem_Ch10 is
-- the incomplete type at stake. This in turn has a full view
-- E3 that is the full declaration, with a corresponding
-- shadow entity E4. When reinstalling the nonlimited view,
- -- the nonvisible entity E1 is first replaced with E2, but then
- -- E3 must *not* become the visible entity as it is replacing E4
- -- in the homonyms list and simply be ignored.
+ -- the visible entity E4 is replaced directly with E2 in the
+      -- homonyms list and E3 is simply ignored.
--
-- regular views limited views
--
@@ -6842,40 +6855,42 @@ package body Sem_Ch10 is
--
-- [*] denotes the visible entity (Current_Entity)
- Typ := Non_Limited_View (Shadow);
- pragma Assert (not In_Chain (Typ));
+ Prev := Current_Entity (Shadow);
- Is_E3 := Nkind (Parent (Typ)) = N_Full_Type_Declaration
- and then Present (Incomplete_View (Parent (Typ)));
+ while Present (Prev) loop
+ -- This is a regular replacement
- Prev := Current_Entity (Shadow);
+ if Prev = Shadow then
+ Replace (Prev, Typ);
- if Prev = Shadow then
- if Is_E3 then
- Set_Name_Entity_Id (Chars (Prev), Homonym (Prev));
- return;
+ if Debug_Flag_I then
+ Write_Str (" (homonym) replace ");
+ Write_Name (Chars (Typ));
+ Write_Eol;
+ end if;
- else
- Set_Current_Entity (Typ);
- end if;
+ exit;
- else
- while Present (Homonym (Prev))
- and then Homonym (Prev) /= Shadow
- loop
- Prev := Homonym (Prev);
- end loop;
+ -- This is where E4 is replaced with E2
- if Is_E3 then
- Set_Homonym (Prev, Homonym (Shadow));
- return;
+ elsif Ekind (Prev) = E_Incomplete_Type
+ and then From_Limited_With (Prev)
+ and then Ekind (Typ) = E_Incomplete_Type
+ and then Full_View (Typ) = Non_Limited_View (Prev)
+ then
+ Replace (Prev, Typ);
- else
- Set_Homonym (Prev, Typ);
+ if Debug_Flag_I then
+ Write_Str (" (homonym) E4 -> E2 ");
+ Write_Name (Chars (Typ));
+ Write_Eol;
+ end if;
+
+ exit;
end if;
- end if;
- Set_Homonym (Typ, Homonym (Shadow));
+ Prev := Homonym (Prev);
+ end loop;
end Restore_Chain_For_Shadow;
--------------------
@@ -7178,6 +7193,35 @@ package body Sem_Ch10 is
end sm;
-------------
+ -- Replace --
+ -------------
+
+ procedure Replace (Old_E, New_E : Entity_Id) is
+ Prev : Entity_Id;
+
+ begin
+ Prev := Current_Entity (Old_E);
+
+ if No (Prev) then
+ return;
+
+ elsif Prev = Old_E then
+ Set_Current_Entity (New_E);
+ Set_Homonym (New_E, Homonym (Old_E));
+
+ else
+ while Present (Prev) and then Homonym (Prev) /= Old_E loop
+ Prev := Homonym (Prev);
+ end loop;
+
+ if Present (Prev) then
+ Set_Homonym (Prev, New_E);
+ Set_Homonym (New_E, Homonym (Old_E));
+ end if;
+ end if;
+ end Replace;
+
+ -------------
-- Unchain --
-------------
diff --git a/gcc/ada/sem_util.adb b/gcc/ada/sem_util.adb
index e778bab..26ddb52 100644
--- a/gcc/ada/sem_util.adb
+++ b/gcc/ada/sem_util.adb
@@ -27792,15 +27792,6 @@ package body Sem_Util is
end if;
end Set_Rep_Info;
- ----------------------------
- -- Set_Scope_Is_Transient --
- ----------------------------
-
- procedure Set_Scope_Is_Transient (V : Boolean := True) is
- begin
- Scope_Stack.Table (Scope_Stack.Last).Is_Transient := V;
- end Set_Scope_Is_Transient;
-
-------------------
-- Set_Size_Info --
-------------------
diff --git a/gcc/ada/sem_util.ads b/gcc/ada/sem_util.ads
index 92016bc..dda71e4 100644
--- a/gcc/ada/sem_util.ads
+++ b/gcc/ada/sem_util.ads
@@ -3165,9 +3165,6 @@ package Sem_Util is
-- from sub(type) entity T2 to (sub)type entity T1, as well as Is_Volatile
-- if T1 is a base type.
- procedure Set_Scope_Is_Transient (V : Boolean := True);
- -- Set the flag Is_Transient of the current scope
-
procedure Set_Size_Info (T1, T2 : Entity_Id);
pragma Inline (Set_Size_Info);
-- Copies the Esize field and Has_Biased_Representation flag from sub(type)
diff --git a/gcc/ada/sinfo.ads b/gcc/ada/sinfo.ads
index 57fd704..fc9bcfb 100644
--- a/gcc/ada/sinfo.ads
+++ b/gcc/ada/sinfo.ads
@@ -82,12 +82,6 @@ package Sinfo is
-- for this purpose, so e.g. in X := (if A then B else C);
-- Paren_Count for the right side will be 1.
- -- Comes_From_Check_Or_Contract
- -- This flag is present in all N_If_Statement nodes and
- -- gets set when an N_If_Statement is generated as part of
- -- the expansion of a Check, Assert, or contract-related
- -- pragma.
-
-- Comes_From_Source
-- This flag is present in all nodes. It is set if the
-- node is built by the scanner or parser, and clear if
@@ -953,6 +947,12 @@ package Sinfo is
-- attribute definition clause is given, rather than testing this at the
-- freeze point.
+ -- Comes_From_Check_Or_Contract
+ -- This flag is present in all N_If_Statement nodes and
+ -- gets set when an N_If_Statement is generated as part of
+ -- the expansion of a Check, Assert, or contract-related
+ -- pragma.
+
-- Comes_From_Extended_Return_Statement
-- Present in N_Simple_Return_Statement nodes. True if this node was
-- constructed as part of the N_Extended_Return_Statement expansion.
@@ -2809,12 +2809,6 @@ package Sinfo is
-- fields are defined (and access subprograms declared) in package
-- Einfo.
- -- Note: N_Defining_Identifier is an extended node whose fields are
- -- deliberately laid out to match the layout of fields in an ordinary
- -- N_Identifier node allowing for easy alteration of an identifier
- -- node into a defining identifier node. For details, see procedure
- -- Sinfo.CN.Change_Identifier_To_Defining_Identifier.
-
-- N_Defining_Identifier
-- Sloc points to identifier
-- Chars contains the Name_Id for the identifier
@@ -3156,12 +3150,6 @@ package Sinfo is
-- additional fields are defined (and access subprograms declared)
-- in package Einfo.
- -- Note: N_Defining_Character_Literal is an extended node whose fields
- -- are deliberately laid out to match layout of fields in an ordinary
- -- N_Character_Literal node, allowing for easy alteration of a character
- -- literal node into a defining character literal node. For details, see
- -- Sinfo.CN.Change_Character_Literal_To_Defining_Character_Literal.
-
-- N_Defining_Character_Literal
-- Sloc points to literal
-- Chars contains the Name_Id for the identifier
@@ -5416,13 +5404,6 @@ package Sinfo is
-- additional fields are defined (and access subprograms declared)
-- in package Einfo.
- -- Note: N_Defining_Operator_Symbol is an extended node whose fields
- -- are deliberately laid out to match the layout of fields in an
- -- ordinary N_Operator_Symbol node allowing for easy alteration of
- -- an operator symbol node into a defining operator symbol node.
- -- See Sinfo.CN.Change_Operator_Symbol_To_Defining_Operator_Symbol
- -- for further details.
-
-- N_Defining_Operator_Symbol
-- Sloc points to literal
-- Chars contains the Name_Id for the operator symbol
diff --git a/gcc/ada/types.ads b/gcc/ada/types.ads
index c10f404..b7612bf 100644
--- a/gcc/ada/types.ads
+++ b/gcc/ada/types.ads
@@ -405,9 +405,7 @@ package Types is
subtype Entity_Id is Node_Id;
-- A synonym for node types, used in the Einfo package to refer to nodes
- -- that are entities (i.e. nodes with an Nkind of N_Defining_xxx). All such
- -- nodes are extended nodes and these are the only extended nodes, so that
- -- in practice entity and extended nodes are synonymous.
+ -- that are entities (i.e. nodes with an Nkind of N_Defining_xxx).
--
-- Note that Sinfo.Nodes.N_Entity_Id is the same as Entity_Id, except it
-- has a predicate requiring the correct Nkind. Opt_N_Entity_Id is the same
diff --git a/gcc/addresses.h b/gcc/addresses.h
index 3519c24..08bf39c 100644
--- a/gcc/addresses.h
+++ b/gcc/addresses.h
@@ -28,8 +28,12 @@ inline enum reg_class
base_reg_class (machine_mode mode ATTRIBUTE_UNUSED,
addr_space_t as ATTRIBUTE_UNUSED,
enum rtx_code outer_code ATTRIBUTE_UNUSED,
- enum rtx_code index_code ATTRIBUTE_UNUSED)
+ enum rtx_code index_code ATTRIBUTE_UNUSED,
+ rtx_insn *insn ATTRIBUTE_UNUSED = NULL)
{
+#ifdef INSN_BASE_REG_CLASS
+ return INSN_BASE_REG_CLASS (insn);
+#else
#ifdef MODE_CODE_BASE_REG_CLASS
return MODE_CODE_BASE_REG_CLASS (MACRO_MODE (mode), as, outer_code,
index_code);
@@ -44,6 +48,17 @@ base_reg_class (machine_mode mode ATTRIBUTE_UNUSED,
return BASE_REG_CLASS;
#endif
#endif
+#endif
+}
+
+inline enum reg_class
+index_reg_class (rtx_insn *insn ATTRIBUTE_UNUSED = NULL)
+{
+#ifdef INSN_INDEX_REG_CLASS
+ return INSN_INDEX_REG_CLASS (insn);
+#else
+ return INDEX_REG_CLASS;
+#endif
}
/* Wrapper function to unify target macros REGNO_MODE_CODE_OK_FOR_BASE_P,
@@ -56,8 +71,12 @@ ok_for_base_p_1 (unsigned regno ATTRIBUTE_UNUSED,
machine_mode mode ATTRIBUTE_UNUSED,
addr_space_t as ATTRIBUTE_UNUSED,
enum rtx_code outer_code ATTRIBUTE_UNUSED,
- enum rtx_code index_code ATTRIBUTE_UNUSED)
+ enum rtx_code index_code ATTRIBUTE_UNUSED,
+ rtx_insn* insn ATTRIBUTE_UNUSED = NULL)
{
+#ifdef REGNO_OK_FOR_INSN_BASE_P
+ return REGNO_OK_FOR_INSN_BASE_P (regno, insn);
+#else
#ifdef REGNO_MODE_CODE_OK_FOR_BASE_P
return REGNO_MODE_CODE_OK_FOR_BASE_P (regno, MACRO_MODE (mode), as,
outer_code, index_code);
@@ -72,6 +91,7 @@ ok_for_base_p_1 (unsigned regno ATTRIBUTE_UNUSED,
return REGNO_OK_FOR_BASE_P (regno);
#endif
#endif
+#endif
}
/* Wrapper around ok_for_base_p_1, for use after register allocation is
@@ -79,12 +99,13 @@ ok_for_base_p_1 (unsigned regno ATTRIBUTE_UNUSED,
inline bool
regno_ok_for_base_p (unsigned regno, machine_mode mode, addr_space_t as,
- enum rtx_code outer_code, enum rtx_code index_code)
+ enum rtx_code outer_code, enum rtx_code index_code,
+ rtx_insn *insn = NULL)
{
if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0)
regno = reg_renumber[regno];
- return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
+ return ok_for_base_p_1 (regno, mode, as, outer_code, index_code, insn);
}
#endif /* GCC_ADDRESSES_H */
diff --git a/gcc/alias.cc b/gcc/alias.cc
index 7c1af1f..86d8f71 100644
--- a/gcc/alias.cc
+++ b/gcc/alias.cc
@@ -774,7 +774,22 @@ reference_alias_ptr_type_1 (tree *t)
&& (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
!= TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
- return TREE_TYPE (TREE_OPERAND (inner, 1));
+ {
+ tree alias_ptrtype = TREE_TYPE (TREE_OPERAND (inner, 1));
+ /* Unless we have the (aggregate) effective type of the access
+ somewhere on the access path. If we have for example
+ (&a->elts[i])->l.len exposed by abstraction we'd see
+ MEM <A> [(B *)a].elts[i].l.len and we can use the alias set
+ of 'len' when typeof (MEM <A> [(B *)a].elts[i]) == B for
+ example. See PR111715. */
+ tree inner = *t;
+ while (handled_component_p (inner)
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
+ != TYPE_MAIN_VARIANT (TREE_TYPE (alias_ptrtype))))
+ inner = TREE_OPERAND (inner, 0);
+ if (TREE_CODE (inner) == MEM_REF)
+ return alias_ptrtype;
+ }
/* Otherwise, pick up the outermost object that we could have
a pointer to. */
diff --git a/gcc/analyzer/ChangeLog b/gcc/analyzer/ChangeLog
index 272c7fb..840de8c 100644
--- a/gcc/analyzer/ChangeLog
+++ b/gcc/analyzer/ChangeLog
@@ -1,3 +1,74 @@
+2023-10-09 David Malcolm <dmalcolm@redhat.com>
+
+ * access-diagram.cc (boundaries::add): Explicitly state
+ "boundaries::" scope for "kind" enum.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/111155
+ * access-diagram.cc (boundaries::boundaries): Add logger param
+ (boundaries::add): Add logging.
+ (boundaries::get_hard_boundaries_in_range): New.
+ (boundaries::m_logger): New field.
+ (boundaries::get_table_x_for_offset): Make public.
+ (class svalue_spatial_item): New.
+ (class compound_svalue_spatial_item): New.
+ (add_ellipsis_to_gaps): New.
+ (valid_region_spatial_item::valid_region_spatial_item): Add theme
+ param. Initialize m_boundaries, m_existing_sval, and
+ m_existing_sval_spatial_item.
+ (valid_region_spatial_item::add_boundaries): Set m_boundaries.
+ Add boundaries for any m_existing_sval_spatial_item.
+ (valid_region_spatial_item::add_array_elements_to_table): Rewrite
+ creation of min/max index in terms of
+ maybe_add_array_index_to_table. Rewrite ellipsis code using
+ add_ellipsis_to_gaps. Add index values for any hard boundaries
+ within the valid region.
+ (valid_region_spatial_item::maybe_add_array_index_to_table): New,
+ based on code formerly in add_array_elements_to_table.
+ (valid_region_spatial_item::make_table): Make use of
+ m_existing_sval_spatial_item, if any.
+ (valid_region_spatial_item::m_boundaries): New field.
+ (valid_region_spatial_item::m_existing_sval): New field.
+ (valid_region_spatial_item::m_existing_sval_spatial_item): New
+ field.
+ (class svalue_spatial_item): Rename to...
+ (class written_svalue_spatial_item): ...this.
+ (class string_region_spatial_item): Rename to..
+ (class string_literal_spatial_item): ...this. Add "kind".
+ (string_literal_spatial_item::add_boundaries): Use m_kind to
+ determine kind of boundary. Update for renaming of m_actual_bits
+ to m_bits.
+ (string_literal_spatial_item::make_table): Likewise. Support not
+ displaying a row for byte indexes, and not displaying a row for
+ the type.
+ (string_literal_spatial_item::add_column_for_byte): Make byte index
+ row optional.
+ (svalue_spatial_item::make): Convert to...
+ (make_written_svalue_spatial_item): ...this.
+ (make_existing_svalue_spatial_item): New.
+ (access_diagram_impl::access_diagram_impl): Pass theme to
+ m_valid_region_spatial_item ctor. Update for renaming of
+ m_svalue_spatial_item.
+ (access_diagram_impl::find_boundaries): Pass logger to boundaries.
+ Update for renaming of...
+ (access_diagram_impl::m_svalue_spatial_item): Rename to...
+ (access_diagram_impl::m_written_svalue_spatial_item): ...this.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer-logging.cc (logger::log_va_partial): Use text_info
+ ctor.
+ * analyzer.cc (make_label_text): Likewise.
+ (make_label_text_n): Likewise.
+ * pending-diagnostic.cc (evdesc::event_desc::formatted_print):
+ Likewise.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * program-point.cc: Update for grouping of source printing fields
+ within diagnostic_context.
+
2023-09-15 David Malcolm <dmalcolm@redhat.com>
* analyzer.cc (get_stmt_location): Handle null stmt.
diff --git a/gcc/analyzer/access-diagram.cc b/gcc/analyzer/access-diagram.cc
index a51d594..c7d190e 100644
--- a/gcc/analyzer/access-diagram.cc
+++ b/gcc/analyzer/access-diagram.cc
@@ -630,8 +630,8 @@ class boundaries
public:
enum class kind { HARD, SOFT};
- boundaries (const region &base_reg)
- : m_base_reg (base_reg)
+ boundaries (const region &base_reg, logger *logger)
+ : m_base_reg (base_reg), m_logger (logger)
{
}
@@ -646,6 +646,16 @@ public:
{
add (range.m_start, kind);
add (range.m_next, kind);
+ if (m_logger)
+ {
+ m_logger->start_log_line ();
+ m_logger->log_partial ("added access_range: ");
+ range.dump_to_pp (m_logger->get_printer (), true);
+ m_logger->log_partial (" (%s)",
+ (kind == boundaries::kind::HARD)
+ ? "HARD" : "soft");
+ m_logger->end_log_line ();
+ }
}
void add (const region &reg, region_model_manager *mgr, enum kind kind)
@@ -714,8 +724,30 @@ public:
return m_all_offsets.size ();
}
+ std::vector<region_offset>
+ get_hard_boundaries_in_range (byte_offset_t min_offset,
+ byte_offset_t max_offset) const
+ {
+ std::vector<region_offset> result;
+ for (auto &offset : m_hard_offsets)
+ {
+ if (!offset.concrete_p ())
+ continue;
+ byte_offset_t byte;
+ if (!offset.get_concrete_byte_offset (&byte))
+ continue;
+ if (byte < min_offset)
+ continue;
+ if (byte > max_offset)
+ continue;
+ result.push_back (offset);
+ }
+ return result;
+ }
+
private:
const region &m_base_reg;
+ logger *m_logger;
std::set<region_offset> m_all_offsets;
std::set<region_offset> m_hard_offsets;
};
@@ -1085,7 +1117,6 @@ public:
logger.dec_indent ();
}
-private:
int get_table_x_for_offset (region_offset offset) const
{
auto slot = m_table_x_for_offset.find (offset);
@@ -1097,6 +1128,7 @@ private:
return slot->second;
}
+private:
int get_table_x_for_prev_offset (region_offset offset) const
{
auto slot = m_table_x_for_prev_offset.find (offset);
@@ -1132,6 +1164,124 @@ public:
style_manager &sm) const = 0;
};
+/* A spatial_item that involves showing an svalue at a particular offset. */
+
+class svalue_spatial_item : public spatial_item
+{
+public:
+ enum class kind
+ {
+ WRITTEN,
+ EXISTING
+ };
+protected:
+ svalue_spatial_item (const svalue &sval,
+ access_range bits,
+ enum kind kind)
+ : m_sval (sval), m_bits (bits), m_kind (kind)
+ {
+ }
+
+ const svalue &m_sval;
+ access_range m_bits;
+ enum kind m_kind;
+};
+
+static std::unique_ptr<spatial_item>
+make_existing_svalue_spatial_item (const svalue *sval,
+ const access_range &bits,
+ const theme &theme);
+
+class compound_svalue_spatial_item : public svalue_spatial_item
+{
+public:
+ compound_svalue_spatial_item (const compound_svalue &sval,
+ const access_range &bits,
+ enum kind kind,
+ const theme &theme)
+ : svalue_spatial_item (sval, bits, kind),
+ m_compound_sval (sval)
+ {
+ const binding_map &map = m_compound_sval.get_map ();
+ auto_vec <const binding_key *> binding_keys;
+ for (auto iter : map)
+ {
+ const binding_key *key = iter.first;
+ const svalue *bound_sval = iter.second;
+ if (const concrete_binding *concrete_key
+ = key->dyn_cast_concrete_binding ())
+ {
+ access_range range (nullptr,
+ concrete_key->get_bit_range ());
+ if (std::unique_ptr<spatial_item> child
+ = make_existing_svalue_spatial_item (bound_sval,
+ range,
+ theme))
+ m_children.push_back (std::move (child));
+ }
+ }
+ }
+
+ void add_boundaries (boundaries &out, logger *logger) const final override
+ {
+ LOG_SCOPE (logger);
+ for (auto &iter : m_children)
+ iter->add_boundaries (out, logger);
+ }
+
+ table make_table (const bit_to_table_map &btm,
+ style_manager &sm) const final override
+ {
+ std::vector<table> child_tables;
+ int max_rows = 0;
+ for (auto &iter : m_children)
+ {
+ table child_table (iter->make_table (btm, sm));
+ max_rows = MAX (max_rows, child_table.get_size ().h);
+ child_tables.push_back (std::move (child_table));
+ }
+ table t (table::size_t (btm.get_num_columns (), max_rows));
+ for (auto &&child_table : child_tables)
+ t.add_other_table (std::move (child_table),
+ table::coord_t (0, 0));
+ return t;
+ }
+
+private:
+ const compound_svalue &m_compound_sval;
+ std::vector<std::unique_ptr<spatial_item>> m_children;
+};
+
+/* Loop through the TABLE_X_RANGE columns of T, adding
+ cells containing "..." in any unoccupied ranges of table cell. */
+
+static void
+add_ellipsis_to_gaps (table &t,
+ style_manager &sm,
+ const table::range_t &table_x_range,
+ const table::range_t &table_y_range)
+{
+ int table_x = table_x_range.get_min ();
+ while (table_x < table_x_range.get_next ())
+ {
+ /* Find a run of unoccupied table cells. */
+ const int start_table_x = table_x;
+ while (table_x < table_x_range.get_next ()
+ && !t.get_placement_at (table::coord_t (table_x,
+ table_y_range.get_min ())))
+ table_x++;
+ const table::range_t unoccupied_x_range (start_table_x, table_x);
+ if (unoccupied_x_range.get_size () > 0)
+ t.set_cell_span (table::rect_t (unoccupied_x_range, table_y_range),
+ styled_string (sm, "..."));
+ /* Skip occupied table cells. */
+ while (table_x < table_x_range.get_next ()
+ && t.get_placement_at (table::coord_t (table_x,
+ table_y_range.get_min ())))
+ table_x++;
+ }
+}
+
/* Subclass of spatial_item for visualizing the region of memory
that's valid to access relative to the base region of region accessed in
the operation. */
@@ -1140,14 +1290,23 @@ class valid_region_spatial_item : public spatial_item
{
public:
valid_region_spatial_item (const access_operation &op,
- diagnostic_event_id_t region_creation_event_id)
+ diagnostic_event_id_t region_creation_event_id,
+ const theme &theme)
: m_op (op),
- m_region_creation_event_id (region_creation_event_id)
- {}
+ m_region_creation_event_id (region_creation_event_id),
+ m_boundaries (nullptr),
+ m_existing_sval (op.m_model.get_store_value (op.m_base_region, nullptr)),
+ m_existing_sval_spatial_item
+ (make_existing_svalue_spatial_item (m_existing_sval,
+ op.get_valid_bits (),
+ theme))
+ {
+ }
void add_boundaries (boundaries &out, logger *logger) const final override
{
LOG_SCOPE (logger);
+ m_boundaries = &out;
access_range valid_bits = m_op.get_valid_bits ();
if (logger)
{
@@ -1158,6 +1317,18 @@ public:
}
out.add (valid_bits, boundaries::kind::HARD);
+ if (m_existing_sval_spatial_item)
+ {
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("existing svalue: ");
+ m_existing_sval->dump_to_pp (logger->get_printer (), true);
+ logger->end_log_line ();
+ }
+ m_existing_sval_spatial_item->add_boundaries (out, logger);
+ }
+
/* Support for showing first and final element in array types. */
if (tree base_type = m_op.m_base_region->get_type ())
if (TREE_CODE (base_type) == ARRAY_TYPE)
@@ -1193,65 +1364,102 @@ public:
{
tree base_type = m_op.m_base_region->get_type ();
gcc_assert (TREE_CODE (base_type) == ARRAY_TYPE);
+ gcc_assert (m_boundaries != nullptr);
tree domain = TYPE_DOMAIN (base_type);
if (!(TYPE_MIN_VALUE (domain) && TYPE_MAX_VALUE (domain)))
return;
- region_model_manager * const mgr = m_op.get_manager ();
const int table_y = 0;
const int table_h = 1;
const table::range_t table_y_range (table_y, table_y + table_h);
t.add_row ();
- const svalue *min_idx_sval
- = mgr->get_or_create_constant_svalue (TYPE_MIN_VALUE (domain));
- const region *min_element = mgr->get_element_region (m_op.m_base_region,
- TREE_TYPE (base_type),
- min_idx_sval);
- const access_range min_element_range (*min_element, mgr);
- const table::range_t min_element_x_range
- = btm.get_table_x_for_range (min_element_range);
-
- t.set_cell_span (table::rect_t (min_element_x_range,
- table_y_range),
- fmt_styled_string (sm, "[%E]",
- TYPE_MIN_VALUE (domain)));
-
- const svalue *max_idx_sval
- = mgr->get_or_create_constant_svalue (TYPE_MAX_VALUE (domain));
- const region *max_element = mgr->get_element_region (m_op.m_base_region,
+
+ const table::range_t min_x_range
+ = maybe_add_array_index_to_table (t, btm, sm, table_y_range,
+ TYPE_MIN_VALUE (domain));
+ const table::range_t max_x_range
+ = maybe_add_array_index_to_table (t, btm, sm, table_y_range,
+ TYPE_MAX_VALUE (domain));
+
+ if (TREE_TYPE (base_type) == char_type_node)
+ {
+ /* For a char array,: if there are any hard boundaries in
+ m_boundaries that are *within* the valid region,
+ then show those index values. */
+ std::vector<region_offset> hard_boundaries
+ = m_boundaries->get_hard_boundaries_in_range
+ (tree_to_shwi (TYPE_MIN_VALUE (domain)),
+ tree_to_shwi (TYPE_MAX_VALUE (domain)));
+ for (auto &offset : hard_boundaries)
+ {
+ const int table_x = btm.get_table_x_for_offset (offset);
+ if (!offset.concrete_p ())
+ continue;
+ byte_offset_t byte;
+ if (!offset.get_concrete_byte_offset (&byte))
+ continue;
+ table::range_t table_x_range (table_x, table_x + 1);
+ t.maybe_set_cell_span (table::rect_t (table_x_range,
+ table_y_range),
+ fmt_styled_string (sm, "[%wi]",
+ byte.to_shwi ()));
+ }
+ }
+
+ add_ellipsis_to_gaps (t, sm,
+ table::range_t (min_x_range.get_next (),
+ max_x_range.get_min ()),
+ table_y_range);
+ }
+
+ table::range_t
+ maybe_add_array_index_to_table (table &t,
+ const bit_to_table_map &btm,
+ style_manager &sm,
+ const table::range_t table_y_range,
+ tree idx_cst) const
+ {
+ region_model_manager * const mgr = m_op.get_manager ();
+ tree base_type = m_op.m_base_region->get_type ();
+ const svalue *idx_sval
+ = mgr->get_or_create_constant_svalue (idx_cst);
+ const region *element_reg = mgr->get_element_region (m_op.m_base_region,
TREE_TYPE (base_type),
- max_idx_sval);
- if (min_element == max_element)
- return; // 1-element array
+ idx_sval);
+ const access_range element_range (*element_reg, mgr);
+ const table::range_t element_x_range
+ = btm.get_table_x_for_range (element_range);
- const access_range max_element_range (*max_element, mgr);
- const table::range_t max_element_x_range
- = btm.get_table_x_for_range (max_element_range);
- t.set_cell_span (table::rect_t (max_element_x_range,
- table_y_range),
- fmt_styled_string (sm, "[%E]",
- TYPE_MAX_VALUE (domain)));
+ t.maybe_set_cell_span (table::rect_t (element_x_range,
+ table_y_range),
+ fmt_styled_string (sm, "[%E]", idx_cst));
- const table::range_t other_elements_x_range (min_element_x_range.next,
- max_element_x_range.start);
- if (other_elements_x_range.get_size () > 0)
- t.set_cell_span (table::rect_t (other_elements_x_range, table_y_range),
- styled_string (sm, "..."));
+ return element_x_range;
}
table make_table (const bit_to_table_map &btm,
style_manager &sm) const final override
{
- table t (table::size_t (btm.get_num_columns (), 1));
+ table t (table::size_t (btm.get_num_columns (), 0));
if (tree base_type = m_op.m_base_region->get_type ())
if (TREE_CODE (base_type) == ARRAY_TYPE)
add_array_elements_to_table (t, btm, sm);
+ /* Make use of m_existing_sval_spatial_item, if any. */
+ if (m_existing_sval_spatial_item)
+ {
+ table table_for_existing
+ = m_existing_sval_spatial_item->make_table (btm, sm);
+ const int table_y = t.add_rows (table_for_existing.get_size ().h);
+ t.add_other_table (std::move (table_for_existing),
+ table::coord_t (0, table_y));
+ }
+
access_range valid_bits = m_op.get_valid_bits ();
- const int table_y = t.get_size ().h - 1;
+ const int table_y = t.add_row ();
const int table_h = 1;
table::rect_t rect = btm.get_table_rect (valid_bits, table_y, table_h);
styled_string s;
@@ -1306,6 +1514,9 @@ public:
private:
const access_operation &m_op;
diagnostic_event_id_t m_region_creation_event_id;
+ mutable const boundaries *m_boundaries;
+ const svalue *m_existing_sval;
+ std::unique_ptr<spatial_item> m_existing_sval_spatial_item;
};
/* Subclass of spatial_item for visualizing the region of memory
@@ -1362,15 +1573,10 @@ private:
to the accessed region.
Can be subclassed to give visualizations of specific kinds of svalue. */
-class svalue_spatial_item : public spatial_item
+class written_svalue_spatial_item : public spatial_item
{
public:
- static std::unique_ptr<svalue_spatial_item> make (const access_operation &op,
- const svalue &sval,
- access_range actual_bits,
- const theme &theme);
-
- svalue_spatial_item (const access_operation &op,
+ written_svalue_spatial_item (const access_operation &op,
const svalue &sval,
access_range actual_bits)
: m_op (op), m_sval (sval), m_actual_bits (actual_bits)
@@ -1479,15 +1685,15 @@ protected:
└──────────────────────────────────────────────────────────────────────┘
*/
-class string_region_spatial_item : public svalue_spatial_item
+class string_literal_spatial_item : public svalue_spatial_item
{
public:
- string_region_spatial_item (const access_operation &op,
- const svalue &sval,
- access_range actual_bits,
- const string_region &string_reg,
- const theme &theme)
- : svalue_spatial_item (op, sval, actual_bits),
+ string_literal_spatial_item (const svalue &sval,
+ access_range actual_bits,
+ const string_region &string_reg,
+ const theme &theme,
+ enum kind kind)
+ : svalue_spatial_item (sval, actual_bits, kind),
m_string_reg (string_reg),
m_theme (theme),
m_ellipsis_threshold (param_analyzer_text_art_string_ellipsis_threshold),
@@ -1501,16 +1707,18 @@ public:
void add_boundaries (boundaries &out, logger *logger) const override
{
LOG_SCOPE (logger);
- out.add (m_actual_bits, boundaries::kind::HARD);
+ out.add (m_bits, m_kind == svalue_spatial_item::kind::WRITTEN
+ ? boundaries::kind::HARD
+ : boundaries::kind::SOFT);
tree string_cst = get_string_cst ();
/* TREE_STRING_LENGTH is sizeof, not strlen. */
if (m_show_full_string)
- out.add_all_bytes_in_range (m_actual_bits);
+ out.add_all_bytes_in_range (m_bits);
else
{
byte_range bytes (0, 0);
- bool valid = m_actual_bits.as_concrete_byte_range (&bytes);
+ bool valid = m_bits.as_concrete_byte_range (&bytes);
gcc_assert (valid);
byte_range head_of_string (bytes.get_start_byte_offset (),
m_ellipsis_head_len);
@@ -1532,11 +1740,13 @@ public:
{
table t (table::size_t (btm.get_num_columns (), 0));
- const int byte_idx_table_y = t.add_row ();
+ const int byte_idx_table_y = (m_kind == svalue_spatial_item::kind::WRITTEN
+ ? t.add_row ()
+ : -1);
const int byte_val_table_y = t.add_row ();
byte_range bytes (0, 0);
- bool valid = m_actual_bits.as_concrete_byte_range (&bytes);
+ bool valid = m_bits.as_concrete_byte_range (&bytes);
gcc_assert (valid);
tree string_cst = get_string_cst ();
if (m_show_full_string)
@@ -1616,14 +1826,17 @@ public:
byte_idx,
byte_idx_table_y, byte_val_table_y);
- /* Ellipsis (two rows high). */
+ /* Ellipsis. */
const byte_range ellipsis_bytes
(m_ellipsis_head_len + bytes.get_start_byte_offset (),
TREE_STRING_LENGTH (string_cst)
- (m_ellipsis_head_len + m_ellipsis_tail_len));
const table::rect_t table_rect
- = btm.get_table_rect (&m_string_reg, ellipsis_bytes,
- byte_idx_table_y, 2);
+ = ((byte_idx_table_y != -1)
+ ? btm.get_table_rect (&m_string_reg, ellipsis_bytes,
+ byte_idx_table_y, 2)
+ : btm.get_table_rect (&m_string_reg, ellipsis_bytes,
+ byte_val_table_y, 1));
t.set_cell_span(table_rect, styled_string (sm, "..."));
/* Tail of string. */
@@ -1637,12 +1850,15 @@ public:
byte_idx_table_y, byte_val_table_y);
}
- const int summary_table_y = t.add_row ();
- t.set_cell_span (btm.get_table_rect (&m_string_reg, bytes,
- summary_table_y, 1),
- fmt_styled_string (sm,
- _("string literal (type: %qT)"),
- TREE_TYPE (string_cst)));
+ if (m_kind == svalue_spatial_item::kind::WRITTEN)
+ {
+ const int summary_table_y = t.add_row ();
+ t.set_cell_span (btm.get_table_rect (&m_string_reg, bytes,
+ summary_table_y, 1),
+ fmt_styled_string (sm,
+ _("string literal (type: %qT)"),
+ TREE_TYPE (string_cst)));
+ }
return t;
}
@@ -1687,7 +1903,7 @@ private:
gcc_assert (byte_idx_within_string < TREE_STRING_LENGTH (string_cst));
const byte_range bytes (byte_idx_within_cluster, 1);
- if (1) // show_byte_indices
+ if (byte_idx_table_y != -1)
{
const table::rect_t idx_table_rect
= btm.get_table_rect (&m_string_reg, bytes, byte_idx_table_y, 1);
@@ -1729,18 +1945,54 @@ private:
const bool m_show_utf8;
};
-std::unique_ptr<svalue_spatial_item>
-svalue_spatial_item::make (const access_operation &op,
- const svalue &sval,
- access_range actual_bits,
- const theme &theme)
+static std::unique_ptr<spatial_item>
+make_written_svalue_spatial_item (const access_operation &op,
+ const svalue &sval,
+ access_range actual_bits,
+ const theme &theme)
{
if (const initial_svalue *initial_sval = sval.dyn_cast_initial_svalue ())
if (const string_region *string_reg
= initial_sval->get_region ()->dyn_cast_string_region ())
- return make_unique <string_region_spatial_item> (op, sval, actual_bits,
- *string_reg, theme);
- return make_unique <svalue_spatial_item> (op, sval, actual_bits);
+ return make_unique <string_literal_spatial_item>
+ (sval, actual_bits,
+ *string_reg, theme,
+ svalue_spatial_item::kind::WRITTEN);
+ return make_unique <written_svalue_spatial_item> (op, sval, actual_bits);
+}
+
+static std::unique_ptr<spatial_item>
+make_existing_svalue_spatial_item (const svalue *sval,
+ const access_range &bits,
+ const theme &theme)
+{
+ if (!sval)
+ return nullptr;
+
+ switch (sval->get_kind ())
+ {
+ default:
+ return nullptr;
+
+ case SK_INITIAL:
+ {
+ const initial_svalue *initial_sval = (const initial_svalue *)sval;
+ if (const string_region *string_reg
+ = initial_sval->get_region ()->dyn_cast_string_region ())
+ return make_unique <string_literal_spatial_item>
+ (*sval, bits,
+ *string_reg, theme,
+ svalue_spatial_item::kind::EXISTING);
+ return nullptr;
+ }
+
+ case SK_COMPOUND:
+ return make_unique<compound_svalue_spatial_item>
+ (*((const compound_svalue *)sval),
+ bits,
+ svalue_spatial_item::kind::EXISTING,
+ theme);
+ }
}
/* Widget subclass implementing access diagrams. */
@@ -1759,7 +2011,7 @@ public:
m_theme (theme),
m_logger (logger),
m_invalid (false),
- m_valid_region_spatial_item (op, region_creation_event_id),
+ m_valid_region_spatial_item (op, region_creation_event_id, theme),
m_accessed_region_spatial_item (op),
m_btm (),
m_calc_req_size_called (false)
@@ -1800,10 +2052,11 @@ public:
if (op.m_sval_hint)
{
access_range actual_bits = m_op.get_actual_bits ();
- m_svalue_spatial_item = svalue_spatial_item::make (m_op,
- *op.m_sval_hint,
- actual_bits,
- m_theme);
+ m_written_svalue_spatial_item
+ = make_written_svalue_spatial_item (m_op,
+ *op.m_sval_hint,
+ actual_bits,
+ m_theme);
}
/* Two passes:
@@ -1856,9 +2109,9 @@ public:
add_aligned_child_table (std::move (t_headings));
}
- if (m_svalue_spatial_item)
+ if (m_written_svalue_spatial_item)
{
- table t_sval (m_svalue_spatial_item->make_table (m_btm, m_sm));
+ table t_sval (m_written_svalue_spatial_item->make_table (m_btm, m_sm));
add_aligned_child_table (std::move (t_sval));
}
else
@@ -1942,12 +2195,12 @@ private:
find_boundaries () const
{
std::unique_ptr<boundaries> result
- = make_unique<boundaries> (*m_op.m_base_region);
+ = make_unique<boundaries> (*m_op.m_base_region, m_logger);
m_valid_region_spatial_item.add_boundaries (*result, m_logger);
m_accessed_region_spatial_item.add_boundaries (*result, m_logger);
- if (m_svalue_spatial_item)
- m_svalue_spatial_item->add_boundaries (*result, m_logger);
+ if (m_written_svalue_spatial_item)
+ m_written_svalue_spatial_item->add_boundaries (*result, m_logger);
return result;
}
@@ -2324,7 +2577,7 @@ private:
valid_region_spatial_item m_valid_region_spatial_item;
accessed_region_spatial_item m_accessed_region_spatial_item;
- std::unique_ptr<svalue_spatial_item> m_svalue_spatial_item;
+ std::unique_ptr<spatial_item> m_written_svalue_spatial_item;
std::unique_ptr<boundaries> m_boundaries;
diff --git a/gcc/analyzer/analyzer-logging.cc b/gcc/analyzer/analyzer-logging.cc
index b78481c..ddfbb5b 100644
--- a/gcc/analyzer/analyzer-logging.cc
+++ b/gcc/analyzer/analyzer-logging.cc
@@ -144,10 +144,7 @@ logger::log_partial (const char *fmt, ...)
void
logger::log_va_partial (const char *fmt, va_list *ap)
{
- text_info text;
- text.format_spec = fmt;
- text.args_ptr = ap;
- text.err_no = 0;
+ text_info text (fmt, ap, 0);
pp_format (m_pp, &text);
pp_output_formatted_text (m_pp);
}
diff --git a/gcc/analyzer/analyzer.cc b/gcc/analyzer/analyzer.cc
index 94c5cf2..9d4bc78 100644
--- a/gcc/analyzer/analyzer.cc
+++ b/gcc/analyzer/analyzer.cc
@@ -425,19 +425,13 @@ make_label_text (bool can_colorize, const char *fmt, ...)
if (!can_colorize)
pp_show_color (pp) = false;
- text_info ti;
rich_location rich_loc (line_table, UNKNOWN_LOCATION);
va_list ap;
va_start (ap, fmt);
- ti.format_spec = _(fmt);
- ti.args_ptr = &ap;
- ti.err_no = 0;
- ti.x_data = NULL;
- ti.m_richloc = &rich_loc;
-
+ text_info ti (_(fmt), &ap, 0, NULL, &rich_loc);
pp_format (pp, &ti);
pp_output_formatted_text (pp);
@@ -461,7 +455,6 @@ make_label_text_n (bool can_colorize, unsigned HOST_WIDE_INT n,
if (!can_colorize)
pp_show_color (pp) = false;
- text_info ti;
rich_location rich_loc (line_table, UNKNOWN_LOCATION);
va_list ap;
@@ -470,11 +463,7 @@ make_label_text_n (bool can_colorize, unsigned HOST_WIDE_INT n,
const char *fmt = ngettext (singular_fmt, plural_fmt, n);
- ti.format_spec = fmt;
- ti.args_ptr = &ap;
- ti.err_no = 0;
- ti.x_data = NULL;
- ti.m_richloc = &rich_loc;
+ text_info ti (fmt, &ap, 0, NULL, &rich_loc);
pp_format (pp, &ti);
pp_output_formatted_text (pp);
diff --git a/gcc/analyzer/pending-diagnostic.cc b/gcc/analyzer/pending-diagnostic.cc
index e36ed4f..c7d3370 100644
--- a/gcc/analyzer/pending-diagnostic.cc
+++ b/gcc/analyzer/pending-diagnostic.cc
@@ -96,15 +96,10 @@ evdesc::event_desc::formatted_print (const char *fmt, ...) const
pp_show_color (pp) = m_colorize;
- text_info ti;
rich_location rich_loc (line_table, UNKNOWN_LOCATION);
va_list ap;
va_start (ap, fmt);
- ti.format_spec = _(fmt);
- ti.args_ptr = &ap;
- ti.err_no = 0;
- ti.x_data = NULL;
- ti.m_richloc = &rich_loc;
+ text_info ti (_(fmt), &ap, 0, nullptr, &rich_loc);
pp_format (pp, &ti);
pp_output_formatted_text (pp);
va_end (ap);
diff --git a/gcc/analyzer/program-point.cc b/gcc/analyzer/program-point.cc
index d7db2f5..914d49f 100644
--- a/gcc/analyzer/program-point.cc
+++ b/gcc/analyzer/program-point.cc
@@ -256,8 +256,8 @@ public:
debug_diagnostic_context ()
{
diagnostic_initialize (this, 0);
- show_line_numbers_p = true;
- show_caret = true;
+ m_source_printing.show_line_numbers_p = true;
+ m_source_printing.enabled = true;
}
~debug_diagnostic_context ()
{
diff --git a/gcc/auto-profile.cc b/gcc/auto-profile.cc
index ff3b763..e75b046 100644
--- a/gcc/auto-profile.cc
+++ b/gcc/auto-profile.cc
@@ -1434,7 +1434,7 @@ afdo_calculate_branch_prob (bb_set *annotated_bb)
else
total_count += AFDO_EINFO (e)->get_count ();
}
- if (num_unknown_succ == 0 && total_count > profile_count::zero ())
+ if (num_unknown_succ == 0 && total_count.nonzero_p())
{
FOR_EACH_EDGE (e, ei, bb->succs)
e->probability
@@ -1571,7 +1571,7 @@ afdo_annotate_cfg (const stmt_set &promoted_stmts)
DECL_SOURCE_LOCATION (current_function_decl));
afdo_source_profile->mark_annotated (cfun->function_start_locus);
afdo_source_profile->mark_annotated (cfun->function_end_locus);
- if (max_count > profile_count::zero ())
+ if (max_count.nonzero_p())
{
/* Calculate, propagate count and probability information on CFG. */
afdo_calculate_branch_prob (&annotated_bb);
diff --git a/gcc/builtins.cc b/gcc/builtins.cc
index 40dfd36..cb90bd0 100644
--- a/gcc/builtins.cc
+++ b/gcc/builtins.cc
@@ -743,39 +743,22 @@ c_strlen (tree arg, int only_value, c_strlen_data *data, unsigned eltsize)
as needed. */
rtx
-c_readstr (const char *str, scalar_int_mode mode,
+c_readstr (const char *str, fixed_size_mode mode,
bool null_terminated_p/*=true*/)
{
- HOST_WIDE_INT ch;
- unsigned int i, j;
- HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
+ auto_vec<target_unit, MAX_BITSIZE_MODE_ANY_INT / BITS_PER_UNIT> bytes;
- gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
- unsigned int len = (GET_MODE_PRECISION (mode) + HOST_BITS_PER_WIDE_INT - 1)
- / HOST_BITS_PER_WIDE_INT;
+ bytes.reserve (GET_MODE_SIZE (mode));
- gcc_assert (len <= MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT);
- for (i = 0; i < len; i++)
- tmp[i] = 0;
-
- ch = 1;
- for (i = 0; i < GET_MODE_SIZE (mode); i++)
+ target_unit ch = 1;
+ for (unsigned int i = 0; i < GET_MODE_SIZE (mode); ++i)
{
- j = i;
- if (WORDS_BIG_ENDIAN)
- j = GET_MODE_SIZE (mode) - i - 1;
- if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
- && GET_MODE_SIZE (mode) >= UNITS_PER_WORD)
- j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1;
- j *= BITS_PER_UNIT;
-
if (ch || !null_terminated_p)
ch = (unsigned char) str[i];
- tmp[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
+ bytes.quick_push (ch);
}
- wide_int c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode));
- return immed_wide_int_const (c, mode);
+ return native_decode_rtx (mode, bytes, 0);
}
/* Cast a target constant CST to target CHAR and if that value fits into
@@ -3530,10 +3513,7 @@ builtin_memcpy_read_str (void *data, void *, HOST_WIDE_INT offset,
string but the caller guarantees it's large enough for MODE. */
const char *rep = (const char *) data;
- /* The by-pieces infrastructure does not try to pick a vector mode
- for memcpy expansion. */
- return c_readstr (rep + offset, as_a <scalar_int_mode> (mode),
- /*nul_terminated=*/false);
+ return c_readstr (rep + offset, mode, /*nul_terminated=*/false);
}
/* LEN specify length of the block of memcpy/memset operation.
@@ -3994,9 +3974,7 @@ builtin_strncpy_read_str (void *data, void *, HOST_WIDE_INT offset,
if ((unsigned HOST_WIDE_INT) offset > strlen (str))
return const0_rtx;
- /* The by-pieces infrastructure does not try to pick a vector mode
- for strncpy expansion. */
- return c_readstr (str + offset, as_a <scalar_int_mode> (mode));
+ return c_readstr (str + offset, mode);
}
/* Helper to check the sizes of sequences and the destination of calls
@@ -4227,8 +4205,7 @@ builtin_memset_read_str (void *data, void *prev,
memset (p, *c, size);
- /* Vector modes should be handled above. */
- return c_readstr (p, as_a <scalar_int_mode> (mode));
+ return c_readstr (p, mode);
}
/* Callback routine for store_by_pieces. Return the RTL of a register
@@ -4275,8 +4252,7 @@ builtin_memset_gen_str (void *data, void *prev,
p = XALLOCAVEC (char, size);
memset (p, 1, size);
- /* Vector modes should be handled above. */
- coeff = c_readstr (p, as_a <scalar_int_mode> (mode));
+ coeff = c_readstr (p, mode);
target = convert_to_mode (mode, (rtx) data, 1);
target = expand_mult (mode, target, coeff, NULL_RTX, 1);
diff --git a/gcc/builtins.h b/gcc/builtins.h
index 3b5c34c..88a26d7 100644
--- a/gcc/builtins.h
+++ b/gcc/builtins.h
@@ -105,7 +105,7 @@ struct c_strlen_data
};
extern tree c_strlen (tree, int, c_strlen_data * = NULL, unsigned = 1);
-extern rtx c_readstr (const char *, scalar_int_mode, bool = true);
+extern rtx c_readstr (const char *, fixed_size_mode, bool = true);
extern void expand_builtin_setjmp_setup (rtx, rtx);
extern void expand_builtin_setjmp_receiver (rtx);
extern void expand_builtin_update_setjmp_buf (rtx);
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 30ce662..b4b1b90 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,24 @@
+2023-10-15 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/111800
+ * c-warn.cc (match_case_to_enum_1): Assert w.get_precision ()
+ is smaller or equal to WIDE_INT_MAX_INL_PRECISION rather than
+ w.get_len () is smaller or equal to WIDE_INT_MAX_INL_ELTS.
+
+2023-10-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/102989
+ * c-warn.cc (match_case_to_enum_1): Use wi::to_wide just once instead
+ of 3 times, assert get_len () is smaller than WIDE_INT_MAX_INL_ELTS.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * c-common.cc (maybe_add_include_fixit): Update for renaming of
+ diagnostic_context's show_caret to m_source_printing.enabled.
+ * c-opts.cc (c_common_init_options): Update for renaming of
+ diagnostic_context's colorize_source_p to
+ m_source_printing.colorize_source_p.
+
2023-09-20 Jakub Jelinek <jakub@redhat.com>
PR c++/111392
diff --git a/gcc/c-family/c-common.cc b/gcc/c-family/c-common.cc
index aae5726..f044db5 100644
--- a/gcc/c-family/c-common.cc
+++ b/gcc/c-family/c-common.cc
@@ -9569,7 +9569,7 @@ maybe_add_include_fixit (rich_location *richloc, const char *header,
richloc->add_fixit_insert_before (include_insert_loc, text);
free (text);
- if (override_location && global_dc->show_caret)
+ if (override_location && global_dc->m_source_printing.enabled)
{
/* Replace the primary location with that of the insertion point for the
fix-it hint.
diff --git a/gcc/c-family/c-opts.cc b/gcc/c-family/c-opts.cc
index fe2d143..ce2e021 100644
--- a/gcc/c-family/c-opts.cc
+++ b/gcc/c-family/c-opts.cc
@@ -272,7 +272,7 @@ c_common_init_options (unsigned int decoded_options_count,
if (c_dialect_cxx ())
set_std_cxx17 (/*ISO*/false);
- global_dc->colorize_source_p = true;
+ global_dc->m_source_printing.colorize_source_p = true;
}
/* Handle switch SCODE with argument ARG. VALUE is true, unless no-
diff --git a/gcc/c-family/c-warn.cc b/gcc/c-family/c-warn.cc
index e67dd87..3e2d02a 100644
--- a/gcc/c-family/c-warn.cc
+++ b/gcc/c-family/c-warn.cc
@@ -1517,13 +1517,15 @@ match_case_to_enum_1 (tree key, tree type, tree label)
return;
char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+ wide_int w = wi::to_wide (key);
+ gcc_assert (w.get_precision () <= WIDE_INT_MAX_INL_PRECISION);
if (tree_fits_uhwi_p (key))
- print_dec (wi::to_wide (key), buf, UNSIGNED);
+ print_dec (w, buf, UNSIGNED);
else if (tree_fits_shwi_p (key))
- print_dec (wi::to_wide (key), buf, SIGNED);
+ print_dec (w, buf, SIGNED);
else
- print_hex (wi::to_wide (key), buf);
+ print_hex (w, buf);
if (TYPE_NAME (type) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (CASE_LABEL (label)),
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index c23f756..92e90e7 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,17 @@
+2023-10-17 Martin Uecker <uecker@tugraz.at>
+
+ PR c/111708
+ * c-decl.cc (grokdeclarator): Add error.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * c-objc-common.cc (c_tree_printer): Update for "m_" prefixes to
+ text_info fields.
+
+2023-09-30 Eugene Rozenfeld <erozen@microsoft.com>
+
+ * Make-lang.in: Make create_fdas_for_cc1 target not .PHONY
+
2023-09-20 Jakub Jelinek <jakub@redhat.com>
* c-parser.cc (c_parser_postfix_expression_after_primary): Parse
diff --git a/gcc/c/Make-lang.in b/gcc/c/Make-lang.in
index 79bc0df..3ef8a67 100644
--- a/gcc/c/Make-lang.in
+++ b/gcc/c/Make-lang.in
@@ -91,8 +91,6 @@ cc1$(exeext): $(C_OBJS) cc1-checksum.o $(BACKEND) $(LIBDEPS)
components_in_prev = "bfd opcodes binutils fixincludes gas gcc gmp mpfr mpc isl gold intl ld libbacktrace libcpp libcody libdecnumber libiberty libiberty-linker-plugin libiconv zlib lto-plugin libctf libsframe"
components_in_prev_target = "libstdc++-v3 libsanitizer libvtv libgcc libbacktrace libphobos zlib libgomp libatomic"
-.PHONY: create_fdas_for_cc1
-
cc1.fda: create_fdas_for_cc1
$(PROFILE_MERGER) $(shell ls -ha cc1_*.fda) --output_file cc1.fda -gcov_version 2
@@ -116,6 +114,8 @@ create_fdas_for_cc1: ../stage1-gcc/cc1$(exeext) ../prev-gcc/$(PERF_DATA)
$(CREATE_GCOV) -binary ../prev-gcc/cc1$(exeext) -gcov $$profile_name -profile $$perf_path -gcov_version 2; \
fi; \
done;
+
+ $(STAMP) $@
#
# Build hooks:
diff --git a/gcc/c/c-decl.cc b/gcc/c/c-decl.cc
index 5822faf..0de3847 100644
--- a/gcc/c/c-decl.cc
+++ b/gcc/c/c-decl.cc
@@ -8032,6 +8032,27 @@ grokdeclarator (const struct c_declarator *declarator,
TREE_THIS_VOLATILE (decl) = 1;
}
}
+
+ /* C99 6.2.2p7: It is invalid (compile-time undefined
+ behavior) to create an 'extern' declaration for a
+ function if there is a global declaration that is
+ 'static' and the global declaration is not visible.
+ (If the static declaration _is_ currently visible,
+ the 'extern' declaration is taken to refer to that decl.) */
+ if (!initialized
+ && TREE_PUBLIC (decl)
+ && current_scope != file_scope)
+ {
+ tree global_decl = identifier_global_value (declarator->u.id.id);
+ tree visible_decl = lookup_name (declarator->u.id.id);
+
+ if (global_decl
+ && global_decl != visible_decl
+ && VAR_OR_FUNCTION_DECL_P (global_decl)
+ && !TREE_PUBLIC (global_decl))
+ error_at (loc, "function previously declared %<static%> "
+ "redeclared %<extern%>");
+ }
}
else
{
diff --git a/gcc/c/c-objc-common.cc b/gcc/c/c-objc-common.cc
index e4aed61..c8f49aa 100644
--- a/gcc/c/c-objc-common.cc
+++ b/gcc/c/c-objc-common.cc
@@ -272,7 +272,7 @@ c_tree_printer (pretty_printer *pp, text_info *text, const char *spec,
if (*spec != 'v')
{
- t = va_arg (*text->args_ptr, tree);
+ t = va_arg (*text->m_args_ptr, tree);
if (set_locus)
text->set_location (0, DECL_SOURCE_LOCATION (t),
SHOW_RANGE_WITH_CARET);
@@ -316,7 +316,7 @@ c_tree_printer (pretty_printer *pp, text_info *text, const char *spec,
return true;
case 'v':
- pp_c_cv_qualifiers (cpp, va_arg (*text->args_ptr, int), hash);
+ pp_c_cv_qualifiers (cpp, va_arg (*text->m_args_ptr, int), hash);
return true;
default:
diff --git a/gcc/calls.cc b/gcc/calls.cc
index 1f3a6d5..e9e6951 100644
--- a/gcc/calls.cc
+++ b/gcc/calls.cc
@@ -1291,7 +1291,7 @@ initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
cumulative_args_t args_so_far,
int reg_parm_stack_space,
rtx *old_stack_level,
- poly_int64_pod *old_pending_adj,
+ poly_int64 *old_pending_adj,
bool *must_preallocate, int *ecf_flags,
bool *may_tailcall, bool call_from_thunk_p)
{
@@ -2298,7 +2298,7 @@ load_register_parameters (struct arg_data *args, int num_actuals,
bytes that should be popped after the call. */
static bool
-combine_pending_stack_adjustment_and_call (poly_int64_pod *adjustment_out,
+combine_pending_stack_adjustment_and_call (poly_int64 *adjustment_out,
poly_int64 unadjusted_args_size,
struct args_size *args_size,
unsigned int preferred_unit_stack_boundary)
diff --git a/gcc/cfganal.cc b/gcc/cfganal.cc
index cc858b9..490eef7 100644
--- a/gcc/cfganal.cc
+++ b/gcc/cfganal.cc
@@ -468,7 +468,7 @@ control_dependences::control_dependences ()
bitmap_obstack_initialize (&m_bitmaps);
control_dependence_map.create (last_basic_block_for_fn (cfun));
- control_dependence_map.quick_grow (last_basic_block_for_fn (cfun));
+ control_dependence_map.quick_grow_cleared (last_basic_block_for_fn (cfun));
for (int i = 0; i < last_basic_block_for_fn (cfun); ++i)
bitmap_initialize (&control_dependence_map[i], &m_bitmaps);
for (int i = 0; i < num_edges; ++i)
diff --git a/gcc/cfgbuild.cc b/gcc/cfgbuild.cc
index 15ed4de..9a6b34fb 100644
--- a/gcc/cfgbuild.cc
+++ b/gcc/cfgbuild.cc
@@ -693,6 +693,43 @@ compute_outgoing_frequencies (basic_block b)
}
}
+/* Update the profile information for BB, which was created by splitting
+ an RTL block that had a non-final jump. */
+
+static void
+update_profile_for_new_sub_basic_block (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ bool initialized_src = false, uninitialized_src = false;
+ bb->count = profile_count::zero ();
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->count ().initialized_p ())
+ {
+ bb->count += e->count ();
+ initialized_src = true;
+ }
+ else
+ uninitialized_src = true;
+ }
+ /* When some edges are missing with read profile, this is
+ most likely because RTL expansion introduced a loop.
+ When profile is guessed we may have BB that is reachable
+ from unlikely path as well as from normal path.
+
+ TODO: We should handle loops created during BB expansion
+ correctly here. For now we assume all those loops to cycle
+ precisely once. */
+ if (!initialized_src
+ || (uninitialized_src
+ && profile_status_for_fn (cfun) < PROFILE_GUESSED))
+ bb->count = profile_count::uninitialized ();
+
+ compute_outgoing_frequencies (bb);
+}
+
/* Assume that some pass has inserted labels or control flow
instructions within a basic block. Split basic blocks as needed
and create edges. */
@@ -744,40 +781,15 @@ find_many_sub_basic_blocks (sbitmap blocks)
if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
- edge e;
- edge_iterator ei;
-
if (STATE (bb) == BLOCK_ORIGINAL)
continue;
if (STATE (bb) == BLOCK_NEW)
{
- bool initialized_src = false, uninitialized_src = false;
- bb->count = profile_count::zero ();
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- if (e->count ().initialized_p ())
- {
- bb->count += e->count ();
- initialized_src = true;
- }
- else
- uninitialized_src = true;
- }
- /* When some edges are missing with read profile, this is
- most likely because RTL expansion introduced loop.
- When profile is guessed we may have BB that is reachable
- from unlikely path as well as from normal path.
-
- TODO: We should handle loops created during BB expansion
- correctly here. For now we assume all those loop to cycle
- precisely once. */
- if (!initialized_src
- || (uninitialized_src
- && profile_status_for_fn (cfun) < PROFILE_GUESSED))
- bb->count = profile_count::uninitialized ();
+ update_profile_for_new_sub_basic_block (bb);
+ continue;
}
- /* If nothing changed, there is no need to create new BBs. */
- else if (EDGE_COUNT (bb->succs) == n_succs[bb->index])
+ /* If nothing changed, there is no need to create new BBs. */
+ if (EDGE_COUNT (bb->succs) == n_succs[bb->index])
{
/* In rare occassions RTL expansion might have mistakely assigned
a probabilities different from what is in CFG. This happens
@@ -788,10 +800,33 @@ find_many_sub_basic_blocks (sbitmap blocks)
update_br_prob_note (bb);
continue;
}
-
compute_outgoing_frequencies (bb);
}
FOR_EACH_BB_FN (bb, cfun)
SET_STATE (bb, 0);
}
+
+/* Like find_many_sub_basic_blocks, but look only within BB. */
+
+void
+find_sub_basic_blocks (basic_block bb)
+{
+ basic_block end_bb = bb->next_bb;
+ find_bb_boundaries (bb);
+ if (bb->next_bb == end_bb)
+ return;
+
+ /* Re-scan and wire in all edges. This expects simple (conditional)
+ jumps at the end of each new basic block. */
+ make_edges (bb, end_bb->prev_bb, 1);
+
+ /* Update branch probabilities. Expect only (un)conditional jumps
+ to be created with only the forward edges. */
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
+ {
+ compute_outgoing_frequencies (bb);
+ for (bb = bb->next_bb; bb != end_bb; bb = bb->next_bb)
+ update_profile_for_new_sub_basic_block (bb);
+ }
+}
diff --git a/gcc/cfgbuild.h b/gcc/cfgbuild.h
index 51d3ecc..4191fb3 100644
--- a/gcc/cfgbuild.h
+++ b/gcc/cfgbuild.h
@@ -24,5 +24,6 @@ extern bool inside_basic_block_p (const rtx_insn *);
extern bool control_flow_insn_p (const rtx_insn *);
extern void rtl_make_eh_edge (sbitmap, basic_block, rtx);
extern void find_many_sub_basic_blocks (sbitmap);
+extern void find_sub_basic_blocks (basic_block);
#endif /* GCC_CFGBUILD_H */
diff --git a/gcc/cfgloop.cc b/gcc/cfgloop.cc
index 6d46b5b..5a05896 100644
--- a/gcc/cfgloop.cc
+++ b/gcc/cfgloop.cc
@@ -1895,33 +1895,38 @@ void
record_niter_bound (class loop *loop, const widest_int &i_bound,
bool realistic, bool upper)
{
+ if (wi::min_precision (i_bound, SIGNED) > bound_wide_int ().get_precision ())
+ return;
+
+ bound_wide_int bound = bound_wide_int::from (i_bound, SIGNED);
+
/* Update the bounds only when there is no previous estimation, or when the
current estimation is smaller. */
if (upper
&& (!loop->any_upper_bound
- || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound)))
{
loop->any_upper_bound = true;
- loop->nb_iterations_upper_bound = i_bound;
+ loop->nb_iterations_upper_bound = bound;
if (!loop->any_likely_upper_bound)
{
loop->any_likely_upper_bound = true;
- loop->nb_iterations_likely_upper_bound = i_bound;
+ loop->nb_iterations_likely_upper_bound = bound;
}
}
if (realistic
&& (!loop->any_estimate
- || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
+ || wi::ltu_p (bound, loop->nb_iterations_estimate)))
{
loop->any_estimate = true;
- loop->nb_iterations_estimate = i_bound;
+ loop->nb_iterations_estimate = bound;
}
if (!realistic
&& (!loop->any_likely_upper_bound
- || wi::ltu_p (i_bound, loop->nb_iterations_likely_upper_bound)))
+ || wi::ltu_p (bound, loop->nb_iterations_likely_upper_bound)))
{
loop->any_likely_upper_bound = true;
- loop->nb_iterations_likely_upper_bound = i_bound;
+ loop->nb_iterations_likely_upper_bound = bound;
}
/* If an upper bound is smaller than the realistic estimate of the
@@ -2018,7 +2023,7 @@ get_estimated_loop_iterations (class loop *loop, widest_int *nit)
return false;
}
- *nit = loop->nb_iterations_estimate;
+ *nit = widest_int::from (loop->nb_iterations_estimate, SIGNED);
return true;
}
@@ -2032,7 +2037,7 @@ get_max_loop_iterations (const class loop *loop, widest_int *nit)
if (!loop->any_upper_bound)
return false;
- *nit = loop->nb_iterations_upper_bound;
+ *nit = widest_int::from (loop->nb_iterations_upper_bound, SIGNED);
return true;
}
@@ -2066,7 +2071,7 @@ get_likely_max_loop_iterations (class loop *loop, widest_int *nit)
if (!loop->any_likely_upper_bound)
return false;
- *nit = loop->nb_iterations_likely_upper_bound;
+ *nit = widest_int::from (loop->nb_iterations_likely_upper_bound, SIGNED);
return true;
}
diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
index f28ec70..559d512 100644
--- a/gcc/cfgloop.h
+++ b/gcc/cfgloop.h
@@ -44,6 +44,9 @@ enum iv_extend_code
IV_UNKNOWN_EXTEND
};
+typedef generic_wide_int <fixed_wide_int_storage <WIDE_INT_MAX_INL_PRECISION> >
+ bound_wide_int;
+
/* The structure describing a bound on number of iterations of a loop. */
class GTY ((chain_next ("%h.next"))) nb_iter_bound {
@@ -58,7 +61,7 @@ public:
overflows (as MAX + 1 is sometimes produced as the estimate on number
of executions of STMT).
b) it is consistent with the result of number_of_iterations_exit. */
- widest_int bound;
+ bound_wide_int bound;
/* True if, after executing the statement BOUND + 1 times, we will
leave the loop; that is, all the statements after it are executed at most
@@ -161,14 +164,14 @@ public:
/* An integer guaranteed to be greater or equal to nb_iterations. Only
valid if any_upper_bound is true. */
- widest_int nb_iterations_upper_bound;
+ bound_wide_int nb_iterations_upper_bound;
- widest_int nb_iterations_likely_upper_bound;
+ bound_wide_int nb_iterations_likely_upper_bound;
/* An integer giving an estimate on nb_iterations. Unlike
nb_iterations_upper_bound, there is no guarantee that it is at least
nb_iterations. */
- widest_int nb_iterations_estimate;
+ bound_wide_int nb_iterations_estimate;
/* If > 0, an integer, where the user asserted that for any
I in [ 0, nb_iterations ) and for any J in
diff --git a/gcc/combine.cc b/gcc/combine.cc
index e46d202..cb48e7f 100644
--- a/gcc/combine.cc
+++ b/gcc/combine.cc
@@ -11923,7 +11923,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode,
/* (unsigned) < 0x80000000 is equivalent to >= 0. */
else if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
- && ((unsigned HOST_WIDE_INT) const_op
+ && (((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode))
== HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
{
const_op = 0;
@@ -11962,7 +11962,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode,
/* (unsigned) >= 0x80000000 is equivalent to < 0. */
else if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
- && ((unsigned HOST_WIDE_INT) const_op
+ && (((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode))
== HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
{
const_op = 0;
@@ -12003,14 +12003,15 @@ simplify_compare_const (enum rtx_code code, machine_mode mode,
&& !MEM_VOLATILE_P (op0)
/* The optimization makes only sense for constants which are big enough
so that we have a chance to chop off something at all. */
- && (unsigned HOST_WIDE_INT) const_op > 0xff
- /* Bail out, if the constant does not fit into INT_MODE. */
- && (unsigned HOST_WIDE_INT) const_op
- < ((HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1) << 1) - 1)
+ && ((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode)) > 0xff
/* Ensure that we do not overflow during normalization. */
- && (code != GTU || (unsigned HOST_WIDE_INT) const_op < HOST_WIDE_INT_M1U))
+ && (code != GTU
+ || ((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode))
+ < HOST_WIDE_INT_M1U)
+ && trunc_int_for_mode (const_op, int_mode) == const_op)
{
- unsigned HOST_WIDE_INT n = (unsigned HOST_WIDE_INT) const_op;
+ unsigned HOST_WIDE_INT n
+ = (unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode);
enum rtx_code adjusted_code;
/* Normalize code to either LEU or GEU. */
@@ -12051,15 +12052,15 @@ simplify_compare_const (enum rtx_code code, machine_mode mode,
HOST_WIDE_INT_PRINT_HEX ") to (MEM %s "
HOST_WIDE_INT_PRINT_HEX ").\n", GET_MODE_NAME (int_mode),
GET_MODE_NAME (narrow_mode_iter), GET_RTX_NAME (code),
- (unsigned HOST_WIDE_INT)const_op, GET_RTX_NAME (adjusted_code),
- n);
+ (unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode),
+ GET_RTX_NAME (adjusted_code), n);
}
poly_int64 offset = (BYTES_BIG_ENDIAN
? 0
: (GET_MODE_SIZE (int_mode)
- GET_MODE_SIZE (narrow_mode_iter)));
*pop0 = adjust_address_nv (op0, narrow_mode_iter, offset);
- *pop1 = GEN_INT (n);
+ *pop1 = gen_int_mode (n, narrow_mode_iter);
return adjusted_code;
}
}
@@ -13410,27 +13411,43 @@ record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
if (REG_P (dest))
{
- /* If we are setting the whole register, we know its value. Otherwise
- show that we don't know the value. We can handle a SUBREG if it's
- the low part, but we must be careful with paradoxical SUBREGs on
- RISC architectures because we cannot strip e.g. an extension around
- a load and record the naked load since the RTL middle-end considers
- that the upper bits are defined according to LOAD_EXTEND_OP. */
+ /* If we are setting the whole register, we know its value. */
if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
+ /* We can handle a SUBREG if it's the low part, but we must be
+ careful with paradoxical SUBREGs on RISC architectures because
+ we cannot strip e.g. an extension around a load and record the
+ naked load since the RTL middle-end considers that the upper bits
+ are defined according to LOAD_EXTEND_OP. */
else if (GET_CODE (setter) == SET
&& GET_CODE (SET_DEST (setter)) == SUBREG
&& SUBREG_REG (SET_DEST (setter)) == dest
&& known_le (GET_MODE_PRECISION (GET_MODE (dest)),
BITS_PER_WORD)
&& subreg_lowpart_p (SET_DEST (setter)))
- record_value_for_reg (dest, record_dead_insn,
- WORD_REGISTER_OPERATIONS
- && word_register_operation_p (SET_SRC (setter))
- && paradoxical_subreg_p (SET_DEST (setter))
- ? SET_SRC (setter)
- : gen_lowpart (GET_MODE (dest),
- SET_SRC (setter)));
+ {
+ if (WORD_REGISTER_OPERATIONS
+ && word_register_operation_p (SET_SRC (setter))
+ && paradoxical_subreg_p (SET_DEST (setter)))
+ record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
+ else if (!partial_subreg_p (SET_DEST (setter)))
+ record_value_for_reg (dest, record_dead_insn,
+ gen_lowpart (GET_MODE (dest),
+ SET_SRC (setter)));
+ else
+ {
+ record_value_for_reg (dest, record_dead_insn,
+ gen_lowpart (GET_MODE (dest),
+ SET_SRC (setter)));
+
+ unsigned HOST_WIDE_INT mask;
+ reg_stat_type *rsp = &reg_stat[REGNO (dest)];
+ mask = GET_MODE_MASK (GET_MODE (SET_DEST (setter)));
+ rsp->last_set_nonzero_bits |= ~mask;
+ rsp->last_set_sign_bit_copies = 1;
+ }
+ }
+ /* Otherwise show that we don't know the value. */
else
record_value_for_reg (dest, record_dead_insn, NULL_RTX);
}
diff --git a/gcc/common.opt b/gcc/common.opt
index f137a1f..b103b8d 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1252,6 +1252,10 @@ fcprop-registers
Common Var(flag_cprop_registers) Optimization
Perform a register copy-propagation optimization pass.
+ffold-mem-offsets
+Target Bool Var(flag_fold_mem_offsets) Init(1)
+Fold instructions calculating memory offsets to the memory access instruction if possible.
+
fcrossjumping
Common Var(flag_crossjumping) Optimization
Perform cross-jumping optimization.
diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
index 24ae0db..f721888 100644
--- a/gcc/common/config/i386/cpuinfo.h
+++ b/gcc/common/config/i386/cpuinfo.h
@@ -608,6 +608,20 @@ get_intel_cpu (struct __processor_model *cpu_model,
cpu_model->__cpu_type = INTEL_COREI7;
cpu_model->__cpu_subtype = INTEL_COREI7_ARROWLAKE_S;
break;
+ case 0xdd:
+ /* Clearwater Forest. */
+ cpu = "clearwaterforest";
+ CHECK___builtin_cpu_is ("clearwaterforest");
+ cpu_model->__cpu_type = INTEL_CLEARWATERFOREST;
+ break;
+ case 0xcc:
+ /* Panther Lake. */
+ cpu = "pantherlake";
+ CHECK___builtin_cpu_is ("corei7");
+ CHECK___builtin_cpu_is ("pantherlake");
+ cpu_model->__cpu_type = INTEL_COREI7;
+ cpu_model->__cpu_subtype = INTEL_COREI7_PANTHERLAKE;
+ break;
case 0x17:
case 0x1d:
/* Penryn. */
@@ -678,6 +692,7 @@ get_available_features (struct __processor_model *cpu_model,
#define XSTATE_HI_ZMM 0x80
#define XSTATE_TILECFG 0x20000
#define XSTATE_TILEDATA 0x40000
+#define XSTATE_APX_F 0x80000
#define XCR_AVX_ENABLED_MASK \
(XSTATE_SSE | XSTATE_YMM)
@@ -685,11 +700,13 @@ get_available_features (struct __processor_model *cpu_model,
(XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM)
#define XCR_AMX_ENABLED_MASK \
(XSTATE_TILECFG | XSTATE_TILEDATA)
+#define XCR_APX_F_ENABLED_MASK XSTATE_APX_F
- /* Check if AVX and AVX512 are usable. */
+ /* Check if AVX, AVX512 and APX are usable. */
int avx_usable = 0;
int avx512_usable = 0;
int amx_usable = 0;
+ int apx_usable = 0;
/* Check if KL is usable. */
int has_kl = 0;
if ((ecx & bit_OSXSAVE))
@@ -709,6 +726,8 @@ get_available_features (struct __processor_model *cpu_model,
}
amx_usable = ((xcrlow & XCR_AMX_ENABLED_MASK)
== XCR_AMX_ENABLED_MASK);
+ apx_usable = ((xcrlow & XCR_APX_F_ENABLED_MASK)
+ == XCR_APX_F_ENABLED_MASK);
}
#define set_feature(f) \
@@ -833,6 +852,8 @@ get_available_features (struct __processor_model *cpu_model,
set_feature (FEATURE_IBT);
if (edx & bit_UINTR)
set_feature (FEATURE_UINTR);
+ if (edx & bit_USER_MSR)
+ set_feature (FEATURE_USER_MSR);
if (amx_usable)
{
if (edx & bit_AMX_TILE)
@@ -922,6 +943,11 @@ get_available_features (struct __processor_model *cpu_model,
if (edx & bit_AMX_COMPLEX)
set_feature (FEATURE_AMX_COMPLEX);
}
+ if (apx_usable)
+ {
+ if (edx & bit_APX_F)
+ set_feature (FEATURE_APX_F);
+ }
}
}
diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
index 95468b7..79b1b35 100644
--- a/gcc/common/config/i386/i386-common.cc
+++ b/gcc/common/config/i386/i386-common.cc
@@ -123,6 +123,9 @@ along with GCC; see the file COPYING3. If not see
#define OPTION_MASK_ISA2_SM3_SET OPTION_MASK_ISA2_SM3
#define OPTION_MASK_ISA2_SHA512_SET OPTION_MASK_ISA2_SHA512
#define OPTION_MASK_ISA2_SM4_SET OPTION_MASK_ISA2_SM4
+#define OPTION_MASK_ISA2_APX_F_SET OPTION_MASK_ISA2_APX_F
+#define OPTION_MASK_ISA2_EVEX512_SET OPTION_MASK_ISA2_EVEX512
+#define OPTION_MASK_ISA2_USER_MSR_SET OPTION_MASK_ISA2_USER_MSR
/* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
as -msse4.2. */
@@ -309,6 +312,9 @@ along with GCC; see the file COPYING3. If not see
#define OPTION_MASK_ISA2_SM3_UNSET OPTION_MASK_ISA2_SM3
#define OPTION_MASK_ISA2_SHA512_UNSET OPTION_MASK_ISA2_SHA512
#define OPTION_MASK_ISA2_SM4_UNSET OPTION_MASK_ISA2_SM4
+#define OPTION_MASK_ISA2_APX_F_UNSET OPTION_MASK_ISA2_APX_F
+#define OPTION_MASK_ISA2_EVEX512_UNSET OPTION_MASK_ISA2_EVEX512
+#define OPTION_MASK_ISA2_USER_MSR_UNSET OPTION_MASK_ISA2_USER_MSR
/* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should the same
as -mno-sse4.1. */
@@ -1341,6 +1347,47 @@ ix86_handle_option (struct gcc_options *opts,
}
return true;
+ case OPT_mapxf:
+ if (value)
+ {
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_APX_F_SET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_APX_F_SET;
+ opts->x_ix86_apx_features = apx_all;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_APX_F_UNSET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_APX_F_UNSET;
+ opts->x_ix86_apx_features = apx_none;
+ }
+ return true;
+
+ case OPT_mevex512:
+ if (value)
+ {
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_EVEX512_SET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_EVEX512_SET;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_EVEX512_UNSET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_EVEX512_UNSET;
+ }
+ return true;
+
+ case OPT_musermsr:
+ if (value)
+ {
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_USER_MSR_SET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_USER_MSR_SET;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_USER_MSR_UNSET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_USER_MSR_UNSET;
+ }
+ return true;
+
case OPT_mfma:
if (value)
{
@@ -2030,6 +2077,7 @@ const char *const processor_names[] =
"tremont",
"sierraforest",
"grandridge",
+ "clearwaterforest",
"knl",
"knm",
"skylake",
@@ -2047,6 +2095,7 @@ const char *const processor_names[] =
"graniterapids-d",
"arrowlake",
"arrowlake-s",
+ "pantherlake",
"intel",
"lujiazui",
"geode",
@@ -2179,6 +2228,8 @@ const pta processor_alias_table[] =
M_CPU_SUBTYPE (INTEL_COREI7_ARROWLAKE_S), P_PROC_AVX2},
{"lunarlake", PROCESSOR_ARROWLAKE_S, CPU_HASWELL, PTA_ARROWLAKE_S,
M_CPU_SUBTYPE (INTEL_COREI7_ARROWLAKE_S), P_PROC_AVX2},
+ {"pantherlake", PROCESSOR_PANTHERLAKE, CPU_HASWELL, PTA_PANTHERLAKE,
+ M_CPU_SUBTYPE (INTEL_COREI7_PANTHERLAKE), P_PROC_AVX2},
{"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
M_CPU_TYPE (INTEL_BONNELL), P_PROC_SSSE3},
{"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
@@ -2199,6 +2250,8 @@ const pta processor_alias_table[] =
M_CPU_SUBTYPE (INTEL_SIERRAFOREST), P_PROC_AVX2},
{"grandridge", PROCESSOR_GRANDRIDGE, CPU_HASWELL, PTA_GRANDRIDGE,
M_CPU_TYPE (INTEL_GRANDRIDGE), P_PROC_AVX2},
+ {"clearwaterforest", PROCESSOR_CLEARWATERFOREST, CPU_HASWELL,
+ PTA_CLEARWATERFOREST, M_CPU_TYPE (INTEL_CLEARWATERFOREST), P_PROC_AVX2},
{"knl", PROCESSOR_KNL, CPU_SLM, PTA_KNL,
M_CPU_TYPE (INTEL_KNL), P_PROC_AVX512F},
{"knm", PROCESSOR_KNM, CPU_SLM, PTA_KNM,
diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h
index 9153b4d..533b748 100644
--- a/gcc/common/config/i386/i386-cpuinfo.h
+++ b/gcc/common/config/i386/i386-cpuinfo.h
@@ -62,6 +62,7 @@ enum processor_types
ZHAOXIN_FAM7H,
INTEL_SIERRAFOREST,
INTEL_GRANDRIDGE,
+ INTEL_CLEARWATERFOREST,
CPU_TYPE_MAX,
BUILTIN_CPU_TYPE_MAX = CPU_TYPE_MAX
};
@@ -101,6 +102,7 @@ enum processor_subtypes
INTEL_COREI7_GRANITERAPIDS_D,
INTEL_COREI7_ARROWLAKE,
INTEL_COREI7_ARROWLAKE_S,
+ INTEL_COREI7_PANTHERLAKE,
CPU_SUBTYPE_MAX
};
@@ -261,6 +263,8 @@ enum processor_features
FEATURE_SM3,
FEATURE_SHA512,
FEATURE_SM4,
+ FEATURE_APX_F,
+ FEATURE_USER_MSR,
CPU_FEATURE_MAX
};
diff --git a/gcc/common/config/i386/i386-isas.h b/gcc/common/config/i386/i386-isas.h
index 2297903..6875924 100644
--- a/gcc/common/config/i386/i386-isas.h
+++ b/gcc/common/config/i386/i386-isas.h
@@ -191,4 +191,6 @@ ISA_NAMES_TABLE_START
ISA_NAMES_TABLE_ENTRY("sm3", FEATURE_SM3, P_NONE, "-msm3")
ISA_NAMES_TABLE_ENTRY("sha512", FEATURE_SHA512, P_NONE, "-msha512")
ISA_NAMES_TABLE_ENTRY("sm4", FEATURE_SM4, P_NONE, "-msm4")
+ ISA_NAMES_TABLE_ENTRY("apxf", FEATURE_APX_F, P_NONE, "-mapxf")
+ ISA_NAMES_TABLE_ENTRY("usermsr", FEATURE_USER_MSR, P_NONE, "-musermsr")
ISA_NAMES_TABLE_END
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index 9a0a68f..d721628 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -310,6 +310,9 @@ static const struct riscv_ext_version riscv_ext_version_table[] =
{"svnapot", ISA_SPEC_CLASS_NONE, 1, 0},
{"svpbmt", ISA_SPEC_CLASS_NONE, 1, 0},
+ {"xcvmac", ISA_SPEC_CLASS_NONE, 1, 0},
+ {"xcvalu", ISA_SPEC_CLASS_NONE, 1, 0},
+
{"xtheadba", ISA_SPEC_CLASS_NONE, 1, 0},
{"xtheadbb", ISA_SPEC_CLASS_NONE, 1, 0},
{"xtheadbs", ISA_SPEC_CLASS_NONE, 1, 0},
@@ -1036,6 +1039,41 @@ riscv_subset_list::parse_std_ext (const char *p)
return p;
}
+/* Parsing function for a single standard extension.
+
+ Return Value:
+ Points to the end of extensions.
+
+ Arguments:
+ `p`: Current parsing position. */
+
+const char *
+riscv_subset_list::parse_single_std_ext (const char *p)
+{
+ if (*p == 'x' || *p == 's' || *p == 'z')
+ {
+ error_at (m_loc,
+ "%<-march=%s%>: Not single-letter extension. "
+ "%<%c%>",
+ m_arch, *p);
+ return nullptr;
+ }
+
+ unsigned major_version = 0;
+ unsigned minor_version = 0;
+ bool explicit_version_p = false;
+ char subset[2] = {0, 0};
+
+ subset[0] = *p;
+
+ p++;
+
+ p = parsing_subset_version (subset, p, &major_version, &minor_version,
+ /* std_ext_p= */ true, &explicit_version_p);
+
+ add (subset, major_version, minor_version, explicit_version_p, false);
+ return p;
+}
/* Check any implied extensions for EXT. */
void
@@ -1148,6 +1186,105 @@ riscv_subset_list::handle_combine_ext ()
`ext_type`: What kind of extensions, 's', 'z' or 'x'.
`ext_type_str`: Full name for kind of extension. */
+
+const char *
+riscv_subset_list::parse_single_multiletter_ext (const char *p,
+ const char *ext_type,
+ const char *ext_type_str)
+{
+ unsigned major_version = 0;
+ unsigned minor_version = 0;
+ size_t ext_type_len = strlen (ext_type);
+
+ if (strncmp (p, ext_type, ext_type_len) != 0)
+ return NULL;
+
+ char *subset = xstrdup (p);
+ const char *end_of_version;
+ bool explicit_version_p = false;
+ char *ext;
+ char backup;
+ size_t len = strlen (p);
+ size_t end_of_version_pos, i;
+ bool found_any_number = false;
+ bool found_minor_version = false;
+
+ end_of_version_pos = len;
+ /* Find the beginning of the version string. */
+ for (i = len -1; i > 0; --i)
+ {
+ if (ISDIGIT (subset[i]))
+ {
+ found_any_number = true;
+ continue;
+ }
+ /* Might be a version separator, but we need to check one more char;
+ we only allow <major>p<minor>, so we can stop parsing if we find
+ any more `p`. */
+ if (subset[i] == 'p' &&
+ !found_minor_version &&
+ found_any_number && ISDIGIT (subset[i-1]))
+ {
+ found_minor_version = true;
+ continue;
+ }
+
+ end_of_version_pos = i + 1;
+ break;
+ }
+
+ backup = subset[end_of_version_pos];
+ subset[end_of_version_pos] = '\0';
+ ext = xstrdup (subset);
+ subset[end_of_version_pos] = backup;
+
+ end_of_version
+ = parsing_subset_version (ext, subset + end_of_version_pos, &major_version,
+ &minor_version, /* std_ext_p= */ false,
+ &explicit_version_p);
+ free (ext);
+
+ if (end_of_version == NULL)
+ {
+ free (subset);
+ return NULL;
+ }
+
+ subset[end_of_version_pos] = '\0';
+
+ if (strlen (subset) == 1)
+ {
+ error_at (m_loc, "%<-march=%s%>: name of %s must be more than 1 letter",
+ m_arch, ext_type_str);
+ free (subset);
+ return NULL;
+ }
+
+ add (subset, major_version, minor_version, explicit_version_p, false);
+ p += end_of_version - subset;
+ free (subset);
+
+ if (*p != '\0' && *p != '_')
+ {
+ error_at (m_loc, "%<-march=%s%>: %s must separate with %<_%>",
+ m_arch, ext_type_str);
+ return NULL;
+ }
+
+ return p;
+
+}
+
+/* Parsing function for multi-letter extensions.
+
+ Return Value:
+ Points to the end of extensions.
+
+ Arguments:
+ `p`: Current parsing position.
+ `ext_type`: What kind of extensions, 's', 'z' or 'x'.
+ `ext_type_str`: Full name for kind of extension. */
+
const char *
riscv_subset_list::parse_multiletter_ext (const char *p,
const char *ext_type,
@@ -1250,6 +1387,30 @@ riscv_subset_list::parse_multiletter_ext (const char *p,
return p;
}
+/* Parsing function for a single-letter or multi-letter extensions.
+
+ Return Value:
+ Points to the end of extensions.
+
+ Arguments:
+ `p`: Current parsing position. */
+
+const char *
+riscv_subset_list::parse_single_ext (const char *p)
+{
+ switch (p[0])
+ {
+ case 'x':
+ return parse_single_multiletter_ext (p, "x", "non-standard extension");
+ case 'z':
+ return parse_single_multiletter_ext (p, "z", "sub-extension");
+ case 's':
+ return parse_single_multiletter_ext (p, "s", "supervisor extension");
+ default:
+ return parse_single_std_ext (p);
+ }
+}
+
/* Parsing arch string to subset list, return NULL if parsing failed. */
riscv_subset_list *
@@ -1342,6 +1503,26 @@ fail:
return NULL;
}
+/* Clone whole subset list. */
+
+riscv_subset_list *
+riscv_subset_list::clone () const
+{
+ riscv_subset_list *new_list = new riscv_subset_list (m_arch, m_loc);
+ for (riscv_subset_t *itr = m_head; itr != NULL; itr = itr->next)
+ new_list->add (itr->name.c_str (), itr->major_version, itr->minor_version,
+ itr->explicit_version_p, true);
+
+ new_list->m_xlen = m_xlen;
+ return new_list;
+}
+
+void
+riscv_subset_list::set_loc (location_t loc)
+{
+ m_loc = loc;
+}
+
/* Return the current arch string. */
std::string
@@ -1480,6 +1661,9 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
{"ztso", &gcc_options::x_riscv_ztso_subext, MASK_ZTSO},
+ {"xcvmac", &gcc_options::x_riscv_xcv_subext, MASK_XCVMAC},
+ {"xcvalu", &gcc_options::x_riscv_xcv_subext, MASK_XCVALU},
+
{"xtheadba", &gcc_options::x_riscv_xthead_subext, MASK_XTHEADBA},
{"xtheadbb", &gcc_options::x_riscv_xthead_subext, MASK_XTHEADBB},
{"xtheadbs", &gcc_options::x_riscv_xthead_subext, MASK_XTHEADBS},
@@ -1498,6 +1682,37 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
{NULL, NULL, 0}
};
+/* Apply SUBSET_LIST to OPTS if OPTS is not null, also set CURRENT_SUBSET_LIST
+ to SUBSET_LIST, just note this WON'T delete old CURRENT_SUBSET_LIST. */
+
+void
+riscv_set_arch_by_subset_list (riscv_subset_list *subset_list,
+ struct gcc_options *opts)
+{
+ if (opts)
+ {
+ const riscv_ext_flag_table_t *arch_ext_flag_tab;
+ /* Clean up target flags before we set. */
+ for (arch_ext_flag_tab = &riscv_ext_flag_table[0]; arch_ext_flag_tab->ext;
+ ++arch_ext_flag_tab)
+ opts->*arch_ext_flag_tab->var_ref &= ~arch_ext_flag_tab->mask;
+
+ if (subset_list->xlen () == 32)
+ opts->x_target_flags &= ~MASK_64BIT;
+ else if (subset_list->xlen () == 64)
+ opts->x_target_flags |= MASK_64BIT;
+
+ for (arch_ext_flag_tab = &riscv_ext_flag_table[0]; arch_ext_flag_tab->ext;
+ ++arch_ext_flag_tab)
+ {
+ if (subset_list->lookup (arch_ext_flag_tab->ext))
+ opts->*arch_ext_flag_tab->var_ref |= arch_ext_flag_tab->mask;
+ }
+ }
+
+ current_subset_list = subset_list;
+}
+
/* Parse a RISC-V ISA string into an option mask. Must clear or set all arch
dependent mask bits, in case more than one -march string is passed. */
diff --git a/gcc/config.gcc b/gcc/config.gcc
index ee46d96..37311fc 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -436,18 +436,20 @@ i[34567]86-*-* | x86_64-*-*)
avx512vbmi2vlintrin.h avx512vnniintrin.h
avx512vnnivlintrin.h vaesintrin.h vpclmulqdqintrin.h
avx512vpopcntdqvlintrin.h avx512bitalgintrin.h
- pconfigintrin.h wbnoinvdintrin.h movdirintrin.h
- waitpkgintrin.h cldemoteintrin.h avx512bf16vlintrin.h
- avx512bf16intrin.h enqcmdintrin.h serializeintrin.h
- avx512vp2intersectintrin.h avx512vp2intersectvlintrin.h
- tsxldtrkintrin.h amxtileintrin.h amxint8intrin.h
- amxbf16intrin.h x86gprintrin.h uintrintrin.h
- hresetintrin.h keylockerintrin.h avxvnniintrin.h
- mwaitintrin.h avx512fp16intrin.h avx512fp16vlintrin.h
- avxifmaintrin.h avxvnniint8intrin.h avxneconvertintrin.h
+ avx512bitalgvlintrin.h pconfigintrin.h wbnoinvdintrin.h
+ movdirintrin.h waitpkgintrin.h cldemoteintrin.h
+ avx512bf16vlintrin.h avx512bf16intrin.h enqcmdintrin.h
+ serializeintrin.h avx512vp2intersectintrin.h
+ avx512vp2intersectvlintrin.h tsxldtrkintrin.h
+ amxtileintrin.h amxint8intrin.h amxbf16intrin.h
+ x86gprintrin.h uintrintrin.h hresetintrin.h
+ keylockerintrin.h avxvnniintrin.h mwaitintrin.h
+ avx512fp16intrin.h avx512fp16vlintrin.h avxifmaintrin.h
+ avxvnniint8intrin.h avxneconvertintrin.h
cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
- sm3intrin.h sha512intrin.h sm4intrin.h"
+ sm3intrin.h sha512intrin.h sm4intrin.h
+ usermsrintrin.h"
;;
ia64-*-*)
extra_headers=ia64intrin.h
@@ -706,7 +708,7 @@ skylake goldmont goldmont-plus tremont cascadelake tigerlake cooperlake \
sapphirerapids alderlake rocketlake eden-x2 nano nano-1000 nano-2000 nano-3000 \
nano-x2 eden-x4 nano-x4 lujiazui x86-64 x86-64-v2 x86-64-v3 x86-64-v4 \
sierraforest graniterapids graniterapids-d grandridge arrowlake arrowlake-s \
-native"
+clearwaterforest pantherlake native"
# Additional x86 processors supported by --with-cpu=. Each processor
# MUST be separated by exactly one space.
@@ -2524,7 +2526,7 @@ riscv*-*-freebsd*)
loongarch*-*-linux*)
tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}"
- tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h"
+ tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/loongarch-driver.h"
extra_options="${extra_options} linux-android.opt"
tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux"
gnu_ld=yes
@@ -2537,7 +2539,7 @@ loongarch*-*-linux*)
loongarch*-*-elf*)
tm_file="elfos.h newlib-stdint.h ${tm_file}"
- tm_file="${tm_file} loongarch/elf.h loongarch/linux.h"
+ tm_file="${tm_file} loongarch/elf.h loongarch/linux.h loongarch/loongarch-driver.h"
tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux"
gnu_ld=yes
gas=yes
diff --git a/gcc/config.in b/gcc/config.in
index d04718a..98ddddf 100644
--- a/gcc/config.in
+++ b/gcc/config.in
@@ -604,6 +604,12 @@
#endif
+/* Define if your macOS assembler supports .build_version directives */
+#ifndef USED_FOR_TARGET
+#undef HAVE_AS_MACOS_BUILD_VERSION
+#endif
+
+
/* Define if the assembler understands -march=rv*_zifencei. */
#ifndef USED_FOR_TARGET
#undef HAVE_AS_MARCH_ZIFENCEI
diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
index 578ec6f..ab8844f 100644
--- a/gcc/config/aarch64/aarch64-c.cc
+++ b/gcc/config/aarch64/aarch64-c.cc
@@ -82,6 +82,7 @@ aarch64_update_cpp_builtins (cpp_reader *pfile)
{
aarch64_def_or_undef (flag_unsafe_math_optimizations, "__ARM_FP_FAST", pfile);
+ cpp_undef (pfile, "__ARM_ARCH");
builtin_define_with_int_value ("__ARM_ARCH", AARCH64_ISA_V9A ? 9 : 8);
builtin_define_with_int_value ("__ARM_SIZEOF_MINIMAL_ENUM",
diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
index dd47423..eae40b2 100644
--- a/gcc/config/aarch64/aarch64-cores.def
+++ b/gcc/config/aarch64/aarch64-cores.def
@@ -182,6 +182,8 @@ AARCH64_CORE("cortex-x2", cortexx2, cortexa57, V9A, (SVE2_BITPERM, MEMTAG, I8M
AARCH64_CORE("cortex-x3", cortexx3, cortexa57, V9A, (SVE2_BITPERM, MEMTAG, I8MM, BF16), neoversen2, 0x41, 0xd4e, -1)
+AARCH64_CORE("cortex-x4", cortexx4, cortexa57, V9_2A, (SVE2_BITPERM, MEMTAG, PROFILE), neoversen2, 0x41, 0xd81, -1)
+
AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversen2, 0x41, 0xd49, -1)
AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
diff --git a/gcc/config/aarch64/aarch64-opts.h b/gcc/config/aarch64/aarch64-opts.h
index db83485..831e28a 100644
--- a/gcc/config/aarch64/aarch64-opts.h
+++ b/gcc/config/aarch64/aarch64-opts.h
@@ -108,20 +108,18 @@ enum aarch64_key_type {
AARCH64_KEY_B
};
-/* Load pair policy type. */
-enum aarch64_ldp_policy {
- LDP_POLICY_DEFAULT,
- LDP_POLICY_ALWAYS,
- LDP_POLICY_NEVER,
- LDP_POLICY_ALIGNED
-};
-
-/* Store pair policy type. */
-enum aarch64_stp_policy {
- STP_POLICY_DEFAULT,
- STP_POLICY_ALWAYS,
- STP_POLICY_NEVER,
- STP_POLICY_ALIGNED
+/* An enum specifying how to handle load and store pairs using
+ a fine-grained policy:
+ - LDP_STP_POLICY_DEFAULT: Use the policy defined in the tuning structure.
+ - LDP_STP_POLICY_ALIGNED: Emit ldp/stp if the source pointer is aligned
+ to at least double the alignment of the type.
+ - LDP_STP_POLICY_ALWAYS: Emit ldp/stp regardless of alignment.
+ - LDP_STP_POLICY_NEVER: Do not emit ldp/stp. */
+enum aarch64_ldp_stp_policy {
+ AARCH64_LDP_STP_POLICY_DEFAULT,
+ AARCH64_LDP_STP_POLICY_ALIGNED,
+ AARCH64_LDP_STP_POLICY_ALWAYS,
+ AARCH64_LDP_STP_POLICY_NEVER
};
#endif
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 3c8f418..60a55f4 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -568,30 +568,9 @@ struct tune_params
/* Place prefetch struct pointer at the end to enable type checking
errors when tune_params misses elements (e.g., from erroneous merges). */
const struct cpu_prefetch_tune *prefetch;
-/* An enum specifying how to handle load pairs using a fine-grained policy:
- - LDP_POLICY_ALIGNED: Emit ldp if the source pointer is aligned
- to at least double the alignment of the type.
- - LDP_POLICY_ALWAYS: Emit ldp regardless of alignment.
- - LDP_POLICY_NEVER: Do not emit ldp. */
- enum aarch64_ldp_policy_model
- {
- LDP_POLICY_ALIGNED,
- LDP_POLICY_ALWAYS,
- LDP_POLICY_NEVER
- } ldp_policy_model;
-/* An enum specifying how to handle store pairs using a fine-grained policy:
- - STP_POLICY_ALIGNED: Emit stp if the source pointer is aligned
- to at least double the alignment of the type.
- - STP_POLICY_ALWAYS: Emit stp regardless of alignment.
- - STP_POLICY_NEVER: Do not emit stp. */
-
- enum aarch64_stp_policy_model
- {
- STP_POLICY_ALIGNED,
- STP_POLICY_ALWAYS,
- STP_POLICY_NEVER
- } stp_policy_model;
+ /* Define models for the aarch64_ldp_stp_policy. */
+ enum aarch64_ldp_stp_policy ldp_policy_model, stp_policy_model;
};
/* Classifies an address.
@@ -789,6 +768,7 @@ bool aarch64_emit_approx_div (rtx, rtx, rtx);
bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
tree aarch64_vector_load_decl (tree);
void aarch64_expand_call (rtx, rtx, rtx, bool);
+bool aarch64_expand_cpymem_mops (rtx *, bool);
bool aarch64_expand_cpymem (rtx *);
bool aarch64_expand_setmem (rtx *);
bool aarch64_float_const_zero_rtx_p (rtx);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index f67eb70..81ff5ba 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -91,25 +91,25 @@
})
(define_insn "aarch64_simd_dup<mode>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w, w")
+ [(set (match_operand:VDQ_I 0 "register_operand")
(vec_duplicate:VDQ_I
- (match_operand:<VEL> 1 "register_operand" "w,?r")))]
+ (match_operand:<VEL> 1 "register_operand")))]
"TARGET_SIMD"
- "@
- dup\\t%0.<Vtype>, %1.<Vetype>[0]
- dup\\t%0.<Vtype>, %<vwcore>1"
- [(set_attr "type" "neon_dup<q>, neon_from_gp<q>")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ w , w ; neon_dup<q> ] dup\t%0.<Vtype>, %1.<Vetype>[0]
+ [ w , ?r ; neon_from_gp<q> ] dup\t%0.<Vtype>, %<vwcore>1
+ }
)
(define_insn "aarch64_simd_dup<mode>"
- [(set (match_operand:VDQF_F16 0 "register_operand" "=w,w")
+ [(set (match_operand:VDQF_F16 0 "register_operand")
(vec_duplicate:VDQF_F16
- (match_operand:<VEL> 1 "register_operand" "w,r")))]
+ (match_operand:<VEL> 1 "register_operand")))]
"TARGET_SIMD"
- "@
- dup\\t%0.<Vtype>, %1.<Vetype>[0]
- dup\\t%0.<Vtype>, %<vwcore>1"
- [(set_attr "type" "neon_dup<q>, neon_from_gp<q>")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ w , w ; neon_dup<q> ] dup\t%0.<Vtype>, %1.<Vetype>[0]
+ [ w , r ; neon_from_gp<q> ] dup\t%0.<Vtype>, %<vwcore>1
+ }
)
(define_insn "aarch64_dup_lane<mode>"
@@ -143,54 +143,59 @@
)
(define_insn "*aarch64_simd_mov<VDMOV:mode>"
- [(set (match_operand:VDMOV 0 "nonimmediate_operand"
- "=w, r, m, m, m, w, ?r, ?w, ?r, w, w")
- (match_operand:VDMOV 1 "general_operand"
- "m, m, Dz, w, r, w, w, r, r, Dn, Dz"))]
+ [(set (match_operand:VDMOV 0 "nonimmediate_operand")
+ (match_operand:VDMOV 1 "general_operand"))]
"TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
- "@
- ldr\t%d0, %1
- ldr\t%x0, %1
- str\txzr, %0
- str\t%d1, %0
- str\t%x1, %0
- * return TARGET_SIMD ? \"mov\t%0.<Vbtype>, %1.<Vbtype>\" : \"fmov\t%d0, %d1\";
- * return TARGET_SIMD ? \"umov\t%0, %1.d[0]\" : \"fmov\t%x0, %d1\";
- fmov\t%d0, %1
- mov\t%0, %1
- * return aarch64_output_simd_mov_immediate (operands[1], 64);
- fmov\t%d0, xzr"
- [(set_attr "type" "neon_load1_1reg<q>, load_8, store_8, neon_store1_1reg<q>,\
- store_8, neon_logic<q>, neon_to_gp<q>, f_mcr,\
- mov_reg, neon_move<q>, f_mcr")
- (set_attr "arch" "*,*,*,*,*,*,*,*,*,simd,*")]
-)
-
-(define_insn "*aarch64_simd_mov<VQMOV:mode>"
- [(set (match_operand:VQMOV 0 "nonimmediate_operand"
- "=w, Umn, m, w, ?r, ?w, ?r, w, w")
- (match_operand:VQMOV 1 "general_operand"
- "m, Dz, w, w, w, r, r, Dn, Dz"))]
+ {@ [cons: =0, 1; attrs: type, arch]
+ [w , m ; neon_load1_1reg<q> , * ] ldr\t%d0, %1
+ [r , m ; load_8 , * ] ldr\t%x0, %1
+ [m , Dz; store_8 , * ] str\txzr, %0
+ [m , w ; neon_store1_1reg<q>, * ] str\t%d1, %0
+ [m , r ; store_8 , * ] str\t%x1, %0
+ [w , w ; neon_logic<q> , simd] mov\t%0.<Vbtype>, %1.<Vbtype>
+ [w , w ; neon_logic<q> , * ] fmov\t%d0, %d1
+ [?r, w ; neon_to_gp<q> , simd] umov\t%0, %1.d[0]
+ [?r, w ; neon_to_gp<q> , * ] fmov\t%x0, %d1
+ [?w, r ; f_mcr , * ] fmov\t%d0, %1
+ [?r, r ; mov_reg , * ] mov\t%0, %1
+ [w , Dn; neon_move<q> , simd] << aarch64_output_simd_mov_immediate (operands[1], 64);
+ [w , Dz; f_mcr , * ] fmov\t%d0, xzr
+ }
+)
+
+(define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
+ [(set (match_operand:VQMOV 0 "nonimmediate_operand")
+ (match_operand:VQMOV 1 "general_operand"))]
"TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
- "@
- ldr\t%q0, %1
- stp\txzr, xzr, %0
- str\t%q1, %0
- mov\t%0.<Vbtype>, %1.<Vbtype>
- #
- #
- #
- * return aarch64_output_simd_mov_immediate (operands[1], 128);
- fmov\t%d0, xzr"
- [(set_attr "type" "neon_load1_1reg<q>, store_16, neon_store1_1reg<q>,\
- neon_logic<q>, multiple, multiple,\
- multiple, neon_move<q>, fmov")
- (set_attr "length" "4,4,4,4,8,8,8,4,4")
- (set_attr "arch" "*,*,*,simd,*,*,*,simd,*")]
+ {@ [cons: =0, 1; attrs: type, arch, length]
+ [w , m ; neon_load1_1reg<q> , * , 4] ldr\t%q0, %1
+ [Umn, Dz; store_16 , * , 4] stp\txzr, xzr, %0
+ [m , w ; neon_store1_1reg<q>, * , 4] str\t%q1, %0
+ [w , w ; neon_logic<q> , simd, 4] mov\t%0.<Vbtype>, %1.<Vbtype>
+ [?r , w ; multiple , * , 8] #
+ [?w , r ; multiple , * , 8] #
+ [?r , r ; multiple , * , 8] #
+ [w , Dn; neon_move<q> , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
+ [w , Dz; fmov , * , 4] fmov\t%d0, xzr
+ }
+ "&& reload_completed
+ && (REG_P (operands[0])
+ && REG_P (operands[1])
+ && !(FP_REGNUM_P (REGNO (operands[0]))
+ && FP_REGNUM_P (REGNO (operands[1]))))"
+ [(const_int 0)]
+ {
+ if (GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1])))
+ aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
+ else
+ aarch64_split_simd_move (operands[0], operands[1]);
+ DONE;
+ }
)
;; When storing lane zero we can use the normal STR and its more permissive
@@ -207,45 +212,45 @@
)
(define_insn "load_pair<DREG:mode><DREG2:mode>"
- [(set (match_operand:DREG 0 "register_operand" "=w,r")
- (match_operand:DREG 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:DREG2 2 "register_operand" "=w,r")
- (match_operand:DREG2 3 "memory_operand" "m,m"))]
+ [(set (match_operand:DREG 0 "register_operand")
+ (match_operand:DREG 1 "aarch64_mem_pair_operand"))
+ (set (match_operand:DREG2 2 "register_operand")
+ (match_operand:DREG2 3 "memory_operand"))]
"TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
GET_MODE_SIZE (<DREG:MODE>mode)))"
- "@
- ldp\t%d0, %d2, %z1
- ldp\t%x0, %x2, %z1"
- [(set_attr "type" "neon_ldp,load_16")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type ]
+ [ w , Ump , w , m ; neon_ldp ] ldp\t%d0, %d2, %z1
+ [ r , Ump , r , m ; load_16 ] ldp\t%x0, %x2, %z1
+ }
)
(define_insn "vec_store_pair<DREG:mode><DREG2:mode>"
- [(set (match_operand:DREG 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:DREG 1 "register_operand" "w,r"))
- (set (match_operand:DREG2 2 "memory_operand" "=m,m")
- (match_operand:DREG2 3 "register_operand" "w,r"))]
+ [(set (match_operand:DREG 0 "aarch64_mem_pair_operand")
+ (match_operand:DREG 1 "register_operand"))
+ (set (match_operand:DREG2 2 "memory_operand")
+ (match_operand:DREG2 3 "register_operand"))]
"TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
GET_MODE_SIZE (<DREG:MODE>mode)))"
- "@
- stp\t%d1, %d3, %z0
- stp\t%x1, %x3, %z0"
- [(set_attr "type" "neon_stp,store_16")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type ]
+ [ Ump , w , m , w ; neon_stp ] stp\t%d1, %d3, %z0
+ [ Ump , r , m , r ; store_16 ] stp\t%x1, %x3, %z0
+ }
)
(define_insn "aarch64_simd_stp<mode>"
- [(set (match_operand:VP_2E 0 "aarch64_mem_pair_lanes_operand" "=Umn,Umn")
- (vec_duplicate:VP_2E (match_operand:<VEL> 1 "register_operand" "w,r")))]
+ [(set (match_operand:VP_2E 0 "aarch64_mem_pair_lanes_operand")
+ (vec_duplicate:VP_2E (match_operand:<VEL> 1 "register_operand")))]
"TARGET_SIMD"
- "@
- stp\\t%<Vetype>1, %<Vetype>1, %y0
- stp\\t%<vw>1, %<vw>1, %y0"
- [(set_attr "type" "neon_stp, store_<ldpstp_vel_sz>")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ Umn , w ; neon_stp ] stp\t%<Vetype>1, %<Vetype>1, %y0
+ [ Umn , r ; store_<ldpstp_vel_sz> ] stp\t%<vw>1, %<vw>1, %y0
+ }
)
(define_insn "load_pair<VQ:mode><VQ2:mode>"
@@ -276,33 +281,6 @@
[(set_attr "type" "neon_stp_q")]
)
-
-(define_split
- [(set (match_operand:VQMOV 0 "register_operand" "")
- (match_operand:VQMOV 1 "register_operand" ""))]
- "TARGET_FLOAT
- && reload_completed
- && GP_REGNUM_P (REGNO (operands[0]))
- && GP_REGNUM_P (REGNO (operands[1]))"
- [(const_int 0)]
-{
- aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
- DONE;
-})
-
-(define_split
- [(set (match_operand:VQMOV 0 "register_operand" "")
- (match_operand:VQMOV 1 "register_operand" ""))]
- "TARGET_FLOAT
- && reload_completed
- && ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1])))
- || (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))"
- [(const_int 0)]
-{
- aarch64_split_simd_move (operands[0], operands[1]);
- DONE;
-})
-
(define_expand "@aarch64_split_simd_mov<mode>"
[(set (match_operand:VQMOV 0)
(match_operand:VQMOV 1))]
@@ -372,35 +350,37 @@
)
(define_insn_and_split "aarch64_simd_mov_from_<mode>low"
- [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r")
+ [(set (match_operand:<VHALF> 0 "register_operand")
(vec_select:<VHALF>
- (match_operand:VQMOV_NO2E 1 "register_operand" "w,w")
- (match_operand:VQMOV_NO2E 2 "vect_par_cnst_lo_half" "")))]
+ (match_operand:VQMOV_NO2E 1 "register_operand")
+ (match_operand:VQMOV_NO2E 2 "vect_par_cnst_lo_half")))]
"TARGET_SIMD"
- "@
- #
- umov\t%0, %1.d[0]"
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ w , w ; mov_reg ] #
+ [ ?r , w ; neon_to_gp<q> ] umov\t%0, %1.d[0]
+ }
"&& reload_completed && aarch64_simd_register (operands[0], <VHALF>mode)"
[(set (match_dup 0) (match_dup 1))]
{
operands[1] = aarch64_replace_reg_mode (operands[1], <VHALF>mode);
}
- [(set_attr "type" "mov_reg,neon_to_gp<q>")
+ [
(set_attr "length" "4")]
)
(define_insn "aarch64_simd_mov_from_<mode>high"
- [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r,?r")
+ [(set (match_operand:<VHALF> 0 "register_operand")
(vec_select:<VHALF>
- (match_operand:VQMOV_NO2E 1 "register_operand" "w,w,w")
- (match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half" "")))]
+ (match_operand:VQMOV_NO2E 1 "register_operand")
+ (match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half")))]
"TARGET_FLOAT"
- "@
- dup\t%d0, %1.d[1]
- umov\t%0, %1.d[1]
- fmov\t%0, %1.d[1]"
- [(set_attr "type" "neon_dup<q>,neon_to_gp<q>,f_mrc")
- (set_attr "arch" "simd,simd,*")
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , w ; neon_dup<q> , simd ] dup\t%d0, %1.d[1]
+ [ ?r , w ; neon_to_gp<q> , simd ] umov\t%0, %1.d[1]
+ [ ?r , w ; f_mrc , * ] fmov\t%0, %1.d[1]
+ }
+ [
+
(set_attr "length" "4")]
)
@@ -500,7 +480,7 @@
}
)
-(define_expand "xorsign<mode>3"
+(define_expand "@xorsign<mode>3"
[(match_operand:VHSDF 0 "register_operand")
(match_operand:VHSDF 1 "register_operand")
(match_operand:VHSDF 2 "register_operand")]
@@ -1204,27 +1184,27 @@
;; For AND (vector, register) and BIC (vector, immediate)
(define_insn "and<mode>3<vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w,w")
- (and:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w,0")
- (match_operand:VDQ_I 2 "aarch64_reg_or_bic_imm" "w,Db")))]
- "TARGET_SIMD"
- "@
- and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
- * return aarch64_output_simd_mov_immediate (operands[2], <bitsize>,\
- AARCH64_CHECK_BIC);"
+ [(set (match_operand:VDQ_I 0 "register_operand")
+ (and:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+ (match_operand:VDQ_I 2 "aarch64_reg_or_bic_imm")))]
+ "TARGET_SIMD"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , w , w ] and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+ [ w , 0 , Db ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_BIC);
+ }
[(set_attr "type" "neon_logic<q>")]
)
;; For ORR (vector, register) and ORR (vector, immediate)
(define_insn "ior<mode>3<vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w,w")
- (ior:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w,0")
- (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm" "w,Do")))]
- "TARGET_SIMD"
- "@
- orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
- * return aarch64_output_simd_mov_immediate (operands[2], <bitsize>,\
- AARCH64_CHECK_ORR);"
+ [(set (match_operand:VDQ_I 0 "register_operand")
+ (ior:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+ (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm")))]
+ "TARGET_SIMD"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , w , w ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+ [ w , 0 , Do ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_ORR);
+ }
[(set_attr "type" "neon_logic<q>")]
)
@@ -1353,14 +1333,14 @@
)
(define_insn "aarch64_simd_ashr<mode><vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w,w")
- (ashiftrt:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w,w")
- (match_operand:VDQ_I 2 "aarch64_simd_rshift_imm" "D1,Dr")))]
+ [(set (match_operand:VDQ_I 0 "register_operand")
+ (ashiftrt:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+ (match_operand:VDQ_I 2 "aarch64_simd_rshift_imm")))]
"TARGET_SIMD"
- "@
- cmlt\t%0.<Vtype>, %1.<Vtype>, #0
- sshr\t%0.<Vtype>, %1.<Vtype>, %2"
- [(set_attr "type" "neon_compare<q>,neon_shift_imm<q>")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ w , w , D1 ; neon_compare<q> ] cmlt\t%0.<Vtype>, %1.<Vtype>, #0
+ [ w , w , Dr ; neon_shift_imm<q> ] sshr\t%0.<Vtype>, %1.<Vtype>, %2
+ }
)
(define_insn "aarch64_<sra_op>sra_n<mode>_insn"
@@ -3701,20 +3681,21 @@
;; in *aarch64_simd_bsl<mode>_alt.
(define_insn "aarch64_simd_bsl<mode>_internal<vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w,w,w")
+ [(set (match_operand:VDQ_I 0 "register_operand")
(xor:VDQ_I
(and:VDQ_I
(xor:VDQ_I
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w,0,w")
- (match_operand:VDQ_I 2 "register_operand" "w,w,0"))
- (match_operand:VDQ_I 1 "register_operand" "0,w,w"))
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")
+ (match_operand:VDQ_I 2 "register_operand"))
+ (match_operand:VDQ_I 1 "register_operand"))
(match_dup:<V_INT_EQUIV> 3)
))]
"TARGET_SIMD"
- "@
- bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
- bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
- bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ w , 0 , w , w ] bsl\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+ [ w , w , w , 0 ] bit\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+ [ w , w , 0 , w ] bif\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>
+ }
[(set_attr "type" "neon_bsl<q>")]
)
@@ -3725,19 +3706,20 @@
;; permutations of commutative operations, we have to have a separate pattern.
(define_insn "*aarch64_simd_bsl<mode>_alt<vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w,w,w")
+ [(set (match_operand:VDQ_I 0 "register_operand")
(xor:VDQ_I
(and:VDQ_I
(xor:VDQ_I
- (match_operand:VDQ_I 3 "register_operand" "w,w,0")
- (match_operand:<V_INT_EQUIV> 2 "register_operand" "w,0,w"))
- (match_operand:VDQ_I 1 "register_operand" "0,w,w"))
+ (match_operand:VDQ_I 3 "register_operand")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand"))
+ (match_operand:VDQ_I 1 "register_operand"))
(match_dup:<V_INT_EQUIV> 2)))]
"TARGET_SIMD"
- "@
- bsl\\t%0.<Vbtype>, %3.<Vbtype>, %2.<Vbtype>
- bit\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>
- bif\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ w , 0 , w , w ] bsl\t%0.<Vbtype>, %3.<Vbtype>, %2.<Vbtype>
+ [ w , w , 0 , w ] bit\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>
+ [ w , w , w , 0 ] bif\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+ }
[(set_attr "type" "neon_bsl<q>")]
)
@@ -3752,21 +3734,22 @@
;; would be better calculated on the integer side.
(define_insn_and_split "aarch64_simd_bsldi_internal"
- [(set (match_operand:DI 0 "register_operand" "=w,w,w,&r")
+ [(set (match_operand:DI 0 "register_operand")
(xor:DI
(and:DI
(xor:DI
- (match_operand:DI 3 "register_operand" "w,0,w,r")
- (match_operand:DI 2 "register_operand" "w,w,0,r"))
- (match_operand:DI 1 "register_operand" "0,w,w,r"))
+ (match_operand:DI 3 "register_operand")
+ (match_operand:DI 2 "register_operand"))
+ (match_operand:DI 1 "register_operand"))
(match_dup:DI 3)
))]
"TARGET_SIMD"
- "@
- bsl\\t%0.8b, %2.8b, %3.8b
- bit\\t%0.8b, %2.8b, %1.8b
- bif\\t%0.8b, %3.8b, %1.8b
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: type , length ]
+ [ w , 0 , w , w ; neon_bsl , 4 ] bsl\t%0.8b, %2.8b, %3.8b
+ [ w , w , w , 0 ; neon_bsl , 4 ] bit\t%0.8b, %2.8b, %1.8b
+ [ w , w , 0 , w ; neon_bsl , 4 ] bif\t%0.8b, %3.8b, %1.8b
+ [ &r , r , r , r ; multiple , 12 ] #
+ }
"&& REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
[(match_dup 1) (match_dup 1) (match_dup 2) (match_dup 3)]
{
@@ -3789,26 +3772,25 @@
emit_insn (gen_xordi3 (operands[0], scratch, operands[3]));
DONE;
}
- [(set_attr "type" "neon_bsl,neon_bsl,neon_bsl,multiple")
- (set_attr "length" "4,4,4,12")]
)
(define_insn_and_split "aarch64_simd_bsldi_alt"
- [(set (match_operand:DI 0 "register_operand" "=w,w,w,&r")
+ [(set (match_operand:DI 0 "register_operand")
(xor:DI
(and:DI
(xor:DI
- (match_operand:DI 3 "register_operand" "w,w,0,r")
- (match_operand:DI 2 "register_operand" "w,0,w,r"))
- (match_operand:DI 1 "register_operand" "0,w,w,r"))
+ (match_operand:DI 3 "register_operand")
+ (match_operand:DI 2 "register_operand"))
+ (match_operand:DI 1 "register_operand"))
(match_dup:DI 2)
))]
"TARGET_SIMD"
- "@
- bsl\\t%0.8b, %3.8b, %2.8b
- bit\\t%0.8b, %3.8b, %1.8b
- bif\\t%0.8b, %2.8b, %1.8b
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: type , length ]
+ [ w , 0 , w , w ; neon_bsl , 4 ] bsl\t%0.8b, %3.8b, %2.8b
+ [ w , w , 0 , w ; neon_bsl , 4 ] bit\t%0.8b, %3.8b, %1.8b
+ [ w , w , w , 0 ; neon_bsl , 4 ] bif\t%0.8b, %2.8b, %1.8b
+ [ &r , r , r , r ; multiple , 12 ] #
+ }
"&& REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
[(match_dup 0) (match_dup 1) (match_dup 2) (match_dup 3)]
{
@@ -3831,8 +3813,6 @@
emit_insn (gen_xordi3 (operands[0], scratch, operands[2]));
DONE;
}
- [(set_attr "type" "neon_bsl,neon_bsl,neon_bsl,multiple")
- (set_attr "length" "4,4,4,12")]
)
(define_expand "aarch64_simd_bsl<mode>"
@@ -4385,15 +4365,15 @@
;; This dedicated pattern must come first.
(define_insn "store_pair_lanes<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_mem_pair_lanes_operand" "=Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_mem_pair_lanes_operand")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 1 "register_operand" "w, r")
- (match_operand:VDCSIF 2 "register_operand" "w, r")))]
+ (match_operand:VDCSIF 1 "register_operand")
+ (match_operand:VDCSIF 2 "register_operand")))]
"TARGET_FLOAT"
- "@
- stp\t%<single_type>1, %<single_type>2, %y0
- stp\t%<single_wx>1, %<single_wx>2, %y0"
- [(set_attr "type" "neon_stp, store_16")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ Umn , w , w ; neon_stp ] stp\t%<single_type>1, %<single_type>2, %y0
+ [ Umn , r , r ; store_16 ] stp\t%<single_wx>1, %<single_wx>2, %y0
+ }
)
;; Form a vector whose least significant half comes from operand 1 and whose
@@ -4404,73 +4384,70 @@
;; the register alternatives either don't accept or themselves disparage.
(define_insn "*aarch64_combine_internal<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")
- (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, w, ?r")))]
+ (match_operand:VDCSIF 1 "register_operand")
+ (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand")))]
"TARGET_FLOAT
&& !BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
- "@
- ins\t%0.<single_type>[1], %2.<single_type>[0]
- ins\t%0.<single_type>[1], %<single_wx>2
- fmov\t%0.d[1], %2
- ld1\t{%0.<single_type>}[1], %2
- stp\t%<single_type>1, %<single_type>2, %y0
- stp\t%<single_wx>1, %<single_wx>2, %y0"
- [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr,
- neon_load1_one_lane<dblq>, neon_stp, store_16")
- (set_attr "arch" "simd,simd,*,simd,*,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ w , 0 , w ; neon_ins<dblq> , simd ] ins\t%0.<single_type>[1], %2.<single_type>[0]
+ [ w , 0 , ?r ; neon_from_gp<dblq> , simd ] ins\t%0.<single_type>[1], %<single_wx>2
+ [ w , 0 , ?r ; f_mcr , * ] fmov\t%0.d[1], %2
+ [ w , 0 , Utv ; neon_load1_one_lane<dblq> , simd ] ld1\t{%0.<single_type>}[1], %2
+ [ Umn , ?w , w ; neon_stp , * ] stp\t%<single_type>1, %<single_type>2, %y0
+ [ Umn , ?r , ?r ; store_16 , * ] stp\t%<single_wx>1, %<single_wx>2, %y0
+ }
)
(define_insn "*aarch64_combine_internal_be<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, ?w, ?r")
- (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")))]
+ (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand")
+ (match_operand:VDCSIF 1 "register_operand")))]
"TARGET_FLOAT
&& BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
- "@
- ins\t%0.<single_type>[1], %2.<single_type>[0]
- ins\t%0.<single_type>[1], %<single_wx>2
- fmov\t%0.d[1], %2
- ld1\t{%0.<single_type>}[1], %2
- stp\t%<single_type>2, %<single_type>1, %y0
- stp\t%<single_wx>2, %<single_wx>1, %y0"
- [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr, neon_load1_one_lane<dblq>, neon_stp, store_16")
- (set_attr "arch" "simd,simd,*,simd,*,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ w , 0 , w ; neon_ins<dblq> , simd ] ins\t%0.<single_type>[1], %2.<single_type>[0]
+ [ w , 0 , ?r ; neon_from_gp<dblq> , simd ] ins\t%0.<single_type>[1], %<single_wx>2
+ [ w , 0 , ?r ; f_mcr , * ] fmov\t%0.d[1], %2
+ [ w , 0 , Utv ; neon_load1_one_lane<dblq> , simd ] ld1\t{%0.<single_type>}[1], %2
+ [ Umn , ?w , ?w ; neon_stp , * ] stp\t%<single_type>2, %<single_type>1, %y0
+ [ Umn , ?r , ?r ; store_16 , * ] stp\t%<single_wx>2, %<single_wx>1, %y0
+ }
)
;; In this insn, operand 1 should be low, and operand 2 the high part of the
;; dest vector.
(define_insn "*aarch64_combinez<mode>"
- [(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
+ [(set (match_operand:<VDBL> 0 "register_operand")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")
+ (match_operand:VDCSIF 1 "nonimmediate_operand")
(match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")))]
"TARGET_FLOAT && !BYTES_BIG_ENDIAN"
- "@
- fmov\\t%<single_type>0, %<single_type>1
- fmov\t%<single_type>0, %<single_wx>1
- ldr\\t%<single_type>0, %1"
- [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ w , w ; neon_move<q> ] fmov\t%<single_type>0, %<single_type>1
+ [ w , ?r ; neon_from_gp ] fmov\t%<single_type>0, %<single_wx>1
+ [ w , m ; neon_load1_1reg ] ldr\t%<single_type>0, %1
+ }
)
(define_insn "*aarch64_combinez_be<mode>"
- [(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
+ [(set (match_operand:<VDBL> 0 "register_operand")
(vec_concat:<VDBL>
(match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")
- (match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")))]
+ (match_operand:VDCSIF 1 "nonimmediate_operand")))]
"TARGET_FLOAT && BYTES_BIG_ENDIAN"
- "@
- fmov\\t%<single_type>0, %<single_type>1
- fmov\t%<single_type>0, %<single_wx>1
- ldr\\t%<single_type>0, %1"
- [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ w , w ; neon_move<q> ] fmov\t%<single_type>0, %<single_type>1
+ [ w , ?r ; neon_from_gp ] fmov\t%<single_type>0, %<single_wx>1
+ [ w , m ; neon_load1_1reg ] ldr\t%<single_type>0, %1
+ }
)
;; Form a vector whose first half (in array order) comes from operand 1
@@ -7051,17 +7028,17 @@
;; have different ideas of what should be passed to this pattern.
(define_insn "aarch64_cm<optab><mode><vczle><vczbe>"
- [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(neg:<V_INT_EQUIV>
(COMPARISONS:<V_INT_EQUIV>
- (match_operand:VDQ_I 1 "register_operand" "w,w")
- (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero" "w,ZDz")
+ (match_operand:VDQ_I 1 "register_operand")
+ (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero")
)))]
"TARGET_SIMD"
- "@
- cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
- cm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
- [(set_attr "type" "neon_compare<q>, neon_compare_zero<q>")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ w , w , w ; neon_compare<q> ] cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
+ [ w , w , ZDz ; neon_compare_zero<q> ] cm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0
+ }
)
(define_insn_and_split "aarch64_cm<optab>di"
@@ -7100,17 +7077,17 @@
)
(define_insn "*aarch64_cm<optab>di"
- [(set (match_operand:DI 0 "register_operand" "=w,w")
+ [(set (match_operand:DI 0 "register_operand")
(neg:DI
(COMPARISONS:DI
- (match_operand:DI 1 "register_operand" "w,w")
- (match_operand:DI 2 "aarch64_simd_reg_or_zero" "w,ZDz")
+ (match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "aarch64_simd_reg_or_zero")
)))]
"TARGET_SIMD && reload_completed"
- "@
- cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2>
- cm<optab>\t%d0, %d1, #0"
- [(set_attr "type" "neon_compare, neon_compare_zero")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ w , w , w ; neon_compare ] cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2>
+ [ w , w , ZDz ; neon_compare_zero ] cm<optab>\t%d0, %d1, #0
+ }
)
;; cm(hs|hi)
@@ -7268,16 +7245,17 @@
;; fcm(eq|ge|gt|le|lt)
(define_insn "aarch64_cm<optab><mode><vczle><vczbe>"
- [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(neg:<V_INT_EQUIV>
(COMPARISONS:<V_INT_EQUIV>
- (match_operand:VHSDF_HSDF 1 "register_operand" "w,w")
- (match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero" "w,YDz")
+ (match_operand:VHSDF_HSDF 1 "register_operand")
+ (match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero")
)))]
"TARGET_SIMD"
- "@
- fcm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
- fcm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , w , w ] fcm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
+ [ w , w , YDz ] fcm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0
+ }
[(set_attr "type" "neon_fp_compare_<stype><q>")]
)
@@ -7880,33 +7858,29 @@
)
(define_insn "*aarch64_mov<mode>"
- [(set (match_operand:VSTRUCT_QD 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
- (match_operand:VSTRUCT_QD 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+ [(set (match_operand:VSTRUCT_QD 0 "aarch64_simd_nonimmediate_operand")
+ (match_operand:VSTRUCT_QD 1 "aarch64_simd_general_operand"))]
"TARGET_SIMD && !BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
- "@
- #
- st1\\t{%S1.<Vtype> - %<Vendreg>1.<Vtype>}, %0
- ld1\\t{%S0.<Vtype> - %<Vendreg>0.<Vtype>}, %1"
- [(set_attr "type" "multiple,neon_store<nregs>_<nregs>reg_q,\
- neon_load<nregs>_<nregs>reg_q")
- (set_attr "length" "<insn_count>,4,4")]
+ {@ [ cons: =0 , 1 ; attrs: type , length ]
+ [ w , w ; multiple , <insn_count> ] #
+ [ Utv , w ; neon_store<nregs>_<nregs>reg_q , 4 ] st1\t{%S1.<Vtype> - %<Vendreg>1.<Vtype>}, %0
+ [ w , Utv ; neon_load<nregs>_<nregs>reg_q , 4 ] ld1\t{%S0.<Vtype> - %<Vendreg>0.<Vtype>}, %1
+ }
)
(define_insn "*aarch64_mov<mode>"
- [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
- (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand")
+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand"))]
"TARGET_SIMD && !BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
- "@
- #
- st1\\t{%S1.16b - %<Vendreg>1.16b}, %0
- ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1"
- [(set_attr "type" "multiple,neon_store<nregs>_<nregs>reg_q,\
- neon_load<nregs>_<nregs>reg_q")
- (set_attr "length" "<insn_count>,4,4")]
+ {@ [ cons: =0 , 1 ; attrs: type , length ]
+ [ w , w ; multiple , <insn_count> ] #
+ [ Utv , w ; neon_store<nregs>_<nregs>reg_q , 4 ] st1\t{%S1.16b - %<Vendreg>1.16b}, %0
+ [ w , Utv ; neon_load<nregs>_<nregs>reg_q , 4 ] ld1\t{%S0.16b - %<Vendreg>0.16b}, %1
+ }
)
(define_insn "*aarch64_movv8di"
@@ -7939,50 +7913,45 @@
)
(define_insn "*aarch64_be_mov<mode>"
- [(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand" "=w,m,w")
- (match_operand:VSTRUCT_2D 1 "general_operand" " w,w,m"))]
+ [(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand")
+ (match_operand:VSTRUCT_2D 1 "general_operand"))]
"TARGET_FLOAT
&& (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
- "@
- #
- stp\\t%d1, %R1, %0
- ldp\\t%d0, %R0, %1"
- [(set_attr "type" "multiple,neon_stp,neon_ldp")
- (set_attr "length" "8,4,4")]
+ {@ [ cons: =0 , 1 ; attrs: type , length ]
+ [ w , w ; multiple , 8 ] #
+ [ m , w ; neon_stp , 4 ] stp\t%d1, %R1, %0
+ [ w , m ; neon_ldp , 4 ] ldp\t%d0, %R0, %1
+ }
)
(define_insn "*aarch64_be_mov<mode>"
- [(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand" "=w,m,w")
- (match_operand:VSTRUCT_2Q 1 "general_operand" " w,w,m"))]
+ [(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand")
+ (match_operand:VSTRUCT_2Q 1 "general_operand"))]
"TARGET_FLOAT
&& (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
- "@
- #
- stp\\t%q1, %R1, %0
- ldp\\t%q0, %R0, %1"
- [(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
- (set_attr "arch" "simd,*,*")
- (set_attr "length" "8,4,4")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch , length ]
+ [ w , w ; multiple , simd , 8 ] #
+ [ m , w ; neon_stp_q , * , 4 ] stp\t%q1, %R1, %0
+ [ w , m ; neon_ldp_q , * , 4 ] ldp\t%q0, %R0, %1
+ }
)
(define_insn "*aarch64_be_movoi"
- [(set (match_operand:OI 0 "nonimmediate_operand" "=w,m,w")
- (match_operand:OI 1 "general_operand" " w,w,m"))]
+ [(set (match_operand:OI 0 "nonimmediate_operand")
+ (match_operand:OI 1 "general_operand"))]
"TARGET_FLOAT
&& (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], OImode)
|| register_operand (operands[1], OImode))"
- "@
- #
- stp\\t%q1, %R1, %0
- ldp\\t%q0, %R0, %1"
- [(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
- (set_attr "arch" "simd,*,*")
- (set_attr "length" "8,4,4")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch , length ]
+ [ w , w ; multiple , simd , 8 ] #
+ [ m , w ; neon_stp_q , * , 4 ] stp\t%q1, %R1, %0
+ [ w , m ; neon_ldp_q , * , 4 ] ldp\t%q0, %R0, %1
+ }
)
(define_insn "*aarch64_be_mov<mode>"
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index b223e7d..5a652d8 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -687,33 +687,35 @@
;; and after RA; before RA we want the predicated load and store patterns to
;; be used instead.
(define_insn "*aarch64_sve_mov<mode>_ldr_str"
- [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
- (match_operand:SVE_FULL 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
+ [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand")
+ (match_operand:SVE_FULL 1 "aarch64_sve_general_operand"))]
"TARGET_SVE
&& (<MODE>mode == VNx16QImode || !BYTES_BIG_ENDIAN)
&& ((lra_in_progress || reload_completed)
|| (register_operand (operands[0], <MODE>mode)
&& nonmemory_operand (operands[1], <MODE>mode)))"
- "@
- ldr\t%0, %1
- str\t%1, %0
- mov\t%0.d, %1.d
- * return aarch64_output_sve_mov_immediate (operands[1]);"
+ {@ [ cons: =0 , 1 ]
+ [ w , Utr ] ldr\t%0, %1
+ [ Utr , w ] str\t%1, %0
+ [ w , w ] mov\t%0.d, %1.d
+ [ w , Dn ] << aarch64_output_sve_mov_immediate (operands[1]);
+ }
)
;; Unpredicated moves that cannot use LDR and STR, i.e. partial vectors
;; or vectors for which little-endian ordering isn't acceptable. Memory
;; accesses require secondary reloads.
(define_insn "*aarch64_sve_mov<mode>_no_ldr_str"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w")
- (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand" "w, Dn"))]
+ [(set (match_operand:SVE_ALL 0 "register_operand")
+ (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand"))]
"TARGET_SVE
&& <MODE>mode != VNx16QImode
&& (BYTES_BIG_ENDIAN
|| maybe_ne (BYTES_PER_SVE_VECTOR, GET_MODE_SIZE (<MODE>mode)))"
- "@
- mov\t%0.d, %1.d
- * return aarch64_output_sve_mov_immediate (operands[1]);"
+ {@ [ cons: =0 , 1 ]
+ [ w , w ] mov\t%0.d, %1.d
+ [ w , Dn ] << aarch64_output_sve_mov_immediate (operands[1]);
+ }
)
;; Handle memory reloads for modes that can't use LDR and STR. We use
@@ -743,18 +745,19 @@
;; Note that this pattern is generated directly by aarch64_emit_sve_pred_move,
;; so changes to this pattern will need changes there as well.
(define_insn_and_split "@aarch64_pred_mov<mode>"
- [(set (match_operand:SVE_ALL 0 "nonimmediate_operand" "=w, w, m")
+ [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (match_operand:SVE_ALL 2 "nonimmediate_operand" "w, m, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_ALL 2 "nonimmediate_operand")]
UNSPEC_PRED_X))]
"TARGET_SVE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
- "@
- #
- ld1<Vesize>\t%0.<Vctype>, %1/z, %2
- st1<Vesize>\t%2.<Vctype>, %1, %0"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , Upl , w ] #
+ [ w , Upl , m ] ld1<Vesize>\t%0.<Vctype>, %1/z, %2
+ [ m , Upl , w ] st1<Vesize>\t%2.<Vctype>, %1, %0
+ }
"&& register_operand (operands[0], <MODE>mode)
&& register_operand (operands[2], <MODE>mode)"
[(set (match_dup 0) (match_dup 2))]
@@ -949,16 +952,17 @@
)
(define_insn "*aarch64_sve_mov<mode>"
- [(set (match_operand:PRED_ALL 0 "nonimmediate_operand" "=Upa, m, Upa, Upa")
- (match_operand:PRED_ALL 1 "aarch64_mov_operand" "Upa, Upa, m, Dn"))]
+ [(set (match_operand:PRED_ALL 0 "nonimmediate_operand")
+ (match_operand:PRED_ALL 1 "aarch64_mov_operand"))]
"TARGET_SVE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
- "@
- mov\t%0.b, %1.b
- str\t%1, %0
- ldr\t%0, %1
- * return aarch64_output_sve_mov_immediate (operands[1]);"
+ {@ [ cons: =0 , 1 ]
+ [ Upa , Upa ] mov\t%0.b, %1.b
+ [ m , Upa ] str\t%1, %0
+ [ Upa , m ] ldr\t%0, %1
+ [ Upa , Dn ] << aarch64_output_sve_mov_immediate (operands[1]);
+ }
)
;; Match PTRUES Pn.B when both the predicate and flags are useful.
@@ -1079,13 +1083,14 @@
;; Write to the FFR and start a new FFRT scheduling region.
(define_insn "aarch64_wrffr"
[(set (reg:VNx16BI FFR_REGNUM)
- (match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one" "Dm, Upa"))
+ (match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one"))
(set (reg:VNx16BI FFRT_REGNUM)
(unspec:VNx16BI [(match_dup 0)] UNSPEC_WRFFR))]
"TARGET_SVE"
- "@
- setffr
- wrffr\t%0.b"
+ {@ [ cons: 0 ]
+ [ Dm ] setffr
+ [ Upa ] wrffr\t%0.b
+ }
)
;; [L2 in the block comment above about FFR handling]
@@ -2331,21 +2336,22 @@
(define_insn "mask_scatter_store<mode><v_int_container>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>" "Z, vgw, rk, rk, rk, rk")
- (match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
- (match_operand:SVE_4 4 "register_operand" "w, w, w, w, w, w")]
+ [(match_operand:VNx4BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>")
+ (match_operand:VNx4SI 1 "register_operand")
+ (match_operand:DI 2 "const_int_operand")
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_4 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.s, %5, [%1.s]
- st1<Vesize>\t%4.s, %5, [%1.s, #%0]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+ {@ [ cons: 0 , 1 , 2 , 3 , 4 , 5 ]
+ [ Z , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%1.s]
+ [ vgw , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%1.s, #%0]
+ [ rk , w , Z , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
+ [ rk , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
+ [ rk , w , Z , i , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
+ [ rk , w , Ui1 , i , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]
+ }
)
;; Predicated scatter stores for 64-bit elements. The value of operand 2
@@ -2353,40 +2359,42 @@
(define_insn "mask_scatter_store<mode><v_int_container>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>" "Z, vgd, rk, rk")
- (match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w, w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%1.d]
- st1<Vesize>\t%4.d, %5, [%1.d, #%0]
- st1<Vesize>\t%4.d, %5, [%0, %1.d]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ Z , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%1.d]
+ [ vgd , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%1.d, #%0]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]
+ }
)
;; Likewise, but with the offset being extended from 32 bits.
(define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(ANY_EXTEND:VNx2DI
- (match_operand:VNx2SI 1 "register_operand" "w, w"))]
+ (match_operand:VNx2SI 1 "register_operand"))]
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (<VPRED>mode);
@@ -2398,22 +2406,23 @@
(define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_sxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 1 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 1 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (<VPRED>mode);
@@ -2425,19 +2434,20 @@
(define_insn "*mask_scatter_store<mode><v_int_container>_uxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_reg_or_zero")
(and:VNx2DI
- (match_operand:VNx2DI 1 "register_operand" "w, w")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
+ }
)
;; -------------------------------------------------------------------------
@@ -2454,22 +2464,23 @@
(define_insn "@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:VNx4BI 5 "register_operand")
(match_operand:DI 0 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>" "Z, vg<VNx4_NARROW:Vesize>, rk, rk, rk, rk")
- (match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
+ (match_operand:VNx4SI 1 "register_operand")
+ (match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
(truncate:VNx4_NARROW
- (match_operand:VNx4_WIDE 4 "register_operand" "w, w, w, w, w, w"))]
+ (match_operand:VNx4_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+ {@ [ cons: 1 , 2 , 4 , 5 ]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
+ [ w , Z , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
+ [ w , Z , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]
+ }
)
;; Predicated truncating scatter stores for 64-bit elements. The value of
@@ -2477,43 +2488,45 @@
(define_insn "@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 0 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>" "Z, vg<VNx2_NARROW:Vesize>, rk, rk")
- (match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, Ui1, Ui1, i")
(truncate:VNx2_NARROW
- (match_operand:VNx2_WIDE 4 "register_operand" "w, w, w, w"))]
+ (match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+ {@ [ cons: 1 , 4 , 5 ]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]
+ }
)
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 1 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 1 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
(truncate:VNx2_NARROW
- (match_operand:VNx2_WIDE 4 "register_operand" "w, w"))]
+ (match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]"
+ {@ [ cons: 0 , 1 , 4 , 5 ]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
+ }
"&& !rtx_equal_p (operands[5], operands[6])"
{
operands[6] = copy_rtx (operands[5]);
@@ -2524,20 +2537,21 @@
(define_insn "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_reg_or_zero")
(and:VNx2DI
- (match_operand:VNx2DI 1 "register_operand" "w, w")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
(truncate:VNx2_NARROW
- (match_operand:VNx2_WIDE 4 "register_operand" "w, w"))]
+ (match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+ {@ [ cons: 0 , 1 , 4 , 5 ]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
+ }
)
;; =========================================================================
@@ -2587,15 +2601,16 @@
;; the load at the first opportunity in order to allow the PTRUE to be
;; optimized with surrounding code.
(define_insn_and_split "*vec_duplicate<mode>_reg"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(vec_duplicate:SVE_ALL
- (match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Uty")))
+ (match_operand:<VEL> 1 "aarch64_sve_dup_operand")))
(clobber (match_scratch:VNx16BI 2 "=X, X, Upl"))]
"TARGET_SVE"
- "@
- mov\t%0.<Vetype>, %<vwcore>1
- mov\t%0.<Vetype>, %<Vetype>1
- #"
+ {@ [ cons: =0 , 1 ; attrs: length ]
+ [ w , r ; 4 ] mov\t%0.<Vetype>, %<vwcore>1
+ [ w , w ; 4 ] mov\t%0.<Vetype>, %<Vetype>1
+ [ w , Uty ; 8 ] #
+ }
"&& MEM_P (operands[1])"
[(const_int 0)]
{
@@ -2607,7 +2622,6 @@
CONST0_RTX (<MODE>mode)));
DONE;
}
- [(set_attr "length" "4,4,8")]
)
;; Duplicate an Advanced SIMD vector to fill an SVE vector (LE version).
@@ -2739,18 +2753,18 @@
;; Shift an SVE vector left and insert a scalar into element 0.
(define_insn "vec_shl_insert_<mode>"
- [(set (match_operand:SVE_FULL 0 "register_operand" "=?w, w, ??&w, ?&w")
+ [(set (match_operand:SVE_FULL 0 "register_operand")
(unspec:SVE_FULL
- [(match_operand:SVE_FULL 1 "register_operand" "0, 0, w, w")
- (match_operand:<VEL> 2 "aarch64_reg_or_zero" "rZ, w, rZ, w")]
+ [(match_operand:SVE_FULL 1 "register_operand")
+ (match_operand:<VEL> 2 "aarch64_reg_or_zero")]
UNSPEC_INSR))]
"TARGET_SVE"
- "@
- insr\t%0.<Vetype>, %<vwcore>2
- insr\t%0.<Vetype>, %<Vetype>2
- movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
- movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2"
- [(set_attr "movprfx" "*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ ?w , 0 , rZ ; * ] insr\t%0.<Vetype>, %<vwcore>2
+ [ w , 0 , w ; * ] insr\t%0.<Vetype>, %<Vetype>2
+ [ ??&w , w , rZ ; yes ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2
+ }
)
;; -------------------------------------------------------------------------
@@ -2761,15 +2775,16 @@
;; -------------------------------------------------------------------------
(define_insn "vec_series<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(vec_series:SVE_I
- (match_operand:<VEL> 1 "aarch64_sve_index_operand" "Usi, r, r")
- (match_operand:<VEL> 2 "aarch64_sve_index_operand" "r, Usi, r")))]
+ (match_operand:<VEL> 1 "aarch64_sve_index_operand")
+ (match_operand:<VEL> 2 "aarch64_sve_index_operand")))]
"TARGET_SVE"
- "@
- index\t%0.<Vctype>, #%1, %<vccore>2
- index\t%0.<Vctype>, %<vccore>1, #%2
- index\t%0.<Vctype>, %<vccore>1, %<vccore>2"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , Usi , r ] index\t%0.<Vctype>, #%1, %<vccore>2
+ [ w , r , Usi ] index\t%0.<Vctype>, %<vccore>1, #%2
+ [ w , r , r ] index\t%0.<Vctype>, %<vccore>1, %<vccore>2
+ }
)
;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
@@ -2968,15 +2983,16 @@
;; Extract the last active element of operand 1 into operand 0.
;; If no elements are active, extract the last inactive element instead.
(define_insn "@extract_<last_op>_<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
+ [(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 2 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_FULL 2 "register_operand")]
LAST))]
"TARGET_SVE"
- "@
- last<ab>\t%<vwcore>0, %1, %2.<Vetype>
- last<ab>\t%<Vetype>0, %1, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ ?r , Upl , w ] last<ab>\t%<vwcore>0, %1, %2.<Vetype>
+ [ w , Upl , w ] last<ab>\t%<Vetype>0, %1, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3036,17 +3052,17 @@
;; Integer unary arithmetic predicated with a PTRUE.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w"))]
+ (match_operand:SVE_I 2 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated integer unary arithmetic with merging.
@@ -3063,18 +3079,18 @@
;; Predicated integer unary arithmetic, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w"))
+ (match_operand:SVE_I 2 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated integer unary arithmetic, merging with an independent value.
@@ -3085,19 +3101,19 @@
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w"))
- (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_I 2 "register_operand"))
+ (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3112,18 +3128,18 @@
;; Predicated integer unary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")]
SVE_INT_UNARY)]
UNSPEC_PRED_X))]
"TARGET_SVE && <elem_bits> >= <min_elem_bits>"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Another way of expressing the REVB, REVH and REVW patterns, with this
@@ -3131,36 +3147,36 @@
;; of lanes and the data mode decides the granularity of the reversal within
;; each lane.
(define_insn "@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
- [(match_operand:PRED_HSD 1 "register_operand" "Upl, Upl")
+ [(match_operand:PRED_HSD 1 "register_operand")
(unspec:SVE_ALL
- [(match_operand:SVE_ALL 2 "register_operand" "0, w")]
+ [(match_operand:SVE_ALL 2 "register_operand")]
UNSPEC_REVBHW)]
UNSPEC_PRED_X))]
"TARGET_SVE && <PRED_HSD:elem_bits> > <SVE_ALL:container_bits>"
- "@
- rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
- movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
+ }
)
;; Predicated integer unary operations with merging.
(define_insn "@cond_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")]
SVE_INT_UNARY)
- (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <elem_bits> >= <min_elem_bits>"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3191,53 +3207,53 @@
;; Predicated sign and zero extension from a narrower mode.
(define_insn "*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2"
- [(set (match_operand:SVE_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_HSDI 0 "register_operand")
(unspec:SVE_HSDI
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
(ANY_EXTEND:SVE_HSDI
- (match_operand:SVE_PARTIAL_I 2 "register_operand" "0, w"))]
+ (match_operand:SVE_PARTIAL_I 2 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
- movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+ }
)
;; Predicated truncate-and-sign-extend operations.
(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(sign_extend:SVE_FULL_HSDI
(truncate:SVE_PARTIAL_I
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")))]
UNSPEC_PRED_X))]
"TARGET_SVE
&& (~<SVE_FULL_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated truncate-and-sign-extend operations with merging.
(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(sign_extend:SVE_FULL_HSDI
(truncate:SVE_PARTIAL_I
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")))
- (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")))
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& (~<SVE_FULL_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated truncate-and-zero-extend operations, merging with the
@@ -3246,19 +3262,19 @@
;; The canonical form of this operation is an AND of a constant rather
;; than (zero_extend (truncate ...)).
(define_insn "*cond_uxt<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated truncate-and-zero-extend operations, merging with an
@@ -3270,20 +3286,20 @@
;; as early-clobber helps to make the instruction more regular to the
;; register allocator.
(define_insn "*cond_uxt<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3338,23 +3354,23 @@
)
(define_insn "*cnot<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
[(unspec:<VPRED>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
(match_operand:SVE_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated logical inverse with merging.
@@ -3385,16 +3401,16 @@
;; Predicated logical inverse, merging with the first input.
(define_insn_and_rewrite "*cond_cnot<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
;; Logical inverse of operand 2 (as above).
(unspec:SVE_I
[(unspec:<VPRED>
[(match_operand 5)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
(match_operand:SVE_I 4 "aarch64_simd_imm_one")
@@ -3403,14 +3419,14 @@
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated logical inverse, merging with an independent value.
@@ -3421,33 +3437,33 @@
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn_and_rewrite "*cond_cnot<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
;; Logical inverse of operand 2 (as above).
(unspec:SVE_I
[(unspec:<VPRED>
[(match_operand 5)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
(match_operand:SVE_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL)
- (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[6])"
- "@
- cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %6\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 6 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %6\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
;; -------------------------------------------------------------------------
@@ -3512,17 +3528,17 @@
;; Predicated floating-point unary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated floating-point unary arithmetic with merging.
@@ -3542,43 +3558,43 @@
;; Predicated floating-point unary arithmetic, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 3)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[3])"
{
operands[3] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated floating-point unary arithmetic, merging with an independent
@@ -3590,45 +3606,45 @@
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3767,19 +3783,20 @@
;; and would make the instruction seem less uniform to the register
;; allocator.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_IMM:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, 0, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand" "<sve_imm_con>, w, <sve_imm_con>, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- #
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , <sve_imm_con> ; * ] #
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , <sve_imm_con> ; yes ] #
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -3787,7 +3804,6 @@
[(set (match_dup 0)
(SVE_INT_BINARY_IMM:SVE_I (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,yes,yes")]
)
;; Unpredicated binary operations with a constant (post-RA only).
@@ -3820,57 +3836,58 @@
;; Predicated integer operations, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer operations, merging with the second input.
(define_insn "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "0, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated integer operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -3899,19 +3916,19 @@
;; -------------------------------------------------------------------------
(define_insn "add<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?w, ?w, w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(plus:SVE_I
- (match_operand:SVE_I 1 "register_operand" "%0, 0, 0, w, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_add_operand" "vsa, vsn, vsi, vsa, vsn, w")))]
+ (match_operand:SVE_I 1 "register_operand")
+ (match_operand:SVE_I 2 "aarch64_sve_add_operand")))]
"TARGET_SVE"
- "@
- add\t%0.<Vetype>, %0.<Vetype>, #%D2
- sub\t%0.<Vetype>, %0.<Vetype>, #%N2
- * return aarch64_output_sve_vector_inc_dec (\"%0.<Vetype>\", operands[2]);
- movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
- movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
- add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,*,*,yes,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , vsa ; * ] add\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ w , 0 , vsn ; * ] sub\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ w , 0 , vsi ; * ] << aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);
+ [ ?w , w , vsa ; yes ] movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ ?w , w , vsn ; yes ] movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ w , w , w ; * ] add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ }
)
;; Merging forms are handled through SVE_INT_BINARY.
@@ -3925,16 +3942,16 @@
;; -------------------------------------------------------------------------
(define_insn "sub<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 1 "aarch64_sve_arith_operand" "w, vsa, vsa")
- (match_operand:SVE_I 2 "register_operand" "w, 0, w")))]
+ (match_operand:SVE_I 1 "aarch64_sve_arith_operand")
+ (match_operand:SVE_I 2 "register_operand")))]
"TARGET_SVE"
- "@
- sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
- subr\t%0.<Vetype>, %0.<Vetype>, #%D1
- movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , w , w ; * ] sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ [ w , vsa , 0 ; * ] subr\t%0.<Vetype>, %0.<Vetype>, #%D1
+ [ ?&w , vsa , w ; yes ] movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1
+ }
)
;; Merging forms are handled through SVE_INT_BINARY.
@@ -4108,13 +4125,13 @@
;; Predicated integer absolute difference.
(define_insn "@aarch64_pred_<su>abd<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(minus:SVE_I
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_dup 1)
@@ -4123,10 +4140,10 @@
(match_dup 3))]
UNSPEC_PRED_X)))]
"TARGET_SVE"
- "@
- <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
(define_expand "@aarch64_cond_<su>abd<mode>"
@@ -4156,15 +4173,15 @@
;; Predicated integer absolute difference, merging with the first input.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
(unspec:SVE_I
[(match_operand 4)
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_operand 5)
@@ -4175,27 +4192,27 @@
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
{
operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated integer absolute difference, merging with the second input.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
(unspec:SVE_I
[(match_operand 4)
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "0, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_operand 5)
@@ -4206,27 +4223,27 @@
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
{
operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated integer absolute difference, merging with an independent value.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
(unspec:SVE_I
[(match_operand 5)
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_operand 6)
@@ -4234,17 +4251,18 @@
(match_dup 2)
(match_dup 3))]
UNSPEC_PRED_X))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (!CONSTANT_P (operands[5]) || !CONSTANT_P (operands[6]))
@@ -4274,32 +4292,32 @@
;; Unpredicated saturating signed addition and subtraction.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w, w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(SBINQOPS:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "0, 0, w, w, w")
- (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand" "vsQ, vsS, vsQ, vsS, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand")))]
"TARGET_SVE"
- "@
- <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- <binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
- movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
- <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,*,yes,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , vsQ ; * ] <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ w , 0 , vsS ; * ] <binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ ?&w , w , vsQ ; yes ] movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ ?&w , w , vsS ; yes ] movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ w , w , w ; * ] <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ }
)
;; Unpredicated saturating unsigned addition and subtraction.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(UBINQOPS:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand" "vsa, vsa, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand")))]
"TARGET_SVE"
- "@
- <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , vsa ; * ] <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ ?&w , w , vsa ; yes ] movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ w , w , w ; * ] <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -4328,19 +4346,19 @@
;; Predicated highpart multiplication.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")]
MUL_HIGHPART)]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated highpart multiplications with merging.
@@ -4364,36 +4382,38 @@
;; Predicated highpart multiplications, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
MUL_HIGHPART)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")])
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
+)
;; Predicated highpart multiplications, merging with zero.
(define_insn "*cond_<optab><mode>_z"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "%0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
MUL_HIGHPART)
(match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ &w , Upl , %0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
[(set_attr "movprfx" "yes")])
;; -------------------------------------------------------------------------
@@ -4423,19 +4443,19 @@
;; Integer division predicated with a PTRUE.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w"))]
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer division with merging.
@@ -4453,57 +4473,58 @@
;; Predicated integer division, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer division, merging with the second input.
(define_insn "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w"))
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated integer division, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w, w, w"))
- (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
+ (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4526,16 +4547,16 @@
;; Unpredicated integer binary logical operations.
(define_insn "<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?w, w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(LOGICAL:SVE_I
- (match_operand:SVE_I 1 "register_operand" "%0, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_logical_operand" "vsl, vsl, w")))]
+ (match_operand:SVE_I 1 "register_operand")
+ (match_operand:SVE_I 2 "aarch64_sve_logical_operand")))]
"TARGET_SVE"
- "@
- <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
- movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
- <logical>\t%0.d, %1.d, %2.d"
- [(set_attr "movprfx" "*,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , vsl ; * ] <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+ [ ?w , w , vsl ; yes ] movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+ [ w , w , w ; * ] <logical>\t%0.d, %1.d, %2.d
+ }
)
;; Merging forms are handled through SVE_INT_BINARY.
@@ -4595,39 +4616,40 @@
;; Predicated integer BIC, merging with the first input.
(define_insn "*cond_bic<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
(not:SVE_I
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 2 "register_operand" "0, w"))
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 2 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer BIC, merging with an independent value.
(define_insn_and_rewrite "*cond_bic<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
(not:SVE_I
- (match_operand:SVE_I 3 "register_operand" "w, w, w, w"))
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 2 "register_operand"))
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4697,24 +4719,24 @@
;; likely to gain much and would make the instruction seem less uniform
;; to the register allocator.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- #
- <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , D<lr> ; * ] #
+ [ w , Upl , 0 , w ; * ] <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& reload_completed
&& !register_operand (operands[3], <MODE>mode)"
[(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,yes")]
)
;; Unpredicated shift operations by a constant (post-RA only).
@@ -4731,36 +4753,37 @@
;; Predicated integer shift, merging with the first input.
(define_insn "*cond_<optab><mode>_2_const"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
)
;; Predicated integer shift, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_const"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4800,36 +4823,38 @@
;; Predicated shifts of narrow elements by 64-bit amounts, merging with
;; the first input.
(define_insn "*cond_<sve_int_op><mode>_m"
- [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
- (match_operand:VNx2DI 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand")
+ (match_operand:VNx2DI 3 "register_operand")]
SVE_SHIFT_WIDE)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
- [(set_attr "movprfx" "*, yes")])
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ }
+)
;; Predicated shifts of narrow elements by 64-bit amounts, merging with zero.
(define_insn "*cond_<sve_int_op><mode>_z"
- [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=&w, &w")
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
- (match_operand:VNx2DI 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand")
+ (match_operand:VNx2DI 3 "register_operand")]
SVE_SHIFT_WIDE)
(match_operand:SVE_FULL_BHSI 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ &w , Upl , 0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ }
[(set_attr "movprfx" "yes")])
;; -------------------------------------------------------------------------
@@ -4860,19 +4885,20 @@
;; Predicated ASRD.
(define_insn "*sdiv_pow2<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "0, w")
+ [(match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_rshift_imm")]
UNSPEC_ASRD)]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")])
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
+)
;; Predicated shift with merging.
(define_expand "@cond_<sve_int_op><mode>"
@@ -4896,47 +4922,49 @@
;; Predicated shift, merging with the first input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
[(match_operand 4)
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "0, w")
+ [(match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm")]
SVE_INT_SHIFT_IMM)]
UNSPEC_PRED_X)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")])
+)
;; Predicated shift, merging with an independent value.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
[(match_operand 5)
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "w, w, w")
+ [(match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm")]
SVE_INT_SHIFT_IMM)]
UNSPEC_PRED_X)
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4972,18 +5000,18 @@
;; Predicated floating-point binary operations that take an integer
;; as their second operand.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point binary operations with merging, taking an
@@ -5006,68 +5034,69 @@
;; Predicated floating-point binary operations that take an integer as their
;; second operand, with inactive lanes coming from the first operand.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point binary operations that take an integer as
;; their second operand, with the values of inactive lanes being distinct
;; from the other inputs.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5087,23 +5116,24 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5183,19 +5213,19 @@
;; Predicated floating-point binary operations that have no immediate forms.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY_REG))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point operations with merging.
@@ -5216,155 +5246,156 @@
;; Predicated floating-point operations, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Same for operations that take a 1-bit constant.
(define_insn_and_rewrite "*cond_<optab><mode>_2_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
)
;; Predicated floating-point operations, merging with the second input.
(define_insn_and_rewrite "*cond_<optab><mode>_3_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_3_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated floating-point operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5384,26 +5415,27 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5417,22 +5449,23 @@
;; Same for operations that take a 1-bit constant.
(define_insn_and_rewrite "*cond_<optab><mode>_any_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5452,22 +5485,23 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5489,22 +5523,23 @@
;; Predicated floating-point addition.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, i, Z, Ui1, i, i, Ui1")
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, 0, w, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, w, w, vsA, vsN, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SI 4 "aarch64_sve_gp_strictness")
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand")]
SVE_COND_FP_ADD))]
"TARGET_SVE"
- "@
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- #
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , vsA , i ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , vsN , i ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ w , Upl , w , w , Z ; * ] #
+ [ w , Upl , 0 , w , Ui1 ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , vsA , i ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , vsN , i ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?&w , Upl , w , w , Ui1 ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -5512,79 +5547,79 @@
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
[(set (match_dup 0) (plus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
)
;; Predicated floating-point addition of a constant, merging with the
;; first input.
(define_insn_and_rewrite "*cond_add<mode>_2_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , vsA ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , vsN ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , vsN ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,*,yes,yes")]
)
(define_insn "*cond_add<mode>_2_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
- [(set_attr "movprfx" "*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , vsA ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , vsN ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , vsN ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ }
)
;; Predicated floating-point addition of a constant, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_add<mode>_any_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- #
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ w , Upl , w , vsA , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ w , Upl , w , vsA , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA , w ] #
+ [ ?w , Upl , w , vsN , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5604,25 +5639,26 @@
)
(define_insn_and_rewrite "*cond_add<mode>_any_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- #
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ w , Upl , w , vsA , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ w , Upl , w , vsA , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA , w ] #
+ [ ?w , Upl , w , vsN , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5645,18 +5681,18 @@
;; Predicated FCADD.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD))]
"TARGET_SVE"
- "@
- fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; Predicated FCADD with merging.
@@ -5691,66 +5727,67 @@
;; Predicated FCADD, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; Predicated FCADD, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5770,23 +5807,24 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5808,21 +5846,22 @@
;; Predicated floating-point subtraction.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, Ui1, i, Ui1")
- (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand" "vsA, w, 0, w, vsA, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w, w, 0, w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SI 4 "aarch64_sve_gp_strictness")
+ (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_SUB))]
"TARGET_SVE"
- "@
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- #
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , vsA , 0 , i ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ w , Upl , w , w , Z ; * ] #
+ [ w , Upl , 0 , w , Ui1 ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 , Ui1 ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , vsA , w , i ; yes ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?&w , Upl , w , w , Ui1 ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -5830,72 +5869,72 @@
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
[(set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,*,yes,yes")]
)
;; Predicated floating-point subtraction from a constant, merging with the
;; second input.
(define_insn_and_rewrite "*cond_sub<mode>_3_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
+ {@ [ cons: =0 , 1 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_sub<mode>_3_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ }
)
;; Predicated floating-point subtraction from a constant, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_sub<mode>_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- #"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5915,22 +5954,23 @@
)
(define_insn_and_rewrite "*cond_sub<mode>_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- #"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5968,45 +6008,45 @@
;; Predicated floating-point absolute difference.
(define_insn_and_rewrite "*aarch64_pred_abd<mode>_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*aarch64_pred_abd<mode>_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
(define_expand "@aarch64_cond_abd<mode>"
@@ -6034,138 +6074,139 @@
;; Predicated floating-point absolute difference, merging with the first
;; input.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& (!rtx_equal_p (operands[1], operands[4])
|| !rtx_equal_p (operands[1], operands[5]))"
{
operands[4] = copy_rtx (operands[1]);
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*aarch64_cond_abd<mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point absolute difference, merging with the second
;; input.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_3_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& (!rtx_equal_p (operands[1], operands[4])
|| !rtx_equal_p (operands[1], operands[5]))"
{
operands[4] = copy_rtx (operands[1]);
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*aarch64_cond_abd<mode>_3_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated floating-point absolute difference, merging with an
;; independent value.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
(unspec:SVE_FULL_F
[(match_operand 6)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -6189,30 +6230,31 @@
)
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -6233,20 +6275,21 @@
;; Predicated floating-point multiplication.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, i, Ui1")
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, w, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand" "vsM, w, w, vsM, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SI 4 "aarch64_sve_gp_strictness")
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand")]
SVE_COND_FP_MUL))]
"TARGET_SVE"
- "@
- fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #
- fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , vsM , i ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , w , Z ; * ] #
+ [ w , Upl , 0 , w , Ui1 ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , vsM , i ; yes ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w , Ui1 ; yes ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -6254,7 +6297,6 @@
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
[(set (match_dup 0) (mult:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,yes,yes")]
)
;; Merging forms are handled through SVE_COND_FP_BINARY and
@@ -6441,20 +6483,20 @@
;; Predicated floating-point maximum/minimum.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand" "vsB, w, vsB, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand")]
SVE_COND_FP_MAXMIN))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , vsB ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , vsB ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Merging forms are handled through SVE_COND_FP_BINARY and
@@ -6708,21 +6750,21 @@
;; Predicated integer addition of product.
(define_insn "@aarch64_pred_fma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(plus:SVE_I
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_I 4 "register_operand" "w, 0, w")))]
+ (match_operand:SVE_I 4 "register_operand")))]
"TARGET_SVE"
- "@
- mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , w , w ; * ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ w , Upl , w , w , 0 ; * ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer addition of product with merging.
@@ -6750,65 +6792,66 @@
;; Predicated integer addition of product, merging with the first input.
(define_insn "*cond_fma<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(plus:SVE_I
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 4 "register_operand" "w, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated integer addition of product, merging with the third input.
(define_insn "*cond_fma<mode>_4"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(plus:SVE_I
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 4 "register_operand" "0, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "register_operand"))
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer addition of product, merging with an independent value.
(define_insn_and_rewrite "*cond_fma<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(plus:SVE_I
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w"))
- (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w"))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "register_operand"))
+ (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -6849,21 +6892,21 @@
;; Predicated integer subtraction of product.
(define_insn "@aarch64_pred_fnma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, 0, w")
+ (match_operand:SVE_I 4 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)))]
"TARGET_SVE"
- "@
- msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , w , w ; * ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ w , Upl , w , w , 0 ; * ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer subtraction of product with merging.
@@ -6891,66 +6934,67 @@
;; Predicated integer subtraction of product, merging with the first input.
(define_insn "*cond_fnma<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, w")
+ (match_operand:SVE_I 4 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated integer subtraction of product, merging with the third input.
(define_insn "*cond_fnma<mode>_4"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "0, w")
+ (match_operand:SVE_I 4 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")))
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer subtraction of product, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_fnma<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w")
+ (match_operand:SVE_I 4 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w")))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")))
+ (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -6974,70 +7018,70 @@
;; Four-element integer dot-product with accumulation.
(define_insn "<sur>dot_prod<vsi2qi>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
- (match_operand:<VSI2QI> 2 "register_operand" "w, w")]
+ [(match_operand:<VSI2QI> 1 "register_operand")
+ (match_operand:<VSI2QI> 2 "register_operand")]
DOTPROD)
- (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 3 "register_operand")))]
"TARGET_SVE"
- "@
- <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
- movprfx\t%0, %3\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
+ }
)
;; Four-element integer dot-product by selected lanes with accumulation.
(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
+ [(match_operand:<VSI2QI> 1 "register_operand")
(unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VSI2QI> 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD)
- (match_operand:SVE_FULL_SDI 4 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 4 "register_operand")))]
"TARGET_SVE"
- "@
- <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
- movprfx\t%0, %4\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_con> , 0 ; * ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+ [ ?&w , w , <sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+ }
)
(define_insn "@<sur>dot_prod<vsi2qi>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
- (match_operand:<VSI2QI> 2 "register_operand" "w, w")]
+ [(match_operand:<VSI2QI> 1 "register_operand")
+ (match_operand:<VSI2QI> 2 "register_operand")]
DOTPROD_US_ONLY)
- (match_operand:VNx4SI_ONLY 3 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 3 "register_operand")))]
"TARGET_SVE_I8MM"
- "@
- <sur>dot\\t%0.s, %1.b, %2.b
- movprfx\t%0, %3\;<sur>dot\\t%0.s, %1.b, %2.b"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] <sur>dot\t%0.s, %1.b, %2.b
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;<sur>dot\t%0.s, %1.b, %2.b
+ }
)
(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
+ [(match_operand:<VSI2QI> 1 "register_operand")
(unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 2 "register_operand" "y, y")
+ [(match_operand:<VSI2QI> 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD_I8MM)
- (match_operand:VNx4SI_ONLY 4 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 4 "register_operand")))]
"TARGET_SVE_I8MM"
- "@
- <sur>dot\\t%0.s, %1.b, %2.b[%3]
- movprfx\t%0, %4\;<sur>dot\\t%0.s, %1.b, %2.b[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , y , 0 ; * ] <sur>dot\t%0.s, %1.b, %2.b[%3]
+ [ ?&w , w , y , w ; yes ] movprfx\t%0, %4\;<sur>dot\t%0.s, %1.b, %2.b[%3]
+ }
)
;; -------------------------------------------------------------------------
@@ -7080,18 +7124,18 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_add_<optab><vsi2qi>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 2 "register_operand" "w, w")
- (match_operand:<VSI2QI> 3 "register_operand" "w, w")]
+ [(match_operand:<VSI2QI> 2 "register_operand")
+ (match_operand:<VSI2QI> 3 "register_operand")]
MATMUL)
- (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 1 "register_operand")))]
"TARGET_SVE_I8MM"
- "@
- <sur>mmla\\t%0.s, %2.b, %3.b
- movprfx\t%0, %1\;<sur>mmla\\t%0.s, %2.b, %3.b"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sur>mmla\t%0.s, %2.b, %3.b
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sur>mmla\t%0.s, %2.b, %3.b
+ }
)
;; -------------------------------------------------------------------------
@@ -7126,20 +7170,20 @@
;; Predicated floating-point ternary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "%w, 0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY))]
"TARGET_SVE"
- "@
- <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %w , w , 0 ; * ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ w , Upl , 0 , w , w ; * ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point ternary operations with merging.
@@ -7167,121 +7211,122 @@
;; Predicated floating-point ternary operations, merging with the
;; first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated floating-point ternary operations, merging with the
;; third input.
(define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_4_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point ternary operations, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 6)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -7301,29 +7346,30 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -7338,20 +7384,20 @@
;; Unpredicated FMLA and FMLS by selected lanes. It doesn't seem worth using
;; (fma ...) since target-independent code won't understand the indexing.
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:SVE_FULL_F 1 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_F 1 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:SVE_FULL_F 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_FP_TERNARY_LANE))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
- movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_con> , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
+ [ ?&w , w , <sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
+ }
)
;; -------------------------------------------------------------------------
@@ -7363,19 +7409,19 @@
;; Predicated FCMLA.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; unpredicated optab pattern for auto-vectorizer
@@ -7453,69 +7499,70 @@
;; Predicated FCMLA, merging with the third input.
(define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_4_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; Predicated FCMLA, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 6)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -7535,24 +7582,25 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -7566,20 +7614,20 @@
;; Unpredicated FCMLA with indexing.
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:SVE_FULL_HSF 1 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_HSF 1 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:SVE_FULL_HSF 2 "register_operand" "<sve_lane_pair_con>, <sve_lane_pair_con>")
+ [(match_operand:SVE_FULL_HSF 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_HSF 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSF 4 "register_operand")]
FCMLA))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_pair_con> , 0 ; * ] fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
+ [ ?&w , w , <sve_lane_pair_con> , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
+ }
)
;; -------------------------------------------------------------------------
@@ -7590,17 +7638,17 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_tmad<mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:SVE_FULL_F 1 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:DI 3 "const_int_operand")]
UNSPEC_FTMAD))]
"TARGET_SVE"
- "@
- ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
- movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+ }
)
;; -------------------------------------------------------------------------
@@ -7614,33 +7662,33 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<sve_fp_op>vnx4sf"
- [(set (match_operand:VNx4SF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF 0 "register_operand")
(unspec:VNx4SF
- [(match_operand:VNx4SF 1 "register_operand" "0, w")
- (match_operand:VNx8BF 2 "register_operand" "w, w")
- (match_operand:VNx8BF 3 "register_operand" "w, w")]
+ [(match_operand:VNx4SF 1 "register_operand")
+ (match_operand:VNx8BF 2 "register_operand")
+ (match_operand:VNx8BF 3 "register_operand")]
SVE_BFLOAT_TERNARY_LONG))]
"TARGET_SVE_BF16"
- "@
- <sve_fp_op>\t%0.s, %2.h, %3.h
- movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_fp_op>\t%0.s, %2.h, %3.h
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h
+ }
)
;; The immediate range is enforced before generating the instruction.
(define_insn "@aarch64_sve_<sve_fp_op>_lanevnx4sf"
- [(set (match_operand:VNx4SF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF 0 "register_operand")
(unspec:VNx4SF
- [(match_operand:VNx4SF 1 "register_operand" "0, w")
- (match_operand:VNx8BF 2 "register_operand" "w, w")
- (match_operand:VNx8BF 3 "register_operand" "y, y")
+ [(match_operand:VNx4SF 1 "register_operand")
+ (match_operand:VNx8BF 2 "register_operand")
+ (match_operand:VNx8BF 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
SVE_BFLOAT_TERNARY_LONG_LANE))]
"TARGET_SVE_BF16"
- "@
- <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
- movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , y ; * ] <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
+ [ ?&w , w , w , y ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]
+ }
)
;; -------------------------------------------------------------------------
@@ -7652,17 +7700,17 @@
;; The mode iterator enforces the target requirements.
(define_insn "@aarch64_sve_<sve_fp_op><mode>"
- [(set (match_operand:SVE_MATMULF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_MATMULF 0 "register_operand")
(unspec:SVE_MATMULF
- [(match_operand:SVE_MATMULF 2 "register_operand" "w, w")
- (match_operand:SVE_MATMULF 3 "register_operand" "w, w")
- (match_operand:SVE_MATMULF 1 "register_operand" "0, w")]
+ [(match_operand:SVE_MATMULF 2 "register_operand")
+ (match_operand:SVE_MATMULF 3 "register_operand")
+ (match_operand:SVE_MATMULF 1 "register_operand")]
FMMLA))]
"TARGET_SVE"
- "@
- <sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %1\;<sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ }
)
;; =========================================================================
@@ -7713,24 +7761,24 @@
;; For the other instructions, using the element size is more natural,
;; so we do that for SEL as well.
(define_insn "*vcond_mask_<mode><vpred>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w, w, ?w, ?&w, ?&w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upa, Upa, Upl, Upa, Upa")
- (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm" "w, vss, vss, Ufc, Ufc, vss, Ufc")
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "w, 0, Dz, 0, Dz, w, w")]
+ [(match_operand:<VPRED> 3 "register_operand")
+ (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm")
+ (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& (!register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
- "@
- sel\t%0.<Vetype>, %3, %1.<Vetype>, %2.<Vetype>
- mov\t%0.<Vetype>, %3/m, #%I1
- mov\t%0.<Vetype>, %3/z, #%I1
- fmov\t%0.<Vetype>, %3/m, #%1
- movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
- movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
- movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1"
- [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , Upa ; * ] sel\t%0.<Vetype>, %3, %1.<Vetype>, %2.<Vetype>
+ [ w , vss , 0 , Upa ; * ] mov\t%0.<Vetype>, %3/m, #%I1
+ [ w , vss , Dz , Upa ; * ] mov\t%0.<Vetype>, %3/z, #%I1
+ [ w , Ufc , 0 , Upa ; * ] fmov\t%0.<Vetype>, %3/m, #%1
+ [ ?w , Ufc , Dz , Upl ; yes ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
+ [ ?&w , vss , w , Upa ; yes ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
+ [ ?&w , Ufc , w , Upa ; yes ] movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1
+ }
)
;; Optimize selects between a duplicated scalar variable and another vector,
@@ -7738,22 +7786,22 @@
;; of GPRs as being more expensive than duplicates of FPRs, since they
;; involve a cross-file move.
(define_insn "@aarch64_sel_dup<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=?w, w, ??w, ?&w, ??&w, ?&w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 3 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 3 "register_operand")
(vec_duplicate:SVE_ALL
- (match_operand:<VEL> 1 "register_operand" "r, w, r, w, r, w"))
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "0, 0, Dz, Dz, w, w")]
+ (match_operand:<VEL> 1 "register_operand"))
+ (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mov\t%0.<Vetype>, %3/m, %<vwcore>1
- mov\t%0.<Vetype>, %3/m, %<Vetype>1
- movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
- movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
- movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
- movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1"
- [(set_attr "movprfx" "*,*,yes,yes,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ ?w , r , 0 , Upl ; * ] mov\t%0.<Vetype>, %3/m, %<vwcore>1
+ [ w , w , 0 , Upl ; * ] mov\t%0.<Vetype>, %3/m, %<Vetype>1
+ [ ??w , r , Dz , Upl ; yes ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
+ [ ?&w , w , Dz , Upl ; yes ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
+ [ ??&w , r , w , Upl ; yes ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
+ [ ?&w , w , w , Upl ; yes ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
+ }
)
;; -------------------------------------------------------------------------
@@ -7891,19 +7939,20 @@
;; - the predicate result bit is in the undefined part of a VNx2BI,
;; so its value doesn't matter anyway.
(define_insn "@aarch64_pred_cmp<cmp_op><mode>"
- [(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+ [(set (match_operand:<VPRED> 0 "register_operand")
(unspec:<VPRED>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 3 "register_operand" "w, w")
- (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_I 3 "register_operand")
+ (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
UNSPEC_PRED_Z))
(clobber (reg:CC_NZC CC_REGNUM))]
"TARGET_SVE"
- "@
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ Upa , Upl , w , <sve_imm_con> ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
+ [ Upa , Upl , w , w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated integer comparisons in which both the flag and predicate
@@ -7911,18 +7960,18 @@
(define_insn_and_rewrite "*cmp<cmp_op><mode>_cc"
[(set (reg:CC_NZC CC_REGNUM)
(unspec:CC_NZC
- [(match_operand:VNx16BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx16BI 1 "register_operand")
(match_operand 4)
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
UNSPEC_PRED_Z)]
UNSPEC_PTEST))
- (set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+ (set (match_operand:<VPRED> 0 "register_operand")
(unspec:<VPRED>
[(match_dup 6)
(match_dup 7)
@@ -7932,9 +7981,10 @@
UNSPEC_PRED_Z))]
"TARGET_SVE
&& aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
- "@
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ Upa , Upl , w , <sve_imm_con> ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+ [ Upa , Upl , w , w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[4], operands[6])"
{
operands[6] = copy_rtx (operands[4]);
@@ -7947,23 +7997,24 @@
(define_insn_and_rewrite "*cmp<cmp_op><mode>_ptest"
[(set (reg:CC_NZC CC_REGNUM)
(unspec:CC_NZC
- [(match_operand:VNx16BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx16BI 1 "register_operand")
(match_operand 4)
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
UNSPEC_PRED_Z)]
UNSPEC_PTEST))
(clobber (match_scratch:<VPRED> 0 "=Upa, Upa"))]
"TARGET_SVE
&& aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
- "@
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+ {@ [ cons: 1 , 2 , 3 ]
+ [ Upl , w , <sve_imm_con> ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+ [ Upl , w , w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[4], operands[6])"
{
operands[6] = copy_rtx (operands[4]);
@@ -8184,17 +8235,18 @@
;; Predicated floating-point comparisons.
(define_insn "@aarch64_pred_fcm<cmp_op><mode>"
- [(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+ [(set (match_operand:<VPRED> 0 "register_operand")
(unspec:<VPRED>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
SVE_COND_FP_CMP_I0))]
"TARGET_SVE"
- "@
- fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
- fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ Upa , Upl , w , Dz ] fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
+ [ Upa , Upl , w , w ] fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Same for unordered comparisons.
@@ -8576,29 +8628,31 @@
;; Set operand 0 to the last active element in operand 3, or to tied
;; operand 1 if no elements are active.
(define_insn "@fold_extract_<last_op>_<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
+ [(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL>
- [(match_operand:<VEL> 1 "register_operand" "0, 0")
- (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ [(match_operand:<VEL> 1 "register_operand")
+ (match_operand:<VPRED> 2 "register_operand")
+ (match_operand:SVE_FULL 3 "register_operand")]
CLAST))]
"TARGET_SVE"
- "@
- clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
- clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ ?r , 0 , Upl , w ] clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
+ [ w , 0 , Upl , w ] clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>
+ }
)
(define_insn "@aarch64_fold_extract_vector_<last_op>_<mode>"
- [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL 0 "register_operand")
(unspec:SVE_FULL
- [(match_operand:SVE_FULL 1 "register_operand" "0, w")
- (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL 1 "register_operand")
+ (match_operand:<VPRED> 2 "register_operand")
+ (match_operand:SVE_FULL 3 "register_operand")]
CLAST))]
"TARGET_SVE"
- "@
- clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ w , 0 , Upl , w ] clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , w , Upl , w ] movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -8865,17 +8919,17 @@
;; Like EXT, but start at the first active element.
(define_insn "@aarch64_sve_splice<mode>"
- [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL 0 "register_operand")
(unspec:SVE_FULL
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 2 "register_operand" "0, w")
- (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_FULL 2 "register_operand")
+ (match_operand:SVE_FULL 3 "register_operand")]
UNSPEC_SVE_SPLICE))]
"TARGET_SVE"
- "@
- splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*, yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Permutes that take half the elements from one vector and half the
@@ -9057,32 +9111,32 @@
;; Predicated float-to-integer conversion, either to the same width or wider.
(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ }
)
;; Predicated narrowing float-to-integer conversion.
(define_insn "@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:VNx2DF_ONLY 2 "register_operand" "0, w")]
+ (match_operand:VNx2DF_ONLY 2 "register_operand")]
SVE_COND_FCVTI))]
"TARGET_SVE"
- "@
- fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
- movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ }
)
;; Predicated float-to-integer conversion with merging, either to the same
@@ -9107,45 +9161,45 @@
;; alternatives earlyclobber makes things more consistent for the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSDI
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSDI
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ }
)
;; Predicated narrowing float-to-integer conversion with merging.
@@ -9164,22 +9218,22 @@
)
(define_insn "*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(unspec:VNx4SI_ONLY
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:VNx2DF_ONLY 2 "register_operand" "w, w, w")]
+ (match_operand:VNx2DF_ONLY 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
- movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -9244,32 +9298,32 @@
;; Predicated integer-to-float conversion, either to the same width or
;; narrower.
(define_insn "@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated widening integer-to-float conversion.
(define_insn "@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
- [(set (match_operand:VNx2DF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx2DF_ONLY 0 "register_operand")
(unspec:VNx2DF_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SI_ONLY 2 "register_operand" "0, w")]
+ (match_operand:VNx4SI_ONLY 2 "register_operand")]
SVE_COND_ICVTF))]
"TARGET_SVE"
- "@
- <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
- movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ }
)
;; Predicated integer-to-float conversion with merging, either to the same
@@ -9294,45 +9348,45 @@
;; alternatives earlyclobber makes things more consistent for the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated widening integer-to-float conversion with merging.
@@ -9351,22 +9405,22 @@
)
(define_insn "*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
- [(set (match_operand:VNx2DF_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:VNx2DF_ONLY 0 "register_operand")
(unspec:VNx2DF_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(unspec:VNx2DF_ONLY
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w")]
+ (match_operand:VNx4SI_ONLY 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:VNx2DF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx2DF_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
- movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -9442,17 +9496,17 @@
;; Predicated float-to-float truncation.
(define_insn "@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
- [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_SDF 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand")]
SVE_COND_FCVT))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
- movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ }
)
;; Predicated float-to-float truncation with merging.
@@ -9471,22 +9525,22 @@
)
(define_insn "*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
- [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSF
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_SDF 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
- movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
- movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -9499,17 +9553,17 @@
;; Predicated BFCVT.
(define_insn "@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
- [(set (match_operand:VNx8BF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx8BF_ONLY 0 "register_operand")
(unspec:VNx8BF_ONLY
- [(match_operand:VNx4BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx4BI 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SF_ONLY 2 "register_operand" "0, w")]
+ (match_operand:VNx4SF_ONLY 2 "register_operand")]
SVE_COND_FCVT))]
"TARGET_SVE_BF16"
- "@
- bfcvt\t%0.h, %1/m, %2.s
- movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] bfcvt\t%0.h, %1/m, %2.s
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s
+ }
)
;; Predicated BFCVT with merging.
@@ -9528,22 +9582,22 @@
)
(define_insn "*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
- [(set (match_operand:VNx8BF_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:VNx8BF_ONLY 0 "register_operand")
(unspec:VNx8BF_ONLY
- [(match_operand:VNx4BI 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:VNx4BI 1 "register_operand")
(unspec:VNx8BF_ONLY
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SF_ONLY 2 "register_operand" "w, w, w")]
+ (match_operand:VNx4SF_ONLY 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:VNx8BF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx8BF_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE_BF16"
- "@
- bfcvt\t%0.h, %1/m, %2.s
- movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
- movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] bfcvt\t%0.h, %1/m, %2.s
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s
+ }
)
;; Predicated BFCVTNT. This doesn't give a natural aarch64_pred_*/cond_*
@@ -9599,17 +9653,17 @@
;; Predicated float-to-float extension.
(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
- [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
(unspec:SVE_FULL_SDF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_HSF 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand")]
SVE_COND_FCVT))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
- movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ }
)
;; Predicated float-to-float extension with merging.
@@ -9628,22 +9682,22 @@
)
(define_insn "*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
- [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
(unspec:SVE_FULL_SDF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(unspec:SVE_FULL_SDF
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_HSF 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
- movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
- movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -9716,16 +9770,17 @@
;; zeroing forms, these instructions don't operate elementwise and so
;; don't fit the IFN_COND model.
(define_insn "@aarch64_brk<brk_op>"
- [(set (match_operand:VNx16BI 0 "register_operand" "=Upa, Upa")
+ [(set (match_operand:VNx16BI 0 "register_operand")
(unspec:VNx16BI
- [(match_operand:VNx16BI 1 "register_operand" "Upa, Upa")
- (match_operand:VNx16BI 2 "register_operand" "Upa, Upa")
- (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero" "Dz, 0")]
+ [(match_operand:VNx16BI 1 "register_operand")
+ (match_operand:VNx16BI 2 "register_operand")
+ (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero")]
SVE_BRK_UNARY))]
"TARGET_SVE"
- "@
- brk<brk_op>\t%0.b, %1/z, %2.b
- brk<brk_op>\t%0.b, %1/m, %2.b"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ Upa , Upa , Upa , Dz ] brk<brk_op>\t%0.b, %1/z, %2.b
+ [ Upa , Upa , Upa , 0 ] brk<brk_op>\t%0.b, %1/m, %2.b
+ }
)
;; Same, but also producing a flags result.
@@ -10446,25 +10501,25 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx2DI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx2DI 0 "register_operand")
(ANY_PLUS:VNx2DI
(vec_duplicate:VNx2DI
(zero_extend:DI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)))
- (match_operand:VNx2DI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx2DI_ONLY 1 "register_operand")))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.d, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.d, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Increment a vector of SIs by the number of set bits in a predicate.
@@ -10486,24 +10541,24 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx4SI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI 0 "register_operand")
(ANY_PLUS:VNx4SI
(vec_duplicate:VNx4SI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP))
- (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 1 "register_operand")))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.s, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.s, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Increment a vector of HIs by the number of set bits in a predicate.
@@ -10526,25 +10581,25 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx8HI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx8HI 0 "register_operand")
(ANY_PLUS:VNx8HI
(vec_duplicate:VNx8HI
(match_operator:HI 3 "subreg_lowpart_operator"
[(unspec:SI
[(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)]))
- (match_operand:VNx8HI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx8HI_ONLY 1 "register_operand")))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.h, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.h, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; -------------------------------------------------------------------------
@@ -10679,25 +10734,25 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx2DI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx2DI 0 "register_operand")
(ANY_MINUS:VNx2DI
- (match_operand:VNx2DI_ONLY 1 "register_operand" "0, w")
+ (match_operand:VNx2DI_ONLY 1 "register_operand")
(vec_duplicate:VNx2DI
(zero_extend:DI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)))))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.d, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.d, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Decrement a vector of SIs by the number of set bits in a predicate.
@@ -10719,24 +10774,24 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx4SI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI 0 "register_operand")
(ANY_MINUS:VNx4SI
- (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")
+ (match_operand:VNx4SI_ONLY 1 "register_operand")
(vec_duplicate:VNx4SI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP))))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.s, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.s, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Decrement a vector of HIs by the number of set bits in a predicate.
@@ -10759,23 +10814,23 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx8HI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx8HI 0 "register_operand")
(ANY_MINUS:VNx8HI
- (match_operand:VNx8HI_ONLY 1 "register_operand" "0, w")
+ (match_operand:VNx8HI_ONLY 1 "register_operand")
(vec_duplicate:VNx8HI
(match_operator:HI 3 "subreg_lowpart_operator"
[(unspec:SI
[(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)]))))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.h, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.h, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 7a77e9b..ffa964d 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -159,33 +159,35 @@
(define_insn "@aarch64_scatter_stnt<mode>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:<VPRED> 0 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "Z, r")
- (match_operand:<V_INT_EQUIV> 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_SD 3 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 0 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand")
+ (match_operand:SVE_FULL_SD 3 "register_operand")]
UNSPEC_STNT1_SCATTER))]
"TARGET_SVE"
- "@
- stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>]
- stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>, %1]"
+ {@ [ cons: 0 , 1 , 2 , 3 ]
+ [ Upl , Z , w , w ] stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>]
+ [ Upl , r , w , w ] stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>, %1]
+ }
)
;; Truncating stores.
(define_insn "@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:<SVE_FULL_SDI:VPRED> 0 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "Z, r")
- (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 2 "register_operand" "w, w")
+ [(match_operand:<SVE_FULL_SDI:VPRED> 0 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
+ (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 2 "register_operand")
(truncate:SVE_PARTIAL_I
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))]
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))]
UNSPEC_STNT1_SCATTER))]
"TARGET_SVE2
&& (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>]
- stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>, %1]"
+ {@ [ cons: 0 , 1 , 2 , 3 ]
+ [ Upl , Z , w , w ] stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>]
+ [ Upl , r , w , w ] stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>, %1]
+ }
)
;; =========================================================================
@@ -214,16 +216,16 @@
;; The 2nd and 3rd alternatives are valid for just TARGET_SVE as well but
;; we include them here to allow matching simpler, unpredicated RTL.
(define_insn "*aarch64_mul_unpredicated_<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w,w,?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 1 "register_operand" "w,0,w")
- (match_operand:SVE_I 2 "aarch64_sve_vsm_operand" "w,vsm,vsm")))]
+ (match_operand:SVE_I 1 "register_operand")
+ (match_operand:SVE_I 2 "aarch64_sve_vsm_operand")))]
"TARGET_SVE2"
- "@
- mul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
- mul\t%0.<Vetype>, %0.<Vetype>, #%2
- movprfx\t%0, %1\;mul\t%0.<Vetype>, %0.<Vetype>, #%2"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , w , w ; * ] mul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ [ w , 0 , vsm ; * ] mul\t%0.<Vetype>, %0.<Vetype>, #%2
+ [ ?&w , w , vsm ; yes ] movprfx\t%0, %1\;mul\t%0.<Vetype>, %0.<Vetype>, #%2
+ }
)
;; -------------------------------------------------------------------------
@@ -349,20 +351,20 @@
;; General predicated binary arithmetic. All operations handled here
;; are commutative or have a reversed form.
(define_insn "@aarch64_pred_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_BINARY_REV)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated binary arithmetic with merging.
@@ -387,77 +389,78 @@
;; Predicated binary arithmetic, merging with the first input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_BINARY)]
UNSPEC_PRED_X)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated binary arithmetic, merging with the second input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_3"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "0, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_BINARY_REV)]
UNSPEC_PRED_X)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated binary operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 5)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w, w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_BINARY_REV)]
UNSPEC_PRED_X)
- (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -481,22 +484,23 @@
;; so there's no correctness requirement to handle merging with an
;; independent value.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_z"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 5)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_BINARY_NOREV)]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ &w , Upl , 0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (<VPRED>mode);
@@ -547,22 +551,22 @@
;; Predicated left shifts.
(define_insn "@aarch64_pred_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w, w")
- (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, D<lr>, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , D<lr> ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , D<lr> ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated left shifts with merging.
@@ -587,83 +591,84 @@
;; Predicated left shifts, merging with the first input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w")
- (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, D<lr>, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , D<lr> ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , D<lr> ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,*,yes,yes")]
)
;; Predicated left shifts, merging with the second input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_3"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "0, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated left shifts, merging with an independent value.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, &w, &w, &w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 5)
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w, w, w, w, w, w")
- (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, D<lr>, w, D<lr>, w, D<lr>, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
- (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, Dz, 0, 0, w, w")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2
&& !rtx_equal_p (operands[2], operands[4])
&& (CONSTANT_P (operands[4]) || !rtx_equal_p (operands[3], operands[4]))"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , D<lr> , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , D<lr> , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , D<lr> , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , D<lr> , w ] #
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -701,34 +706,34 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")
+ (match_operand:SVE_FULL_I 1 "register_operand")]
SVE2_INT_TERNARY))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ }
)
(define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_HSDI 2 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:SVE_FULL_HSDI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:SVE_FULL_HSDI 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")]
SVE2_INT_TERNARY_LANE))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+ }
)
;; -------------------------------------------------------------------------
@@ -740,37 +745,37 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_add_mul_lane_<mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(plus:SVE_FULL_HSDI
(mult:SVE_FULL_HSDI
(unspec:SVE_FULL_HSDI
- [(match_operand:SVE_FULL_HSDI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:SVE_FULL_HSDI 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w"))
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand"))
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")))]
"TARGET_SVE2"
- "@
- mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
- movprfx\t%0, %1\;mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+ }
)
(define_insn "@aarch64_sve_sub_mul_lane_<mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(minus:SVE_FULL_HSDI
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")
(mult:SVE_FULL_HSDI
(unspec:SVE_FULL_HSDI
- [(match_operand:SVE_FULL_HSDI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:SVE_FULL_HSDI 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w"))))]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand"))))]
"TARGET_SVE2"
- "@
- mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
- movprfx\t%0, %1\;mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+ }
)
;; -------------------------------------------------------------------------
@@ -781,17 +786,17 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve2_xar<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(rotatert:SVE_FULL_I
(xor:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "%0, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
(match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")))]
"TARGET_SVE2"
- "@
- xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
- movprfx\t%0, %1\;xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , w ; * ] xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+ }
)
;; -------------------------------------------------------------------------
@@ -825,86 +830,86 @@
)
(define_insn_and_rewrite "*aarch64_sve2_bcax<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(xor:SVE_FULL_I
(and:SVE_FULL_I
(unspec:SVE_FULL_I
[(match_operand 4)
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))]
+ (match_operand:SVE_FULL_I 3 "register_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w"))
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 1 "register_operand")))]
"TARGET_SVE2"
- "@
- bcax\t%0.d, %0.d, %2.d, %3.d
- movprfx\t%0, %1\;bcax\t%0.d, %0.d, %2.d, %3.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] bcax\t%0.d, %0.d, %2.d, %3.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;bcax\t%0.d, %0.d, %2.d, %3.d
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Unpredicated 3-way exclusive OR.
(define_insn "@aarch64_sve2_eor3<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(xor:SVE_FULL_I
(xor:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w, w, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "w, 0, w, w"))
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w, 0, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 3 "register_operand")))]
"TARGET_SVE2"
- "@
- eor3\t%0.d, %0.d, %2.d, %3.d
- eor3\t%0.d, %0.d, %1.d, %3.d
- eor3\t%0.d, %0.d, %1.d, %2.d
- movprfx\t%0, %1\;eor3\t%0.d, %0.d, %2.d, %3.d"
- [(set_attr "movprfx" "*,*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] eor3\t%0.d, %0.d, %2.d, %3.d
+ [ w , w , 0 , w ; * ] eor3\t%0.d, %0.d, %1.d, %3.d
+ [ w , w , w , 0 ; * ] eor3\t%0.d, %0.d, %1.d, %2.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;eor3\t%0.d, %0.d, %2.d, %3.d
+ }
)
;; Use NBSL for vector NOR.
(define_insn_and_rewrite "*aarch64_sve2_nor<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 3)
(and:SVE_FULL_I
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "%0, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand"))
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w")))]
+ (match_operand:SVE_FULL_I 2 "register_operand")))]
UNSPEC_PRED_X))]
"TARGET_SVE2"
- "@
- nbsl\t%0.d, %0.d, %2.d, %0.d
- movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %0.d"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %0.d
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %0.d
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Use NBSL for vector NAND.
(define_insn_and_rewrite "*aarch64_sve2_nand<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 3)
(ior:SVE_FULL_I
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "%0, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand"))
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w")))]
+ (match_operand:SVE_FULL_I 2 "register_operand")))]
UNSPEC_PRED_X))]
"TARGET_SVE2"
- "@
- nbsl\t%0.d, %0.d, %2.d, %2.d
- movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %2.d"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %2.d
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %2.d
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Unpredicated bitwise select.
@@ -922,19 +927,19 @@
)
(define_insn "*aarch64_sve2_bsl<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(xor:SVE_FULL_I
(and:SVE_FULL_I
(xor:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 3 "register_operand"))
(match_dup BSL_DUP)))]
"TARGET_SVE2"
- "@
- bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
- movprfx\t%0, %<bsl_mov>\;bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , <bsl_1st> , <bsl_2nd> , w ; * ] bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %<bsl_mov>\;bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+ }
)
;; Unpredicated bitwise inverted select.
@@ -959,27 +964,27 @@
)
(define_insn_and_rewrite "*aarch64_sve2_nbsl<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_operand 4)
(not:SVE_FULL_I
(xor:SVE_FULL_I
(and:SVE_FULL_I
(xor:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 3 "register_operand"))
(match_dup BSL_DUP)))]
UNSPEC_PRED_X))]
"TARGET_SVE2"
- "@
- nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
- movprfx\t%0, %<bsl_mov>\;nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , <bsl_1st> , <bsl_2nd> , w ; * ] nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %<bsl_mov>\;nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Unpredicated bitwise select with inverted first operand.
@@ -1004,27 +1009,27 @@
)
(define_insn_and_rewrite "*aarch64_sve2_bsl1n<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(xor:SVE_FULL_I
(and:SVE_FULL_I
(unspec:SVE_FULL_I
[(match_operand 4)
(not:SVE_FULL_I
(xor:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand")))]
UNSPEC_PRED_X)
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 3 "register_operand"))
(match_dup BSL_DUP)))]
"TARGET_SVE2"
- "@
- bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d
- movprfx\t%0, %<bsl_mov>\;bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , <bsl_1st> , <bsl_2nd> , w ; * ] bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %<bsl_mov>\;bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Unpredicated bitwise select with inverted second operand.
@@ -1050,55 +1055,55 @@
)
(define_insn_and_rewrite "*aarch64_sve2_bsl2n<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(ior:SVE_FULL_I
(and:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
(unspec:SVE_FULL_I
[(match_operand 4)
(and:SVE_FULL_I
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 3 "register_operand"))
(not:SVE_FULL_I
(match_dup BSL_DUP)))]
UNSPEC_PRED_X)))]
"TARGET_SVE2"
- "@
- bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
- movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , <bsl_1st> , <bsl_2nd> , w ; * ] bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Unpredicated bitwise select with inverted second operand, alternative form.
;; (bsl_dup ? bsl_mov : ~op3) == ((bsl_dup & bsl_mov) | (~bsl_dup & ~op3))
(define_insn_and_rewrite "*aarch64_sve2_bsl2n<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(ior:SVE_FULL_I
(and:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
(unspec:SVE_FULL_I
[(match_operand 4)
(and:SVE_FULL_I
(not:SVE_FULL_I
(match_dup BSL_DUP))
(not:SVE_FULL_I
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")))]
+ (match_operand:SVE_FULL_I 3 "register_operand")))]
UNSPEC_PRED_X)))]
"TARGET_SVE2"
- "@
- bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
- movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , <bsl_1st> , <bsl_2nd> , w ; * ] bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; -------------------------------------------------------------------------
@@ -1131,40 +1136,40 @@
;; Pattern-match SSRA and USRA as a predicated operation whose predicate
;; isn't needed.
(define_insn_and_rewrite "*aarch64_sve2_sra<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(plus:SVE_FULL_I
(unspec:SVE_FULL_I
[(match_operand 4)
(SHIFTRT:SVE_FULL_I
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 2 "register_operand")
(match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm"))]
UNSPEC_PRED_X)
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3
- movprfx\t%0, %1\;<sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] <sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;<sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; SRSRA and URSRA.
(define_insn "@aarch64_sve_add_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(plus:SVE_FULL_I
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_I 2 "register_operand")
(match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")]
VRSHR_N)
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
- movprfx\t%0, %1\;<sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] <sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;<sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+ }
)
;; -------------------------------------------------------------------------
@@ -1222,14 +1227,14 @@
;; Pattern-match SABA and UABA as an absolute-difference-and-accumulate
;; operation whose predicates aren't needed.
(define_insn "*aarch64_sve2_<su>aba<mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(plus:SVE_FULL_I
(minus:SVE_FULL_I
(unspec:SVE_FULL_I
[(match_operand 4)
(USMAX:SVE_FULL_I
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))]
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_FULL_I
[(match_operand 5)
@@ -1237,12 +1242,12 @@
(match_dup 2)
(match_dup 3))]
UNSPEC_PRED_X))
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %1\;<su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ }
)
;; =========================================================================
@@ -1370,142 +1375,142 @@
;; Non-saturating MLA operations.
(define_insn "@aarch64_sve_add_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(plus:SVE_FULL_HSDI
(unspec:SVE_FULL_HSDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
- (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+ [(match_operand:<VNARROW> 2 "register_operand")
+ (match_operand:<VNARROW> 3 "register_operand")]
SVE2_INT_ADD_BINARY_LONG)
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
- movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ }
)
;; Non-saturating MLA operations with lane select.
(define_insn "@aarch64_sve_add_<sve_int_op>_lane_<mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+ [(match_operand:<VNARROW> 2 "register_operand")
(unspec:<VNARROW>
- [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VNARROW> 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_ADD_BINARY_LONG_LANE)
- (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
- movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ }
)
;; Saturating MLA operations.
(define_insn "@aarch64_sve_qadd_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(ss_plus:SVE_FULL_HSDI
(unspec:SVE_FULL_HSDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
- (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+ [(match_operand:<VNARROW> 2 "register_operand")
+ (match_operand:<VNARROW> 3 "register_operand")]
SVE2_INT_QADD_BINARY_LONG)
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
- movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ }
)
;; Saturating MLA operations with lane select.
(define_insn "@aarch64_sve_qadd_<sve_int_op>_lane_<mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(ss_plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+ [(match_operand:<VNARROW> 2 "register_operand")
(unspec:<VNARROW>
- [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VNARROW> 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_QADD_BINARY_LONG_LANE)
- (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 1 "register_operand")))]
"TARGET_SVE2"
- "@
- <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
- movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ }
)
;; Non-saturating MLS operations.
(define_insn "@aarch64_sve_sub_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(minus:SVE_FULL_HSDI
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
- (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+ [(match_operand:<VNARROW> 2 "register_operand")
+ (match_operand:<VNARROW> 3 "register_operand")]
SVE2_INT_SUB_BINARY_LONG)))]
"TARGET_SVE2"
- "@
- <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
- movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ }
)
;; Non-saturating MLS operations with lane select.
(define_insn "@aarch64_sve_sub_<sve_int_op>_lane_<mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(minus:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_SDI 1 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+ [(match_operand:<VNARROW> 2 "register_operand")
(unspec:<VNARROW>
- [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VNARROW> 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_SUB_BINARY_LONG_LANE)))]
"TARGET_SVE2"
- "@
- <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
- movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ }
)
;; Saturating MLS operations.
(define_insn "@aarch64_sve_qsub_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(ss_minus:SVE_FULL_HSDI
- (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_HSDI 1 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
- (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+ [(match_operand:<VNARROW> 2 "register_operand")
+ (match_operand:<VNARROW> 3 "register_operand")]
SVE2_INT_QSUB_BINARY_LONG)))]
"TARGET_SVE2"
- "@
- <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
- movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+ }
)
;; Saturating MLS operations with lane select.
(define_insn "@aarch64_sve_qsub_<sve_int_op>_lane_<mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(ss_minus:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_SDI 1 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+ [(match_operand:<VNARROW> 2 "register_operand")
(unspec:<VNARROW>
- [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VNARROW> 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_QSUB_BINARY_LONG_LANE)))]
"TARGET_SVE2"
- "@
- <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
- movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+ }
)
;; -------------------------------------------------------------------------
;; ---- [FP] Long multiplication with accumulation
@@ -1518,34 +1523,34 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<sve_fp_op><mode>"
- [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
(unspec:VNx4SF_ONLY
- [(match_operand:<VNARROW> 1 "register_operand" "w, w")
- (match_operand:<VNARROW> 2 "register_operand" "w, w")
- (match_operand:VNx4SF_ONLY 3 "register_operand" "0, w")]
+ [(match_operand:<VNARROW> 1 "register_operand")
+ (match_operand:<VNARROW> 2 "register_operand")
+ (match_operand:VNx4SF_ONLY 3 "register_operand")]
SVE2_FP_TERNARY_LONG))]
"TARGET_SVE2"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
+ }
)
(define_insn "@aarch64_<sve_fp_op>_lane_<mode>"
- [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
(unspec:VNx4SF_ONLY
- [(match_operand:<VNARROW> 1 "register_operand" "w, w")
+ [(match_operand:<VNARROW> 1 "register_operand")
(unspec:<VNARROW>
- [(match_operand:<VNARROW> 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VNARROW> 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:VNx4SF_ONLY 4 "register_operand" "0, w")]
+ (match_operand:VNx4SF_ONLY 4 "register_operand")]
SVE2_FP_TERNARY_LONG_LANE))]
"TARGET_SVE2"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
- movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_con> , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
+ [ ?&w , w , <sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
+ }
)
;; =========================================================================
@@ -1698,17 +1703,17 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_pred_<sve_int_op><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_INT_BINARY_PAIR))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -1723,17 +1728,17 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_pred_<sve_fp_op><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE2_FP_BINARY_PAIR))]
"TARGET_SVE2"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -1767,43 +1772,44 @@
;; Predicated pairwise absolute difference and accumulate, merging with
;; the first input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSDI
[(match_operand 4)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")
- (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")
+ (match_operand:<VNARROW> 3 "register_operand")]
SVE2_INT_BINARY_PAIR_LONG)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated pairwise absolute difference and accumulate, merging with zero.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_z"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSDI
[(match_operand 5)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")
- (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")
+ (match_operand:<VNARROW> 3 "register_operand")]
SVE2_INT_BINARY_PAIR_LONG)
(match_operand:SVE_FULL_HSDI 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ &w , Upl , 0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+ [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+ }
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (<VPRED>mode);
@@ -1824,16 +1830,16 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 1 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand")]
SVE2_INT_CADD))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
+ }
)
;; unpredicated optab pattern for auto-vectorizer
@@ -1855,34 +1861,34 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 1 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_INT_CMLA))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_FULL_HSI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSI 0 "register_operand")
(unspec:SVE_FULL_HSI
- [(match_operand:SVE_FULL_HSI 1 "register_operand" "0, w")
- (match_operand:SVE_FULL_HSI 2 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_HSI 1 "register_operand")
+ (match_operand:SVE_FULL_HSI 2 "register_operand")
(unspec:SVE_FULL_HSI
- [(match_operand:SVE_FULL_HSI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:SVE_FULL_HSI 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_CMLA))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
+ }
)
;; unpredicated optab pattern for auto-vectorizer
@@ -1935,34 +1941,34 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
- (match_operand:<VSI2QI> 2 "register_operand" "w, w")
- (match_operand:<VSI2QI> 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_SDI 1 "register_operand")
+ (match_operand:<VSI2QI> 2 "register_operand")
+ (match_operand:<VSI2QI> 3 "register_operand")]
SVE2_INT_CDOT))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
+ }
)
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
- (match_operand:<VSI2QI> 2 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_SDI 1 "register_operand")
+ (match_operand:<VSI2QI> 2 "register_operand")
(unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VSI2QI> 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_CDOT))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
- movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , <sve_lane_con> ; * ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
+ [ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
+ }
)
;; =========================================================================
@@ -2067,17 +2073,17 @@
;; Predicated FCVTX (equivalent to what would be FCVTXNB, except that
;; it supports MOVPRFX).
(define_insn "@aarch64_pred_<sve_fp_op><mode>"
- [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
(unspec:VNx4SF_ONLY
- [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VWIDE_PRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:<VWIDE> 2 "register_operand" "0, w")]
+ (match_operand:<VWIDE> 2 "register_operand")]
SVE2_COND_FP_UNARY_NARROWB))]
"TARGET_SVE2"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ }
)
;; Predicated FCVTX with merging.
@@ -2096,45 +2102,45 @@
)
(define_insn_and_rewrite "*cond_<sve_fp_op><mode>_any_relaxed"
- [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=&w, &w, &w")
+ [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
(unspec:VNx4SF_ONLY
- [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VWIDE_PRED> 1 "register_operand")
(unspec:VNx4SF_ONLY
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:<VWIDE> 2 "register_operand" "w, w, w")]
+ (match_operand:<VWIDE> 2 "register_operand")]
SVE2_COND_FP_UNARY_NARROWB)
- (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
- movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ [ &w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<sve_fp_op><mode>_any_strict"
- [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=&w, &w, &w")
+ [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
(unspec:VNx4SF_ONLY
- [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VWIDE_PRED> 1 "register_operand")
(unspec:VNx4SF_ONLY
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:<VWIDE> 2 "register_operand" "w, w, w")]
+ (match_operand:<VWIDE> 2 "register_operand")]
SVE2_COND_FP_UNARY_NARROWB)
- (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
- movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ [ &w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+ }
)
;; Predicated FCVTXNT. This doesn't give a natural aarch64_pred_*/cond_*
@@ -2168,18 +2174,18 @@
;; Predicated integer unary operations.
(define_insn "@aarch64_pred_<sve_int_op><mode>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:VNx4SI_ONLY 2 "register_operand" "0, w")]
+ [(match_operand:VNx4SI_ONLY 2 "register_operand")]
SVE2_U32_UNARY)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated integer unary operations with merging.
@@ -2202,27 +2208,27 @@
)
(define_insn_and_rewrite "*cond_<sve_int_op><mode>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:VNx4SI_ONLY
[(match_operand 4)
(unspec:VNx4SI_ONLY
- [(match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w")]
+ [(match_operand:VNx4SI_ONLY 2 "register_operand")]
SVE2_U32_UNARY)]
UNSPEC_PRED_X)
- (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
;; -------------------------------------------------------------------------
@@ -2234,17 +2240,17 @@
;; Predicated FLOGB.
(define_insn "@aarch64_pred_<sve_fp_op><mode>"
- [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(unspec:<V_INT_EQUIV>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE2_COND_INT_UNARY_FP))]
"TARGET_SVE2"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated FLOGB with merging.
@@ -2263,45 +2269,45 @@
)
(define_insn_and_rewrite "*cond_<sve_fp_op><mode>"
- [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(unspec:<V_INT_EQUIV>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:<V_INT_EQUIV>
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE2_COND_INT_UNARY_FP)
- (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<sve_fp_op><mode>_strict"
- [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(unspec:<V_INT_EQUIV>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:<V_INT_EQUIV>
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE2_COND_INT_UNARY_FP)
- (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
index ccfcad5..c969277 100644
--- a/gcc/config/aarch64/aarch64-tune.md
+++ b/gcc/config/aarch64/aarch64-tune.md
@@ -1,5 +1,5 @@
;; -*- buffer-read-only: t -*-
;; Generated automatically by gentune.sh from aarch64-cores.def
(define_attr "tune"
- "cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,cortexx1c,neoversen1,ares,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,thunderx3t110,neoversev1,zeus,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa520,cortexa710,cortexa715,cortexa720,cortexx2,cortexx3,neoversen2,neoversev2,demeter"
+ "cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,cortexx1c,neoversen1,ares,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,thunderx3t110,neoversev1,zeus,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa520,cortexa710,cortexa715,cortexa720,cortexx2,cortexx3,cortexx4,neoversen2,neoversev2,demeter"
(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index f1e98ea..62b1ae0 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -257,7 +257,7 @@ public:
machine_mode orig_mode;
/* The offset in bytes of the piece from the start of the type. */
- poly_uint64_pod offset;
+ poly_uint64 offset;
};
/* Divides types analyzed as IS_PST into individual pieces. The pieces
@@ -1358,8 +1358,8 @@ static const struct tune_params generic_tunings =
have at most a very minor effect on SVE2 cores. */
(AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params cortexa35_tunings =
@@ -1394,8 +1394,8 @@ static const struct tune_params cortexa35_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params cortexa53_tunings =
@@ -1430,8 +1430,8 @@ static const struct tune_params cortexa53_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params cortexa57_tunings =
@@ -1466,8 +1466,8 @@ static const struct tune_params cortexa57_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_RENAME_FMA_REGS), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params cortexa72_tunings =
@@ -1502,8 +1502,8 @@ static const struct tune_params cortexa72_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params cortexa73_tunings =
@@ -1538,12 +1538,10 @@ static const struct tune_params cortexa73_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
-
-
static const struct tune_params exynosm1_tunings =
{
&exynosm1_extra_costs,
@@ -1575,8 +1573,8 @@ static const struct tune_params exynosm1_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&exynosm1_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params thunderxt88_tunings =
@@ -1610,8 +1608,8 @@ static const struct tune_params thunderxt88_tunings =
tune_params::AUTOPREFETCHER_OFF, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&thunderxt88_prefetch_tune,
- tune_params::LDP_POLICY_ALIGNED, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALIGNED /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
};
static const struct tune_params thunderx_tunings =
@@ -1645,8 +1643,8 @@ static const struct tune_params thunderx_tunings =
tune_params::AUTOPREFETCHER_OFF, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND), /* tune_flags. */
&thunderx_prefetch_tune,
- tune_params::LDP_POLICY_ALIGNED, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALIGNED /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
};
static const struct tune_params tsv110_tunings =
@@ -1681,8 +1679,8 @@ static const struct tune_params tsv110_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&tsv110_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params xgene1_tunings =
@@ -1716,8 +1714,8 @@ static const struct tune_params xgene1_tunings =
tune_params::AUTOPREFETCHER_OFF, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NO_LDP_STP_QREGS), /* tune_flags. */
&xgene1_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params emag_tunings =
@@ -1751,8 +1749,8 @@ static const struct tune_params emag_tunings =
tune_params::AUTOPREFETCHER_OFF, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NO_LDP_STP_QREGS), /* tune_flags. */
&xgene1_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params qdf24xx_tunings =
@@ -1787,8 +1785,8 @@ static const struct tune_params qdf24xx_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
AARCH64_EXTRA_TUNE_RENAME_LOAD_REGS, /* tune_flags. */
&qdf24xx_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
/* Tuning structure for the Qualcomm Saphira core. Default to falkor values
@@ -1825,8 +1823,8 @@ static const struct tune_params saphira_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params thunderx2t99_tunings =
@@ -1861,8 +1859,8 @@ static const struct tune_params thunderx2t99_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&thunderx2t99_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params thunderx3t110_tunings =
@@ -1897,8 +1895,8 @@ static const struct tune_params thunderx3t110_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&thunderx3t110_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params neoversen1_tunings =
@@ -1932,8 +1930,8 @@ static const struct tune_params neoversen1_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params ampere1_tunings =
@@ -1971,8 +1969,8 @@ static const struct tune_params ampere1_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&ampere1_prefetch_tune,
- tune_params::LDP_POLICY_ALIGNED, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALIGNED /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
};
static const struct tune_params ampere1a_tunings =
@@ -2011,8 +2009,8 @@ static const struct tune_params ampere1a_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&ampere1_prefetch_tune,
- tune_params::LDP_POLICY_ALIGNED, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALIGNED /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
};
static const advsimd_vec_cost neoversev1_advsimd_vector_cost =
@@ -2194,8 +2192,8 @@ static const struct tune_params neoversev1_tunings =
| AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT
| AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const sve_vec_cost neoverse512tvb_sve_vector_cost =
@@ -2333,8 +2331,8 @@ static const struct tune_params neoverse512tvb_tunings =
| AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
| AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const advsimd_vec_cost neoversen2_advsimd_vector_cost =
@@ -2525,8 +2523,8 @@ static const struct tune_params neoversen2_tunings =
| AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
| AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const advsimd_vec_cost neoversev2_advsimd_vector_cost =
@@ -2717,8 +2715,8 @@ static const struct tune_params neoversev2_tunings =
| AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
| AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT), /* tune_flags. */
&generic_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
static const struct tune_params a64fx_tunings =
@@ -2752,8 +2750,8 @@ static const struct tune_params a64fx_tunings =
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
(AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
&a64fx_prefetch_tune,
- tune_params::LDP_POLICY_ALWAYS, /* ldp_policy_model. */
- tune_params::STP_POLICY_ALWAYS /* stp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
};
/* Support for fine-grained override of the tuning structures. */
@@ -8529,13 +8527,17 @@ aarch64_save_regs_above_locals_p ()
static void
aarch64_layout_frame (void)
{
- int regno, last_fp_reg = INVALID_REGNUM;
+ unsigned regno, last_fp_reg = INVALID_REGNUM;
machine_mode vector_save_mode = aarch64_reg_save_mode (V8_REGNUM);
poly_int64 vector_save_size = GET_MODE_SIZE (vector_save_mode);
bool frame_related_fp_reg_p = false;
aarch64_frame &frame = cfun->machine->frame;
poly_int64 top_of_locals = -1;
+ vec_safe_truncate (frame.saved_gprs, 0);
+ vec_safe_truncate (frame.saved_fprs, 0);
+ vec_safe_truncate (frame.saved_prs, 0);
+
frame.emit_frame_chain = aarch64_needs_frame_chain ();
/* Adjust the outgoing arguments size if required. Keep it in sync with what
@@ -8620,6 +8622,7 @@ aarch64_layout_frame (void)
for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
{
+ vec_safe_push (frame.saved_prs, regno);
if (frame.sve_save_and_probe == INVALID_REGNUM)
frame.sve_save_and_probe = regno;
frame.reg_offset[regno] = offset;
@@ -8641,7 +8644,7 @@ aarch64_layout_frame (void)
If we don't have any vector registers to save, and we know how
big the predicate save area is, we can just round it up to the
next 16-byte boundary. */
- if (last_fp_reg == (int) INVALID_REGNUM && offset.is_constant ())
+ if (last_fp_reg == INVALID_REGNUM && offset.is_constant ())
offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
else
{
@@ -8655,10 +8658,11 @@ aarch64_layout_frame (void)
}
/* If we need to save any SVE vector registers, add them next. */
- if (last_fp_reg != (int) INVALID_REGNUM && crtl->abi->id () == ARM_PCS_SVE)
+ if (last_fp_reg != INVALID_REGNUM && crtl->abi->id () == ARM_PCS_SVE)
for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
{
+ vec_safe_push (frame.saved_fprs, regno);
if (frame.sve_save_and_probe == INVALID_REGNUM)
frame.sve_save_and_probe = regno;
frame.reg_offset[regno] = offset;
@@ -8679,13 +8683,8 @@ aarch64_layout_frame (void)
auto allocate_gpr_slot = [&](unsigned int regno)
{
- if (frame.hard_fp_save_and_probe == INVALID_REGNUM)
- frame.hard_fp_save_and_probe = regno;
+ vec_safe_push (frame.saved_gprs, regno);
frame.reg_offset[regno] = offset;
- if (frame.wb_push_candidate1 == INVALID_REGNUM)
- frame.wb_push_candidate1 = regno;
- else if (frame.wb_push_candidate2 == INVALID_REGNUM)
- frame.wb_push_candidate2 = regno;
offset += UNITS_PER_WORD;
};
@@ -8695,7 +8694,7 @@ aarch64_layout_frame (void)
allocate_gpr_slot (R29_REGNUM);
allocate_gpr_slot (R30_REGNUM);
}
- else if (flag_stack_clash_protection
+ else if ((flag_stack_clash_protection || !frame.is_scs_enabled)
&& known_eq (frame.reg_offset[R30_REGNUM], SLOT_REQUIRED))
/* Put the LR save slot first, since it makes a good choice of probe
for stack clash purposes. The idea is that the link register usually
@@ -8714,8 +8713,7 @@ aarch64_layout_frame (void)
for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
{
- if (frame.hard_fp_save_and_probe == INVALID_REGNUM)
- frame.hard_fp_save_and_probe = regno;
+ vec_safe_push (frame.saved_fprs, regno);
/* If there is an alignment gap between integer and fp callee-saves,
allocate the last fp register to it if possible. */
if (regno == last_fp_reg
@@ -8728,21 +8726,25 @@ aarch64_layout_frame (void)
}
frame.reg_offset[regno] = offset;
- if (frame.wb_push_candidate1 == INVALID_REGNUM)
- frame.wb_push_candidate1 = regno;
- else if (frame.wb_push_candidate2 == INVALID_REGNUM
- && frame.wb_push_candidate1 >= V0_REGNUM)
- frame.wb_push_candidate2 = regno;
offset += vector_save_size;
}
offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
-
auto saved_regs_size = offset - frame.bytes_below_saved_regs;
- gcc_assert (known_eq (saved_regs_size, below_hard_fp_saved_regs_size)
- || (frame.hard_fp_save_and_probe != INVALID_REGNUM
- && known_eq (frame.reg_offset[frame.hard_fp_save_and_probe],
- frame.bytes_below_hard_fp)));
+
+ array_slice<unsigned int> push_regs = (!vec_safe_is_empty (frame.saved_gprs)
+ ? frame.saved_gprs
+ : frame.saved_fprs);
+ if (!push_regs.empty ()
+ && known_eq (frame.reg_offset[push_regs[0]], frame.bytes_below_hard_fp))
+ {
+ frame.hard_fp_save_and_probe = push_regs[0];
+ frame.wb_push_candidate1 = push_regs[0];
+ if (push_regs.size () > 1)
+ frame.wb_push_candidate2 = push_regs[1];
+ }
+ else
+ gcc_assert (known_eq (saved_regs_size, below_hard_fp_saved_regs_size));
/* With stack-clash, a register must be saved in non-leaf functions.
The saving of the bottommost register counts as an implicit probe,
@@ -8906,12 +8908,14 @@ aarch64_layout_frame (void)
+ frame.sve_callee_adjust
+ frame.final_adjust, frame.frame_size));
- if (!frame.emit_frame_chain && frame.callee_adjust == 0)
+ if (frame.callee_adjust == 0)
{
- /* We've decided not to associate any register saves with the initial
- stack allocation. */
- frame.wb_pop_candidate1 = frame.wb_push_candidate1 = INVALID_REGNUM;
- frame.wb_pop_candidate2 = frame.wb_push_candidate2 = INVALID_REGNUM;
+ /* We've decided not to do a "real" push and pop. However,
+ setting up the frame chain is treated as being essentially
+ a multi-instruction push. */
+ frame.wb_pop_candidate1 = frame.wb_pop_candidate2 = INVALID_REGNUM;
+ if (!frame.emit_frame_chain)
+ frame.wb_push_candidate1 = frame.wb_push_candidate2 = INVALID_REGNUM;
}
frame.laid_out = true;
@@ -8926,17 +8930,6 @@ aarch64_register_saved_on_entry (int regno)
return known_ge (cfun->machine->frame.reg_offset[regno], 0);
}
-/* Return the next register up from REGNO up to LIMIT for the callee
- to save. */
-
-static unsigned
-aarch64_next_callee_save (unsigned regno, unsigned limit)
-{
- while (regno <= limit && !aarch64_register_saved_on_entry (regno))
- regno ++;
- return regno;
-}
-
/* Push the register number REGNO of mode MODE to the stack with write-back
adjusting the stack by ADJUSTMENT. */
@@ -9254,41 +9247,46 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
add_reg_note (insn, REG_CFA_EXPRESSION, gen_rtx_SET (mem, reg));
}
-/* Emit code to save the callee-saved registers from register number START
- to LIMIT to the stack. The stack pointer is currently BYTES_BELOW_SP
- bytes above the bottom of the static frame. Skip any write-back
- candidates if SKIP_WB is true. HARD_FP_VALID_P is true if the hard
- frame pointer has been set up. */
+/* Emit code to save the callee-saved registers in REGS. Skip any
+ write-back candidates if SKIP_WB is true, otherwise consider only
+ write-back candidates.
+
+ The stack pointer is currently BYTES_BELOW_SP bytes above the bottom
+ of the static frame. HARD_FP_VALID_P is true if the hard frame pointer
+ has been set up. */
static void
aarch64_save_callee_saves (poly_int64 bytes_below_sp,
- unsigned start, unsigned limit, bool skip_wb,
+ array_slice<unsigned int> regs, bool skip_wb,
bool hard_fp_valid_p)
{
aarch64_frame &frame = cfun->machine->frame;
rtx_insn *insn;
- unsigned regno;
- unsigned regno2;
rtx anchor_reg = NULL_RTX, ptrue = NULL_RTX;
- for (regno = aarch64_next_callee_save (start, limit);
- regno <= limit;
- regno = aarch64_next_callee_save (regno + 1, limit))
+ auto skip_save_p = [&](unsigned int regno)
+ {
+ if (cfun->machine->reg_is_wrapped_separately[regno])
+ return true;
+
+ if (skip_wb == (regno == frame.wb_push_candidate1
+ || regno == frame.wb_push_candidate2))
+ return true;
+
+ return false;
+ };
+
+ for (unsigned int i = 0; i < regs.size (); ++i)
{
- rtx reg, mem;
+ unsigned int regno = regs[i];
poly_int64 offset;
bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
- if (skip_wb
- && (regno == frame.wb_push_candidate1
- || regno == frame.wb_push_candidate2))
- continue;
-
- if (cfun->machine->reg_is_wrapped_separately[regno])
+ if (skip_save_p (regno))
continue;
machine_mode mode = aarch64_reg_save_mode (regno);
- reg = gen_rtx_REG (mode, regno);
+ rtx reg = gen_rtx_REG (mode, regno);
offset = frame.reg_offset[regno] - bytes_below_sp;
rtx base_rtx = stack_pointer_rtx;
poly_int64 sp_offset = offset;
@@ -9315,12 +9313,13 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
}
offset -= fp_offset;
}
- mem = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
+ rtx mem = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
bool need_cfa_note_p = (base_rtx != stack_pointer_rtx);
+ unsigned int regno2;
if (!aarch64_sve_mode_p (mode)
- && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
- && !cfun->machine->reg_is_wrapped_separately[regno2]
+ && i + 1 < regs.size ()
+ && (regno2 = regs[i + 1], !skip_save_p (regno2))
&& known_eq (GET_MODE_SIZE (mode),
frame.reg_offset[regno2] - frame.reg_offset[regno]))
{
@@ -9346,6 +9345,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
}
regno = regno2;
+ ++i;
}
else if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
{
@@ -9363,49 +9363,57 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
}
}
-/* Emit code to restore the callee registers from register number START
- up to and including LIMIT. The stack pointer is currently BYTES_BELOW_SP
- bytes above the bottom of the static frame. Skip any write-back
- candidates if SKIP_WB is true. Write the appropriate REG_CFA_RESTORE
- notes into CFI_OPS. */
+/* Emit code to restore the callee registers in REGS, ignoring pop candidates
+ and any other registers that are handled separately. Write the appropriate
+ REG_CFA_RESTORE notes into CFI_OPS.
+
+ The stack pointer is currently BYTES_BELOW_SP bytes above the bottom
+ of the static frame. */
static void
-aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
- unsigned limit, bool skip_wb, rtx *cfi_ops)
+aarch64_restore_callee_saves (poly_int64 bytes_below_sp,
+ array_slice<unsigned int> regs, rtx *cfi_ops)
{
aarch64_frame &frame = cfun->machine->frame;
- unsigned regno;
- unsigned regno2;
poly_int64 offset;
rtx anchor_reg = NULL_RTX, ptrue = NULL_RTX;
- for (regno = aarch64_next_callee_save (start, limit);
- regno <= limit;
- regno = aarch64_next_callee_save (regno + 1, limit))
+ auto skip_restore_p = [&](unsigned int regno)
{
- bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
if (cfun->machine->reg_is_wrapped_separately[regno])
- continue;
+ return true;
- rtx reg, mem;
+ if (regno == frame.wb_pop_candidate1
+ || regno == frame.wb_pop_candidate2)
+ return true;
- if (skip_wb
- && (regno == frame.wb_pop_candidate1
- || regno == frame.wb_pop_candidate2))
+ /* The shadow call stack code restores LR separately. */
+ if (frame.is_scs_enabled && regno == LR_REGNUM)
+ return true;
+
+ return false;
+ };
+
+ for (unsigned int i = 0; i < regs.size (); ++i)
+ {
+ unsigned int regno = regs[i];
+ bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
+ if (skip_restore_p (regno))
continue;
machine_mode mode = aarch64_reg_save_mode (regno);
- reg = gen_rtx_REG (mode, regno);
+ rtx reg = gen_rtx_REG (mode, regno);
offset = frame.reg_offset[regno] - bytes_below_sp;
rtx base_rtx = stack_pointer_rtx;
if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
offset, ptrue);
- mem = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
+ rtx mem = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
+ unsigned int regno2;
if (!aarch64_sve_mode_p (mode)
- && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
- && !cfun->machine->reg_is_wrapped_separately[regno2]
+ && i + 1 < regs.size ()
+ && (regno2 = regs[i + 1], !skip_restore_p (regno2))
&& known_eq (GET_MODE_SIZE (mode),
frame.reg_offset[regno2] - frame.reg_offset[regno]))
{
@@ -9418,6 +9426,7 @@ aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
*cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg2, *cfi_ops);
regno = regno2;
+ ++i;
}
else if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
emit_insn (gen_aarch64_pred_mov (mode, reg, ptrue, mem));
@@ -10239,13 +10248,10 @@ aarch64_expand_prologue (void)
- frame.bytes_above_hard_fp);
gcc_assert (known_ge (chain_offset, 0));
+ gcc_assert (reg1 == R29_REGNUM && reg2 == R30_REGNUM);
if (callee_adjust == 0)
- {
- reg1 = R29_REGNUM;
- reg2 = R30_REGNUM;
- aarch64_save_callee_saves (bytes_below_sp, reg1, reg2,
- false, false);
- }
+ aarch64_save_callee_saves (bytes_below_sp, frame.saved_gprs,
+ false, false);
else
gcc_assert (known_eq (chain_offset, 0));
aarch64_add_offset (Pmode, hard_frame_pointer_rtx,
@@ -10283,8 +10289,7 @@ aarch64_expand_prologue (void)
aarch64_emit_stack_tie (hard_frame_pointer_rtx);
}
- aarch64_save_callee_saves (bytes_below_sp, R0_REGNUM, R30_REGNUM,
- callee_adjust != 0 || emit_frame_chain,
+ aarch64_save_callee_saves (bytes_below_sp, frame.saved_gprs, true,
emit_frame_chain);
if (maybe_ne (sve_callee_adjust, 0))
{
@@ -10295,10 +10300,9 @@ aarch64_expand_prologue (void)
!frame_pointer_needed, false);
bytes_below_sp -= sve_callee_adjust;
}
- aarch64_save_callee_saves (bytes_below_sp, P0_REGNUM, P15_REGNUM,
- false, emit_frame_chain);
- aarch64_save_callee_saves (bytes_below_sp, V0_REGNUM, V31_REGNUM,
- callee_adjust != 0 || emit_frame_chain,
+ aarch64_save_callee_saves (bytes_below_sp, frame.saved_prs, true,
+ emit_frame_chain);
+ aarch64_save_callee_saves (bytes_below_sp, frame.saved_fprs, true,
emit_frame_chain);
/* We may need to probe the final adjustment if it is larger than the guard
@@ -10344,8 +10348,6 @@ aarch64_expand_epilogue (bool for_sibcall)
poly_int64 bytes_below_hard_fp = frame.bytes_below_hard_fp;
unsigned reg1 = frame.wb_pop_candidate1;
unsigned reg2 = frame.wb_pop_candidate2;
- unsigned int last_gpr = (frame.is_scs_enabled
- ? R29_REGNUM : R30_REGNUM);
rtx cfi_ops = NULL;
rtx_insn *insn;
/* A stack clash protection prologue may not have left EP0_REGNUM or
@@ -10409,10 +10411,8 @@ aarch64_expand_epilogue (bool for_sibcall)
/* Restore the vector registers before the predicate registers,
so that we can use P4 as a temporary for big-endian SVE frames. */
- aarch64_restore_callee_saves (final_adjust, V0_REGNUM, V31_REGNUM,
- callee_adjust != 0, &cfi_ops);
- aarch64_restore_callee_saves (final_adjust, P0_REGNUM, P15_REGNUM,
- false, &cfi_ops);
+ aarch64_restore_callee_saves (final_adjust, frame.saved_fprs, &cfi_ops);
+ aarch64_restore_callee_saves (final_adjust, frame.saved_prs, &cfi_ops);
if (maybe_ne (sve_callee_adjust, 0))
aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust, true);
@@ -10420,8 +10420,7 @@ aarch64_expand_epilogue (bool for_sibcall)
restore x30, we don't need to restore x30 again in the traditional
way. */
aarch64_restore_callee_saves (final_adjust + sve_callee_adjust,
- R0_REGNUM, last_gpr,
- callee_adjust != 0, &cfi_ops);
+ frame.saved_gprs, &cfi_ops);
if (need_barrier_p)
aarch64_emit_stack_tie (stack_pointer_rtx);
@@ -17866,36 +17865,6 @@ aarch64_parse_tune (const char *to_parse, const struct processor **res)
return AARCH_PARSE_INVALID_ARG;
}
-/* Parse a command-line -param=aarch64-ldp-policy= parameter. VALUE is
- the value of the parameter. */
-
-static void
-aarch64_parse_ldp_policy (enum aarch64_ldp_policy value,
- struct tune_params* tune)
-{
- if (value == LDP_POLICY_ALWAYS)
- tune->ldp_policy_model = tune_params::LDP_POLICY_ALWAYS;
- else if (value == LDP_POLICY_NEVER)
- tune->ldp_policy_model = tune_params::LDP_POLICY_NEVER;
- else if (value == LDP_POLICY_ALIGNED)
- tune->ldp_policy_model = tune_params::LDP_POLICY_ALIGNED;
-}
-
-/* Parse a command-line -param=aarch64-stp-policy= parameter. VALUE is
- the value of the parameter. */
-
-static void
-aarch64_parse_stp_policy (enum aarch64_stp_policy value,
- struct tune_params* tune)
-{
- if (value == STP_POLICY_ALWAYS)
- tune->stp_policy_model = tune_params::STP_POLICY_ALWAYS;
- else if (value == STP_POLICY_NEVER)
- tune->stp_policy_model = tune_params::STP_POLICY_NEVER;
- else if (value == STP_POLICY_ALIGNED)
- tune->stp_policy_model = tune_params::STP_POLICY_ALIGNED;
-}
-
/* Parse TOKEN, which has length LENGTH to see if it is an option
described in FLAG. If it is, return the index bit for that fusion type.
If not, error (printing OPTION_NAME) and return zero. */
@@ -18245,12 +18214,10 @@ aarch64_override_options_internal (struct gcc_options *opts)
&aarch64_tune_params);
if (opts->x_aarch64_ldp_policy_param)
- aarch64_parse_ldp_policy (opts->x_aarch64_ldp_policy_param,
- &aarch64_tune_params);
+ aarch64_tune_params.ldp_policy_model = opts->x_aarch64_ldp_policy_param;
if (opts->x_aarch64_stp_policy_param)
- aarch64_parse_stp_policy (opts->x_aarch64_stp_policy_param,
- &aarch64_tune_params);
+ aarch64_tune_params.stp_policy_model = opts->x_aarch64_stp_policy_param;
/* This target defaults to strict volatile bitfields. */
if (opts->x_flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
@@ -25313,10 +25280,11 @@ aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst,
*dst = aarch64_progress_pointer (*dst);
}
-/* Expand a cpymem using the MOPS extension. OPERANDS are taken
- from the cpymem pattern. Return true iff we succeeded. */
-static bool
-aarch64_expand_cpymem_mops (rtx *operands)
+/* Expand a cpymem/movmem using the MOPS extension. OPERANDS are taken
+ from the cpymem/movmem pattern. IS_MEMMOVE is true if this is a memmove
+ rather than memcpy. Return true iff we succeeded. */
+bool
+aarch64_expand_cpymem_mops (rtx *operands, bool is_memmove = false)
{
if (!TARGET_MOPS)
return false;
@@ -25328,8 +25296,10 @@ aarch64_expand_cpymem_mops (rtx *operands)
rtx dst_mem = replace_equiv_address (operands[0], dst_addr);
rtx src_mem = replace_equiv_address (operands[1], src_addr);
rtx sz_reg = copy_to_mode_reg (DImode, operands[2]);
- emit_insn (gen_aarch64_cpymemdi (dst_mem, src_mem, sz_reg));
-
+ if (is_memmove)
+ emit_insn (gen_aarch64_movmemdi (dst_mem, src_mem, sz_reg));
+ else
+ emit_insn (gen_aarch64_cpymemdi (dst_mem, src_mem, sz_reg));
return true;
}
@@ -26548,30 +26518,18 @@ aarch64_mergeable_load_pair_p (machine_mode mode, rtx mem1, rtx mem2)
bool
aarch64_mem_ok_with_ldpstp_policy_model (rtx mem, bool load, machine_mode mode)
{
- /* If we have LDP_POLICY_NEVER, reject the load pair. */
- if (load
- && aarch64_tune_params.ldp_policy_model == tune_params::LDP_POLICY_NEVER)
- return false;
+ auto policy = (load
+ ? aarch64_tune_params.ldp_policy_model
+ : aarch64_tune_params.stp_policy_model);
- /* If we have STP_POLICY_NEVER, reject the store pair. */
- if (!load
- && aarch64_tune_params.stp_policy_model == tune_params::STP_POLICY_NEVER)
+ /* If we have AARCH64_LDP_STP_POLICY_NEVER, reject the load pair. */
+ if (policy == AARCH64_LDP_STP_POLICY_NEVER)
return false;
- /* If we have LDP_POLICY_ALIGNED,
+ /* If we have AARCH64_LDP_STP_POLICY_ALIGNED,
do not emit the load pair unless the alignment is checked to be
at least double the alignment of the type. */
- if (load
- && aarch64_tune_params.ldp_policy_model == tune_params::LDP_POLICY_ALIGNED
- && !optimize_function_for_size_p (cfun)
- && MEM_ALIGN (mem) < 2 * GET_MODE_ALIGNMENT (mode))
- return false;
-
- /* If we have STP_POLICY_ALIGNED,
- do not emit the store pair unless the alignment is checked to be
- at least double the alignment of the type. */
- if (!load
- && aarch64_tune_params.stp_policy_model == tune_params::STP_POLICY_ALIGNED
+ if (policy == AARCH64_LDP_STP_POLICY_ALIGNED
&& !optimize_function_for_size_p (cfun)
&& MEM_ALIGN (mem) < 2 * GET_MODE_ALIGNMENT (mode))
return false;
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index d74e911..2f0777a 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -762,7 +762,7 @@ extern enum aarch64_processor aarch64_tune;
#define DEFAULT_PCC_STRUCT_RETURN 0
-#ifdef HAVE_POLY_INT_H
+#if defined(HAVE_POLY_INT_H) && defined(GCC_VEC_H)
struct GTY (()) aarch64_frame
{
/* The offset from the bottom of the static frame (the bottom of the
@@ -770,6 +770,13 @@ struct GTY (()) aarch64_frame
needed. */
poly_int64 reg_offset[LAST_SAVED_REGNUM + 1];
+ /* The list of GPRs, FPRs and predicate registers that have nonnegative
+ entries in reg_offset. The registers are listed in order of
+ increasing offset (rather than increasing register number). */
+ vec<unsigned, va_gc_atomic> *saved_gprs;
+ vec<unsigned, va_gc_atomic> *saved_fprs;
+ vec<unsigned, va_gc_atomic> *saved_prs;
+
/* The number of extra stack bytes taken up by register varargs.
This area is allocated by the callee at the very top of the
frame. This value is rounded up to a multiple of
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 60133b5..32c7adc 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -533,23 +533,23 @@
"")
(define_insn "@ccmp<CC_ONLY:mode><GPI:mode>"
- [(set (match_operand:CC_ONLY 1 "cc_register" "")
+ [(set (match_operand:CC_ONLY 1 "cc_register")
(if_then_else:CC_ONLY
(match_operator 4 "aarch64_comparison_operator"
- [(match_operand 0 "cc_register" "")
+ [(match_operand 0 "cc_register")
(const_int 0)])
(compare:CC_ONLY
- (match_operand:GPI 2 "register_operand" "r,r,r")
- (match_operand:GPI 3 "aarch64_ccmp_operand" "r,Uss,Usn"))
+ (match_operand:GPI 2 "register_operand")
+ (match_operand:GPI 3 "aarch64_ccmp_operand"))
(unspec:CC_ONLY
[(match_operand 5 "immediate_operand")]
UNSPEC_NZCV)))]
""
- "@
- ccmp\\t%<w>2, %<w>3, %k5, %m4
- ccmp\\t%<w>2, %3, %k5, %m4
- ccmn\\t%<w>2, #%n3, %k5, %m4"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: 2 , 3 ; attrs: type ]
+ [ r , r ; alus_sreg ] ccmp\t%<w>2, %<w>3, %k5, %m4
+ [ r , Uss ; alus_imm ] ccmp\t%<w>2, %3, %k5, %m4
+ [ r , Usn ; alus_imm ] ccmn\t%<w>2, #%n3, %k5, %m4
+ }
)
(define_insn "@ccmp<CCFP_CCFPE:mode><GPF:mode>"
@@ -570,23 +570,23 @@
)
(define_insn "@ccmp<CC_ONLY:mode><GPI:mode>_rev"
- [(set (match_operand:CC_ONLY 1 "cc_register" "")
+ [(set (match_operand:CC_ONLY 1 "cc_register")
(if_then_else:CC_ONLY
(match_operator 4 "aarch64_comparison_operator"
- [(match_operand 0 "cc_register" "")
+ [(match_operand 0 "cc_register")
(const_int 0)])
(unspec:CC_ONLY
[(match_operand 5 "immediate_operand")]
UNSPEC_NZCV)
(compare:CC_ONLY
- (match_operand:GPI 2 "register_operand" "r,r,r")
- (match_operand:GPI 3 "aarch64_ccmp_operand" "r,Uss,Usn"))))]
+ (match_operand:GPI 2 "register_operand")
+ (match_operand:GPI 3 "aarch64_ccmp_operand"))))]
""
- "@
- ccmp\\t%<w>2, %<w>3, %k5, %M4
- ccmp\\t%<w>2, %3, %k5, %M4
- ccmn\\t%<w>2, #%n3, %k5, %M4"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: 2 , 3 ; attrs: type ]
+ [ r , r ; alus_sreg ] ccmp\t%<w>2, %<w>3, %k5, %M4
+ [ r , Uss ; alus_imm ] ccmp\t%<w>2, %3, %k5, %M4
+ [ r , Usn ; alus_imm ] ccmn\t%<w>2, #%n3, %k5, %M4
+ }
)
(define_insn "@ccmp<CCFP_CCFPE:mode><GPF:mode>_rev"
@@ -1056,15 +1056,16 @@
)
(define_insn "*call_insn"
- [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "Ucr, Usf"))
+ [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand"))
(match_operand 1 "" ""))
(unspec:DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_CALLEE_ABI)
(clobber (reg:DI LR_REGNUM))]
""
- "@
- * return aarch64_indirect_call_asm (operands[0]);
- bl\\t%c0"
- [(set_attr "type" "call, call")])
+ {@ [ cons: 0 ; attrs: type ]
+ [ Ucr ; call ] << aarch64_indirect_call_asm (operands[0]);
+ [ Usf ; call ] bl\t%c0
+ }
+)
(define_expand "call_value"
[(parallel
@@ -1083,15 +1084,15 @@
(define_insn "*call_value_insn"
[(set (match_operand 0 "" "")
- (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "Ucr, Usf"))
+ (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand"))
(match_operand 2 "" "")))
(unspec:DI [(match_operand:DI 3 "const_int_operand")] UNSPEC_CALLEE_ABI)
(clobber (reg:DI LR_REGNUM))]
""
- "@
- * return aarch64_indirect_call_asm (operands[1]);
- bl\\t%c1"
- [(set_attr "type" "call, call")]
+ {@ [ cons: 1 ; attrs: type ]
+ [ Ucr ; call ] << aarch64_indirect_call_asm (operands[1]);
+ [ Usf ; call ] bl\t%c1
+ }
)
(define_expand "sibcall"
@@ -1459,78 +1460,69 @@
)
(define_insn "*mov<mode>_aarch64"
- [(set (match_operand:HFBF 0 "nonimmediate_operand" "=w,w ,w ,w ,?r,?r,w,w,w ,w ,w,m,r,m ,r")
- (match_operand:HFBF 1 "general_operand" "Y ,?rY,?r,?rY, w, w,w,w,Ufc,Uvi,m,w,m,rY,r"))]
+ [(set (match_operand:HFBF 0 "nonimmediate_operand")
+ (match_operand:HFBF 1 "general_operand"))]
"TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
|| aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
- "@
- movi\\t%0.4h, #0
- fmov\\t%h0, %w1
- dup\\t%w0.4h, %w1
- fmov\\t%s0, %w1
- umov\\t%w0, %1.h[0]
- fmov\\t%w0, %s1
- mov\\t%0.h[0], %1.h[0]
- fmov\\t%s0, %s1
- fmov\\t%h0, %1
- * return aarch64_output_scalar_simd_mov_immediate (operands[1], HImode);
- ldr\\t%h0, %1
- str\\t%h1, %0
- ldrh\\t%w0, %1
- strh\\t%w1, %0
- mov\\t%w0, %w1"
- [(set_attr "type" "neon_move,f_mcr,neon_move,f_mcr,neon_to_gp,f_mrc,
- neon_move,fmov,fconsts,neon_move,f_loads,f_stores,
- load_4,store_4,mov_reg")
- (set_attr "arch" "simd,fp16,simd,*,simd,*,simd,*,fp16,simd,*,*,*,*,*")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , Y ; neon_move , simd ] movi\t%0.4h, #0
+ [ w , ?rY ; f_mcr , fp16 ] fmov\t%h0, %w1
+ [ w , ?r ; neon_move , simd ] dup\t%w0.4h, %w1
+ [ w , ?rY ; f_mcr , * ] fmov\t%s0, %w1
+ [ ?r , w ; neon_to_gp , simd ] umov\t%w0, %1.h[0]
+ [ ?r , w ; f_mrc , * ] fmov\t%w0, %s1
+ [ w , w ; neon_move , simd ] mov\t%0.h[0], %1.h[0]
+ [ w , w ; fmov , * ] fmov\t%s0, %s1
+ [ w , Ufc ; fconsts , fp16 ] fmov\t%h0, %1
+ [ w , Uvi ; neon_move , simd ] << aarch64_output_scalar_simd_mov_immediate (operands[1], HImode);
+ [ w , m ; f_loads , * ] ldr\t%h0, %1
+ [ m , w ; f_stores , * ] str\t%h1, %0
+ [ r , m ; load_4 , * ] ldrh\t%w0, %1
+ [ m , rY ; store_4 , * ] strh\t%w1, %0
+ [ r , r ; mov_reg , * ] mov\t%w0, %w1
+ }
)
(define_insn "*mov<mode>_aarch64"
- [(set (match_operand:SFD 0 "nonimmediate_operand" "=w,w ,?r,w,w ,w ,w,m,r,m ,r,r")
- (match_operand:SFD 1 "general_operand" "Y ,?rY, w,w,Ufc,Uvi,m,w,m,rY,r,M"))]
+ [(set (match_operand:SFD 0 "nonimmediate_operand")
+ (match_operand:SFD 1 "general_operand"))]
"TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
|| aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
- "@
- movi\\t%0.2s, #0
- fmov\\t%s0, %w1
- fmov\\t%w0, %s1
- fmov\\t%s0, %s1
- fmov\\t%s0, %1
- * return aarch64_output_scalar_simd_mov_immediate (operands[1], SImode);
- ldr\\t%s0, %1
- str\\t%s1, %0
- ldr\\t%w0, %1
- str\\t%w1, %0
- mov\\t%w0, %w1
- mov\\t%w0, %1"
- [(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconsts,neon_move,\
- f_loads,f_stores,load_4,store_4,mov_reg,\
- fconsts")
- (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , Y ; neon_move , simd ] movi\t%0.2s, #0
+ [ w , ?rY ; f_mcr , * ] fmov\t%s0, %w1
+ [ ?r , w ; f_mrc , * ] fmov\t%w0, %s1
+ [ w , w ; fmov , * ] fmov\t%s0, %s1
+ [ w , Ufc ; fconsts , * ] fmov\t%s0, %1
+ [ w , Uvi ; neon_move , simd ] << aarch64_output_scalar_simd_mov_immediate (operands[1], SImode);
+ [ w , m ; f_loads , * ] ldr\t%s0, %1
+ [ m , w ; f_stores , * ] str\t%s1, %0
+ [ r , m ; load_4 , * ] ldr\t%w0, %1
+ [ m , rY ; store_4 , * ] str\t%w1, %0
+ [ r , r ; mov_reg , * ] mov\t%w0, %w1
+ [ r , M ; fconsts , * ] mov\t%w0, %1
+ }
)
(define_insn "*mov<mode>_aarch64"
- [(set (match_operand:DFD 0 "nonimmediate_operand" "=w, w ,?r,w,w ,w ,w,m,r,m ,r,r")
- (match_operand:DFD 1 "general_operand" "Y , ?rY, w,w,Ufc,Uvi,m,w,m,rY,r,O"))]
+ [(set (match_operand:DFD 0 "nonimmediate_operand")
+ (match_operand:DFD 1 "general_operand"))]
"TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
|| aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
- "@
- movi\\t%d0, #0
- fmov\\t%d0, %x1
- fmov\\t%x0, %d1
- fmov\\t%d0, %d1
- fmov\\t%d0, %1
- * return aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
- ldr\\t%d0, %1
- str\\t%d1, %0
- ldr\\t%x0, %1
- str\\t%x1, %0
- mov\\t%x0, %x1
- * return aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? \"mov\\t%x0, %1\" : \"mov\\t%w0, %1\";"
- [(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconstd,neon_move,\
- f_loadd,f_stored,load_8,store_8,mov_reg,\
- fconstd")
- (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , Y ; neon_move , simd ] movi\t%d0, #0
+ [ w , ?rY ; f_mcr , * ] fmov\t%d0, %x1
+ [ ?r , w ; f_mrc , * ] fmov\t%x0, %d1
+ [ w , w ; fmov , * ] fmov\t%d0, %d1
+ [ w , Ufc ; fconstd , * ] fmov\t%d0, %1
+ [ w , Uvi ; neon_move , simd ] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
+ [ w , m ; f_loadd , * ] ldr\t%d0, %1
+ [ m , w ; f_stored , * ] str\t%d1, %0
+ [ r , m ; load_8 , * ] ldr\t%x0, %1
+ [ m , rY ; store_8 , * ] str\t%x1, %0
+ [ r , r ; mov_reg , * ] mov\t%x0, %x1
+ [ r , O ; fconstd , * ] << aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? "mov\t%x0, %1" : "mov\t%w0, %1";
+ }
)
(define_split
@@ -1635,7 +1627,22 @@
}
)
-(define_insn "aarch64_movmemdi"
+(define_expand "aarch64_movmemdi"
+ [(parallel
+ [(set (match_operand 2) (const_int 0))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (reg:CC CC_REGNUM))
+ (set (match_operand 0)
+ (unspec:BLK [(match_operand 1) (match_dup 2)] UNSPEC_MOVMEM))])]
+ "TARGET_MOPS"
+ {
+ operands[3] = XEXP (operands[0], 0);
+ operands[4] = XEXP (operands[1], 0);
+ }
+)
+
+(define_insn "*aarch64_movmemdi"
[(parallel [
(set (match_operand:DI 2 "register_operand" "+&r") (const_int 0))
(clobber (match_operand:DI 0 "register_operand" "+&r"))
@@ -1668,17 +1675,9 @@
&& INTVAL (sz_reg) < aarch64_mops_memmove_size_threshold)
FAIL;
- rtx addr_dst = XEXP (operands[0], 0);
- rtx addr_src = XEXP (operands[1], 0);
-
- if (!REG_P (sz_reg))
- sz_reg = force_reg (DImode, sz_reg);
- if (!REG_P (addr_dst))
- addr_dst = force_reg (DImode, addr_dst);
- if (!REG_P (addr_src))
- addr_src = force_reg (DImode, addr_src);
- emit_insn (gen_aarch64_movmemdi (addr_dst, addr_src, sz_reg));
- DONE;
+ if (aarch64_expand_cpymem_mops (operands, true))
+ DONE;
+ FAIL;
}
)
@@ -1728,36 +1727,34 @@
;; Operands 1 and 3 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
(define_insn "load_pair_sw_<SX:mode><SX2:mode>"
- [(set (match_operand:SX 0 "register_operand" "=r,w")
- (match_operand:SX 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:SX2 2 "register_operand" "=r,w")
- (match_operand:SX2 3 "memory_operand" "m,m"))]
+ [(set (match_operand:SX 0 "register_operand")
+ (match_operand:SX 1 "aarch64_mem_pair_operand"))
+ (set (match_operand:SX2 2 "register_operand")
+ (match_operand:SX2 3 "memory_operand"))]
"rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
GET_MODE_SIZE (<SX:MODE>mode)))"
- "@
- ldp\\t%w0, %w2, %z1
- ldp\\t%s0, %s2, %z1"
- [(set_attr "type" "load_8,neon_load1_2reg")
- (set_attr "arch" "*,fp")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type , arch ]
+ [ r , Ump , r , m ; load_8 , * ] ldp\t%w0, %w2, %z1
+ [ w , Ump , w , m ; neon_load1_2reg , fp ] ldp\t%s0, %s2, %z1
+ }
)
;; Storing different modes that can still be merged
(define_insn "load_pair_dw_<DX:mode><DX2:mode>"
- [(set (match_operand:DX 0 "register_operand" "=r,w")
- (match_operand:DX 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:DX2 2 "register_operand" "=r,w")
- (match_operand:DX2 3 "memory_operand" "m,m"))]
+ [(set (match_operand:DX 0 "register_operand")
+ (match_operand:DX 1 "aarch64_mem_pair_operand"))
+ (set (match_operand:DX2 2 "register_operand")
+ (match_operand:DX2 3 "memory_operand"))]
"rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
GET_MODE_SIZE (<DX:MODE>mode)))"
- "@
- ldp\\t%x0, %x2, %z1
- ldp\\t%d0, %d2, %z1"
- [(set_attr "type" "load_16,neon_load1_2reg")
- (set_attr "arch" "*,fp")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type , arch ]
+ [ r , Ump , r , m ; load_16 , * ] ldp\t%x0, %x2, %z1
+ [ w , Ump , w , m ; neon_load1_2reg , fp ] ldp\t%d0, %d2, %z1
+ }
)
(define_insn "load_pair_dw_tftf"
@@ -1778,36 +1775,34 @@
;; Operands 0 and 2 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
(define_insn "store_pair_sw_<SX:mode><SX2:mode>"
- [(set (match_operand:SX 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:SX 1 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))
- (set (match_operand:SX2 2 "memory_operand" "=m,m")
- (match_operand:SX2 3 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))]
+ [(set (match_operand:SX 0 "aarch64_mem_pair_operand")
+ (match_operand:SX 1 "aarch64_reg_zero_or_fp_zero"))
+ (set (match_operand:SX2 2 "memory_operand")
+ (match_operand:SX2 3 "aarch64_reg_zero_or_fp_zero"))]
"rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
GET_MODE_SIZE (<SX:MODE>mode)))"
- "@
- stp\\t%w1, %w3, %z0
- stp\\t%s1, %s3, %z0"
- [(set_attr "type" "store_8,neon_store1_2reg")
- (set_attr "arch" "*,fp")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type , arch ]
+ [ Ump , rYZ , m , rYZ ; store_8 , * ] stp\t%w1, %w3, %z0
+ [ Ump , w , m , w ; neon_store1_2reg , fp ] stp\t%s1, %s3, %z0
+ }
)
;; Storing different modes that can still be merged
(define_insn "store_pair_dw_<DX:mode><DX2:mode>"
- [(set (match_operand:DX 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:DX 1 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))
- (set (match_operand:DX2 2 "memory_operand" "=m,m")
- (match_operand:DX2 3 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))]
+ [(set (match_operand:DX 0 "aarch64_mem_pair_operand")
+ (match_operand:DX 1 "aarch64_reg_zero_or_fp_zero"))
+ (set (match_operand:DX2 2 "memory_operand")
+ (match_operand:DX2 3 "aarch64_reg_zero_or_fp_zero"))]
"rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
GET_MODE_SIZE (<DX:MODE>mode)))"
- "@
- stp\\t%x1, %x3, %z0
- stp\\t%d1, %d3, %z0"
- [(set_attr "type" "store_16,neon_store1_2reg")
- (set_attr "arch" "*,fp")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type , arch ]
+ [ Ump , rYZ , m , rYZ ; store_16 , * ] stp\t%x1, %x3, %z0
+ [ Ump , w , m , w ; neon_store1_2reg , fp ] stp\t%d1, %d3, %z0
+ }
)
(define_insn "store_pair_dw_tftf"
@@ -1935,13 +1930,13 @@
)
(define_insn "*extendsidi2_aarch64"
- [(set (match_operand:DI 0 "register_operand" "=r,r")
- (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+ [(set (match_operand:DI 0 "register_operand")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
""
- "@
- sxtw\t%0, %w1
- ldrsw\t%0, %1"
- [(set_attr "type" "extend,load_4")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ r , r ; extend ] sxtw\t%0, %w1
+ [ r , m ; load_4 ] ldrsw\t%0, %1
+ }
)
(define_insn "*load_pair_extendsidi2_aarch64"
@@ -1958,34 +1953,32 @@
)
(define_insn "*zero_extendsidi2_aarch64"
- [(set (match_operand:DI 0 "register_operand" "=r,r,w,w,r,w")
- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,r,m,w,w")))]
- ""
- "@
- uxtw\t%0, %w1
- ldr\t%w0, %1
- fmov\t%s0, %w1
- ldr\t%s0, %1
- fmov\t%w0, %s1
- fmov\t%s0, %s1"
- [(set_attr "type" "mov_reg,load_4,f_mcr,f_loads,f_mrc,fmov")
- (set_attr "arch" "*,*,fp,fp,fp,fp")]
+ [(set (match_operand:DI 0 "register_operand")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
+ ""
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ r , r ; mov_reg , * ] uxtw\t%0, %w1
+ [ r , m ; load_4 , * ] ldr\t%w0, %1
+ [ w , r ; f_mcr , fp ] fmov\t%s0, %w1
+ [ w , m ; f_loads , fp ] ldr\t%s0, %1
+ [ r , w ; f_mrc , fp ] fmov\t%w0, %s1
+ [ w , w ; fmov , fp ] fmov\t%s0, %s1
+ }
)
(define_insn "*load_pair_zero_extendsidi2_aarch64"
- [(set (match_operand:DI 0 "register_operand" "=r,w")
- (zero_extend:DI (match_operand:SI 1 "aarch64_mem_pair_operand" "Ump,Ump")))
- (set (match_operand:DI 2 "register_operand" "=r,w")
- (zero_extend:DI (match_operand:SI 3 "memory_operand" "m,m")))]
+ [(set (match_operand:DI 0 "register_operand")
+ (zero_extend:DI (match_operand:SI 1 "aarch64_mem_pair_operand")))
+ (set (match_operand:DI 2 "register_operand")
+ (zero_extend:DI (match_operand:SI 3 "memory_operand")))]
"rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
GET_MODE_SIZE (SImode)))"
- "@
- ldp\t%w0, %w2, %z1
- ldp\t%s0, %s2, %z1"
- [(set_attr "type" "load_8,neon_load1_2reg")
- (set_attr "arch" "*,fp")]
+ {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type , arch ]
+ [ r , Ump , r , m ; load_8 , * ] ldp\t%w0, %w2, %z1
+ [ w , Ump , w , m ; neon_load1_2reg , fp ] ldp\t%s0, %s2, %z1
+ }
)
(define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2"
@@ -1995,28 +1988,26 @@
)
(define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64"
- [(set (match_operand:GPI 0 "register_operand" "=r,r,r")
- (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
""
- "@
- sxt<SHORT:size>\t%<GPI:w>0, %w1
- ldrs<SHORT:size>\t%<GPI:w>0, %1
- smov\t%<GPI:w>0, %1.<SHORT:size>[0]"
- [(set_attr "type" "extend,load_4,neon_to_gp")
- (set_attr "arch" "*,*,fp")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ r , r ; extend , * ] sxt<SHORT:size>\t%<GPI:w>0, %w1
+ [ r , m ; load_4 , * ] ldrs<SHORT:size>\t%<GPI:w>0, %1
+ [ r , w ; neon_to_gp , fp ] smov\t%<GPI:w>0, %1.<SHORT:size>[0]
+ }
)
(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
- [(set (match_operand:GPI 0 "register_operand" "=r,r,w,r")
- (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
""
- "@
- and\t%<GPI:w>0, %<GPI:w>1, <SHORT:short_mask>
- ldr<SHORT:size>\t%w0, %1
- ldr\t%<SHORT:size>0, %1
- umov\t%w0, %1.<SHORT:size>[0]"
- [(set_attr "type" "logic_imm,load_4,f_loads,neon_to_gp")
- (set_attr "arch" "*,*,fp,fp")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ r , r ; logic_imm , * ] and\t%<GPI:w>0, %<GPI:w>1, <SHORT:short_mask>
+ [ r , m ; load_4 , * ] ldr<SHORT:size>\t%w0, %1
+ [ w , m ; f_loads , fp ] ldr\t%<SHORT:size>0, %1
+ [ r , w ; neon_to_gp , fp ] umov\t%w0, %1.<SHORT:size>[0]
+ }
)
(define_expand "<optab>qihi2"
@@ -2026,23 +2017,23 @@
)
(define_insn "*extendqihi2_aarch64"
- [(set (match_operand:HI 0 "register_operand" "=r,r")
- (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ [(set (match_operand:HI 0 "register_operand")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
""
- "@
- sxtb\t%w0, %w1
- ldrsb\t%w0, %1"
- [(set_attr "type" "extend,load_4")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ r , r ; extend ] sxtb\t%w0, %w1
+ [ r , m ; load_4 ] ldrsb\t%w0, %1
+ }
)
(define_insn "*zero_extendqihi2_aarch64"
- [(set (match_operand:HI 0 "register_operand" "=r,r")
- (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ [(set (match_operand:HI 0 "register_operand")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
""
- "@
- and\t%w0, %w1, 255
- ldrb\t%w0, %1"
- [(set_attr "type" "logic_imm,load_4")]
+ {@ [ cons: =0 , 1 ; attrs: type ]
+ [ r , r ; logic_imm ] and\t%w0, %w1, 255
+ [ r , m ; load_4 ] ldrb\t%w0, %1
+ }
)
;; -------------------------------------------------------------------
@@ -2088,38 +2079,37 @@
(define_insn "*add<mode>3_aarch64"
[(set
- (match_operand:GPI 0 "register_operand" "=rk,rk,w,rk,r,r,rk")
+ (match_operand:GPI 0 "register_operand")
(plus:GPI
- (match_operand:GPI 1 "register_operand" "%rk,rk,w,rk,rk,0,rk")
- (match_operand:GPI 2 "aarch64_pluslong_operand" "I,r,w,J,Uaa,Uai,Uav")))]
- ""
- "@
- add\\t%<w>0, %<w>1, %2
- add\\t%<w>0, %<w>1, %<w>2
- add\\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
- sub\\t%<w>0, %<w>1, #%n2
- #
- * return aarch64_output_sve_scalar_inc_dec (operands[2]);
- * return aarch64_output_sve_addvl_addpl (operands[2]);"
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_pluslong_operand")))]
+ ""
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ rk , %rk , I ; alu_imm , * ] add\t%<w>0, %<w>1, %2
+ [ rk , rk , r ; alu_sreg , * ] add\t%<w>0, %<w>1, %<w>2
+ [ w , w , w ; neon_add , simd ] add\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
+ [ rk , rk , J ; alu_imm , * ] sub\t%<w>0, %<w>1, #%n2
+ [ r , rk , Uaa ; multiple , * ] #
+ [ r , 0 , Uai ; alu_imm , sve ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+ [ rk , rk , Uav ; alu_imm , sve ] << aarch64_output_sve_addvl_addpl (operands[2]);
+ }
;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
- [(set_attr "type" "alu_imm,alu_sreg,neon_add,alu_imm,multiple,alu_imm,alu_imm")
- (set_attr "arch" "*,*,simd,*,*,sve,sve")]
)
;; zero_extend version of above
(define_insn "*addsi3_aarch64_uxtw"
[(set
- (match_operand:DI 0 "register_operand" "=rk,rk,rk,r")
+ (match_operand:DI 0 "register_operand")
(zero_extend:DI
- (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk,rk")
- (match_operand:SI 2 "aarch64_pluslong_operand" "I,r,J,Uaa"))))]
- ""
- "@
- add\\t%w0, %w1, %2
- add\\t%w0, %w1, %w2
- sub\\t%w0, %w1, #%n2
- #"
- [(set_attr "type" "alu_imm,alu_sreg,alu_imm,multiple")]
+ (plus:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "aarch64_pluslong_operand"))))]
+ ""
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ rk , %rk , I ; alu_imm ] add\t%w0, %w1, %2
+ [ rk , rk , r ; alu_sreg ] add\t%w0, %w1, %w2
+ [ rk , rk , J ; alu_imm ] sub\t%w0, %w1, #%n2
+ [ r , rk , Uaa ; multiple ] #
+ }
)
;; If there's a free register, and we can load the constant with a
@@ -2182,19 +2172,20 @@
;; this pattern.
(define_insn_and_split "*add<mode>3_poly_1"
[(set
- (match_operand:GPI 0 "register_operand" "=r,r,r,r,r,r,&r")
+ (match_operand:GPI 0 "register_operand")
(plus:GPI
- (match_operand:GPI 1 "register_operand" "%rk,rk,rk,rk,0,rk,rk")
- (match_operand:GPI 2 "aarch64_pluslong_or_poly_operand" "I,r,J,Uaa,Uai,Uav,Uat")))]
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_pluslong_or_poly_operand")))]
"TARGET_SVE && operands[0] != stack_pointer_rtx"
- "@
- add\\t%<w>0, %<w>1, %2
- add\\t%<w>0, %<w>1, %<w>2
- sub\\t%<w>0, %<w>1, #%n2
- #
- * return aarch64_output_sve_scalar_inc_dec (operands[2]);
- * return aarch64_output_sve_addvl_addpl (operands[2]);
- #"
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , %rk , I ; alu_imm ] add\t%<w>0, %<w>1, %2
+ [ r , rk , r ; alu_sreg ] add\t%<w>0, %<w>1, %<w>2
+ [ r , rk , J ; alu_imm ] sub\t%<w>0, %<w>1, #%n2
+ [ r , rk , Uaa ; multiple ] #
+ [ r , 0 , Uai ; alu_imm ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+ [ r , rk , Uav ; alu_imm ] << aarch64_output_sve_addvl_addpl (operands[2]);
+ [ &r , rk , Uat ; multiple ] #
+ }
"&& epilogue_completed
&& !reg_overlap_mentioned_p (operands[0], operands[1])
&& aarch64_split_add_offset_immediate (operands[2], <MODE>mode)"
@@ -2205,7 +2196,6 @@
DONE;
}
;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
- [(set_attr "type" "alu_imm,alu_sreg,alu_imm,multiple,alu_imm,alu_imm,multiple")]
)
(define_split
@@ -2360,82 +2350,83 @@
(define_insn "add<mode>3_compare0"
[(set (reg:CC_NZ CC_REGNUM)
(compare:CC_NZ
- (plus:GPI (match_operand:GPI 1 "register_operand" "%rk,rk,rk")
- (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J"))
+ (plus:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_operand"))
(const_int 0)))
- (set (match_operand:GPI 0 "register_operand" "=r,r,r")
+ (set (match_operand:GPI 0 "register_operand")
(plus:GPI (match_dup 1) (match_dup 2)))]
""
- "@
- adds\\t%<w>0, %<w>1, %<w>2
- adds\\t%<w>0, %<w>1, %2
- subs\\t%<w>0, %<w>1, #%n2"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , %rk , r ; alus_sreg ] adds\t%<w>0, %<w>1, %<w>2
+ [ r , rk , I ; alus_imm ] adds\t%<w>0, %<w>1, %2
+ [ r , rk , J ; alus_imm ] subs\t%<w>0, %<w>1, #%n2
+ }
)
;; zero_extend version of above
(define_insn "*addsi3_compare0_uxtw"
[(set (reg:CC_NZ CC_REGNUM)
(compare:CC_NZ
- (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk")
- (match_operand:SI 2 "aarch64_plus_operand" "r,I,J"))
+ (plus:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "aarch64_plus_operand"))
(const_int 0)))
- (set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (set (match_operand:DI 0 "register_operand")
(zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
""
- "@
- adds\\t%w0, %w1, %w2
- adds\\t%w0, %w1, %2
- subs\\t%w0, %w1, #%n2"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , %rk , r ; alus_sreg ] adds\t%w0, %w1, %w2
+ [ r , rk , I ; alus_imm ] adds\t%w0, %w1, %2
+ [ r , rk , J ; alus_imm ] subs\t%w0, %w1, #%n2
+ }
)
(define_insn "*add<mode>3_compareC_cconly"
[(set (reg:CC_C CC_REGNUM)
(compare:CC_C
(plus:GPI
- (match_operand:GPI 0 "register_operand" "r,r,r")
- (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J"))
+ (match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_plus_operand"))
(match_dup 0)))]
""
- "@
- cmn\\t%<w>0, %<w>1
- cmn\\t%<w>0, %1
- cmp\\t%<w>0, #%n1"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: 0 , 1 ; attrs: type ]
+ [ r , r ; alus_sreg ] cmn\t%<w>0, %<w>1
+ [ r , I ; alus_imm ] cmn\t%<w>0, %1
+ [ r , J ; alus_imm ] cmp\t%<w>0, #%n1
+ }
)
(define_insn "add<mode>3_compareC"
[(set (reg:CC_C CC_REGNUM)
(compare:CC_C
(plus:GPI
- (match_operand:GPI 1 "register_operand" "rk,rk,rk")
- (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J"))
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_operand"))
(match_dup 1)))
- (set (match_operand:GPI 0 "register_operand" "=r,r,r")
+ (set (match_operand:GPI 0 "register_operand")
(plus:GPI (match_dup 1) (match_dup 2)))]
""
- "@
- adds\\t%<w>0, %<w>1, %<w>2
- adds\\t%<w>0, %<w>1, %2
- subs\\t%<w>0, %<w>1, #%n2"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , rk , r ; alus_sreg ] adds\t%<w>0, %<w>1, %<w>2
+ [ r , rk , I ; alus_imm ] adds\t%<w>0, %<w>1, %2
+ [ r , rk , J ; alus_imm ] subs\t%<w>0, %<w>1, #%n2
+ }
)
(define_insn "*add<mode>3_compareV_cconly_imm"
[(set (reg:CC_V CC_REGNUM)
(compare:CC_V
(plus:<DWI>
- (sign_extend:<DWI> (match_operand:GPI 0 "register_operand" "r,r"))
- (match_operand:<DWI> 1 "const_scalar_int_operand" ""))
+ (sign_extend:<DWI> (match_operand:GPI 0 "register_operand"))
+ (match_operand:<DWI> 1 "const_scalar_int_operand"))
(sign_extend:<DWI>
(plus:GPI
(match_dup 0)
- (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))))]
+ (match_operand:GPI 2 "aarch64_plus_immediate")))))]
"INTVAL (operands[1]) == INTVAL (operands[2])"
- "@
- cmn\\t%<w>0, %<w>1
- cmp\\t%<w>0, #%n1"
+ {@ [ cons: 0 , 2 ]
+ [ r , I ] cmn\t%<w>0, %<w>1
+ [ r , J ] cmp\t%<w>0, #%n1
+ }
[(set_attr "type" "alus_imm")]
)
@@ -2456,17 +2447,17 @@
(compare:CC_V
(plus:<DWI>
(sign_extend:<DWI>
- (match_operand:GPI 1 "register_operand" "rk,rk"))
- (match_operand:GPI 2 "aarch64_plus_immediate" "I,J"))
+ (match_operand:GPI 1 "register_operand"))
+ (match_operand:GPI 2 "aarch64_plus_immediate"))
(sign_extend:<DWI>
(plus:GPI (match_dup 1) (match_dup 2)))))
- (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (set (match_operand:GPI 0 "register_operand")
(plus:GPI (match_dup 1) (match_dup 2)))]
""
- "@
- adds\\t%<w>0, %<w>1, %<w>2
- subs\\t%<w>0, %<w>1, #%n2"
- [(set_attr "type" "alus_imm,alus_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , rk , I ; alus_imm ] adds\t%<w>0, %<w>1, %<w>2
+ [ r , rk , J ; alus_imm ] subs\t%<w>0, %<w>1, #%n2
+ }
)
(define_insn "add<mode>3_compareV"
@@ -2582,15 +2573,15 @@
(define_insn "*add<mode>3nr_compare0"
[(set (reg:CC_NZ CC_REGNUM)
(compare:CC_NZ
- (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r,r")
- (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J"))
+ (plus:GPI (match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_plus_operand"))
(const_int 0)))]
""
- "@
- cmn\\t%<w>0, %<w>1
- cmn\\t%<w>0, %1
- cmp\\t%<w>0, #%n1"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: 0 , 1 ; attrs: type ]
+ [ %r , r ; alus_sreg ] cmn\t%<w>0, %<w>1
+ [ r , I ; alus_imm ] cmn\t%<w>0, %1
+ [ r , J ; alus_imm ] cmp\t%<w>0, #%n1
+ }
)
(define_insn "aarch64_sub<mode>_compare0"
@@ -2902,15 +2893,14 @@
)
(define_insn "subdi3"
- [(set (match_operand:DI 0 "register_operand" "=rk,w")
- (minus:DI (match_operand:DI 1 "register_operand" "rk,w")
- (match_operand:DI 2 "register_operand" "r,w")))]
+ [(set (match_operand:DI 0 "register_operand")
+ (minus:DI (match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "register_operand")))]
""
- "@
- sub\\t%x0, %x1, %x2
- sub\\t%d0, %d1, %d2"
- [(set_attr "type" "alu_sreg, neon_sub")
- (set_attr "arch" "*,simd")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ rk , rk , r ; alu_sreg , * ] sub\t%x0, %x1, %x2
+ [ w , w , w ; neon_sub , simd ] sub\t%d0, %d1, %d2
+ }
)
(define_expand "subv<GPI:mode>4"
@@ -2950,16 +2940,17 @@
(compare:CC_V
(sign_extend:<DWI>
(minus:GPI
- (match_operand:GPI 1 "register_operand" "rk,rk")
- (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_immediate")))
(minus:<DWI> (sign_extend:<DWI> (match_dup 1))
(match_dup 2))))
- (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (set (match_operand:GPI 0 "register_operand")
(minus:GPI (match_dup 1) (match_dup 2)))]
""
- "@
- subs\\t%<w>0, %<w>1, %2
- adds\\t%<w>0, %<w>1, #%n2"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ r , rk , I ] subs\t%<w>0, %<w>1, %2
+ [ r , rk , J ] adds\t%<w>0, %<w>1, #%n2
+ }
[(set_attr "type" "alus_sreg")]
)
@@ -3004,15 +2995,16 @@
[(set (reg:CC_V CC_REGNUM)
(compare:CC_V
(sign_extend:<DWI>
- (minus:GPI (match_operand:GPI 0 "register_operand" "r,r,r")
- (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))
+ (minus:GPI (match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_plus_operand")))
(minus:<DWI> (sign_extend:<DWI> (match_dup 0))
(sign_extend:<DWI> (match_dup 1)))))]
""
- "@
- cmp\\t%<w>0, %<w>1
- cmp\\t%<w>0, %1
- cmp\\t%<w>0, #%n1"
+ {@ [ cons: 0 , 1 ]
+ [ r , r ] cmp\t%<w>0, %<w>1
+ [ r , I ] cmp\t%<w>0, %1
+ [ r , J ] cmp\t%<w>0, #%n1
+ }
[(set_attr "type" "alus_sreg")]
)
@@ -3159,16 +3151,17 @@
(define_insn "sub<mode>3_compare1_imm"
[(set (reg:CC CC_REGNUM)
(compare:CC
- (match_operand:GPI 1 "register_operand" "rk,rk")
- (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))
- (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_immediate")))
+ (set (match_operand:GPI 0 "register_operand")
(plus:GPI
(match_dup 1)
- (match_operand:GPI 3 "aarch64_plus_immediate" "J,I")))]
+ (match_operand:GPI 3 "aarch64_plus_immediate")))]
"UINTVAL (operands[2]) == -UINTVAL (operands[3])"
- "@
- subs\\t%<w>0, %<w>1, %2
- adds\\t%<w>0, %<w>1, #%n2"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ r , rk , I , J ] subs\t%<w>0, %<w>1, %2
+ [ r , rk , J , I ] adds\t%<w>0, %<w>1, #%n2
+ }
[(set_attr "type" "alus_imm")]
)
@@ -3609,14 +3602,13 @@
)
(define_insn "neg<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r,w")
- (neg:GPI (match_operand:GPI 1 "register_operand" "r,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (neg:GPI (match_operand:GPI 1 "register_operand")))]
""
- "@
- neg\\t%<w>0, %<w>1
- neg\\t%<rtn>0<vas>, %<rtn>1<vas>"
- [(set_attr "type" "alu_sreg, neon_neg<q>")
- (set_attr "arch" "*,simd")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ r , r ; alu_sreg , * ] neg\t%<w>0, %<w>1
+ [ w , w ; neon_neg<q> , simd ] neg\t%<rtn>0<vas>, %<rtn>1<vas>
+ }
)
;; zero_extend version of above
@@ -3931,35 +3923,37 @@
(define_insn "cmp<mode>"
[(set (reg:CC CC_REGNUM)
- (compare:CC (match_operand:GPI 0 "register_operand" "rk,rk,rk")
- (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))]
+ (compare:CC (match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_plus_operand")))]
""
- "@
- cmp\\t%<w>0, %<w>1
- cmp\\t%<w>0, %1
- cmn\\t%<w>0, #%n1"
- [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+ {@ [ cons: 0 , 1 ; attrs: type ]
+ [ rk , r ; alus_sreg ] cmp\t%<w>0, %<w>1
+ [ rk , I ; alus_imm ] cmp\t%<w>0, %1
+ [ rk , J ; alus_imm ] cmn\t%<w>0, #%n1
+ }
)
(define_insn "fcmp<mode>"
[(set (reg:CCFP CC_REGNUM)
- (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w")
- (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+ (compare:CCFP (match_operand:GPF 0 "register_operand")
+ (match_operand:GPF 1 "aarch64_fp_compare_operand")))]
"TARGET_FLOAT"
- "@
- fcmp\\t%<s>0, #0.0
- fcmp\\t%<s>0, %<s>1"
+ {@ [ cons: 0 , 1 ]
+ [ w , Y ] fcmp\t%<s>0, #0.0
+ [ w , w ] fcmp\t%<s>0, %<s>1
+ }
[(set_attr "type" "fcmp<s>")]
)
(define_insn "fcmpe<mode>"
[(set (reg:CCFPE CC_REGNUM)
- (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w")
- (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+ (compare:CCFPE (match_operand:GPF 0 "register_operand")
+ (match_operand:GPF 1 "aarch64_fp_compare_operand")))]
"TARGET_FLOAT"
- "@
- fcmpe\\t%<s>0, #0.0
- fcmpe\\t%<s>0, %<s>1"
+ {@ [ cons: 0 , 1 ]
+ [ w , Y ] fcmpe\t%<s>0, #0.0
+ [ w , w ] fcmpe\t%<s>0, %<s>1
+ }
[(set_attr "type" "fcmp<s>")]
)
@@ -4146,47 +4140,47 @@
)
(define_insn "*cmov<mode>_insn"
- [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r,r,r,r")
+ [(set (match_operand:ALLI 0 "register_operand")
(if_then_else:ALLI
(match_operator 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])
- (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
- (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1")))]
+ [(match_operand 2 "cc_register") (const_int 0)])
+ (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1")
+ (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1")))]
"!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
|| (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
;; Final two alternatives should be unreachable, but included for completeness
- "@
- csel\\t%<w>0, %<w>3, %<w>4, %m1
- csinv\\t%<w>0, %<w>3, <w>zr, %m1
- csinv\\t%<w>0, %<w>4, <w>zr, %M1
- csinc\\t%<w>0, %<w>3, <w>zr, %m1
- csinc\\t%<w>0, %<w>4, <w>zr, %M1
- mov\\t%<w>0, -1
- mov\\t%<w>0, 1"
- [(set_attr "type" "csel, csel, csel, csel, csel, mov_imm, mov_imm")]
+ {@ [ cons: =0 , 3 , 4 ; attrs: type ]
+ [ r , rZ , rZ ; csel ] csel\t%<w>0, %<w>3, %<w>4, %m1
+ [ r , rZ , UsM ; csel ] csinv\t%<w>0, %<w>3, <w>zr, %m1
+ [ r , UsM , rZ ; csel ] csinv\t%<w>0, %<w>4, <w>zr, %M1
+ [ r , rZ , Ui1 ; csel ] csinc\t%<w>0, %<w>3, <w>zr, %m1
+ [ r , Ui1 , rZ ; csel ] csinc\t%<w>0, %<w>4, <w>zr, %M1
+ [ r , UsM , UsM ; mov_imm ] mov\t%<w>0, -1
+ [ r , Ui1 , Ui1 ; mov_imm ] mov\t%<w>0, 1
+ }
)
;; zero_extend version of above
(define_insn "*cmovsi_insn_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r")
+ [(set (match_operand:DI 0 "register_operand")
(zero_extend:DI
(if_then_else:SI
(match_operator 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])
- (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
- (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1"))))]
+ [(match_operand 2 "cc_register") (const_int 0)])
+ (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1")
+ (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1"))))]
"!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
|| (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
;; Final two alternatives should be unreachable, but included for completeness
- "@
- csel\\t%w0, %w3, %w4, %m1
- csinv\\t%w0, %w3, wzr, %m1
- csinv\\t%w0, %w4, wzr, %M1
- csinc\\t%w0, %w3, wzr, %m1
- csinc\\t%w0, %w4, wzr, %M1
- mov\\t%w0, -1
- mov\\t%w0, 1"
- [(set_attr "type" "csel, csel, csel, csel, csel, mov_imm, mov_imm")]
+ {@ [ cons: =0 , 3 , 4 ; attrs: type ]
+ [ r , rZ , rZ ; csel ] csel\t%w0, %w3, %w4, %m1
+ [ r , rZ , UsM ; csel ] csinv\t%w0, %w3, wzr, %m1
+ [ r , UsM , rZ ; csel ] csinv\t%w0, %w4, wzr, %M1
+ [ r , rZ , Ui1 ; csel ] csinc\t%w0, %w3, wzr, %m1
+ [ r , Ui1 , rZ ; csel ] csinc\t%w0, %w4, wzr, %M1
+ [ r , UsM , UsM ; mov_imm ] mov\t%w0, -1
+ [ r , Ui1 , Ui1 ; mov_imm ] mov\t%w0, 1
+ }
)
;; There are two canonical forms for `cmp ? -1 : a`.
@@ -4541,60 +4535,59 @@
)
(define_insn "<optab><mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r,rk,w")
- (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r,w")
- (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (LOGICAL:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_logical_operand")))]
""
- "@
- <logical>\\t%<w>0, %<w>1, %<w>2
- <logical>\\t%<w>0, %<w>1, %2
- <logical>\\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
- [(set_attr "type" "logic_reg,logic_imm,neon_logic")
- (set_attr "arch" "*,*,simd")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ r , %r , r ; logic_reg , * ] <logical>\t%<w>0, %<w>1, %<w>2
+ [ rk , r , <lconst> ; logic_imm , * ] <logical>\t%<w>0, %<w>1, %2
+ [ w , w , w ; neon_logic , simd ] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+ }
)
;; zero_extend version of above
(define_insn "*<optab>si3_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r,rk")
+ [(set (match_operand:DI 0 "register_operand")
(zero_extend:DI
- (LOGICAL:SI (match_operand:SI 1 "register_operand" "%r,r")
- (match_operand:SI 2 "aarch64_logical_operand" "r,K"))))]
+ (LOGICAL:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "aarch64_logical_operand"))))]
""
- "@
- <logical>\\t%w0, %w1, %w2
- <logical>\\t%w0, %w1, %2"
- [(set_attr "type" "logic_reg,logic_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , %r , r ; logic_reg ] <logical>\t%w0, %w1, %w2
+ [ rk , r , K ; logic_imm ] <logical>\t%w0, %w1, %2
+ }
)
(define_insn "*and<mode>3_compare0"
[(set (reg:CC_NZV CC_REGNUM)
(compare:CC_NZV
- (and:GPI (match_operand:GPI 1 "register_operand" "%r,r")
- (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>"))
+ (and:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_logical_operand"))
(const_int 0)))
- (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (set (match_operand:GPI 0 "register_operand")
(and:GPI (match_dup 1) (match_dup 2)))]
""
- "@
- ands\\t%<w>0, %<w>1, %<w>2
- ands\\t%<w>0, %<w>1, %2"
- [(set_attr "type" "logics_reg,logics_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , %r , r ; logics_reg ] ands\t%<w>0, %<w>1, %<w>2
+ [ r , r , <lconst> ; logics_imm ] ands\t%<w>0, %<w>1, %2
+ }
)
;; zero_extend version of above
(define_insn "*andsi3_compare0_uxtw"
[(set (reg:CC_NZV CC_REGNUM)
(compare:CC_NZV
- (and:SI (match_operand:SI 1 "register_operand" "%r,r")
- (match_operand:SI 2 "aarch64_logical_operand" "r,K"))
+ (and:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "aarch64_logical_operand"))
(const_int 0)))
- (set (match_operand:DI 0 "register_operand" "=r,r")
+ (set (match_operand:DI 0 "register_operand")
(zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
""
- "@
- ands\\t%w0, %w1, %w2
- ands\\t%w0, %w1, %2"
- [(set_attr "type" "logics_reg,logics_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , %r , r ; logics_reg ] ands\t%w0, %w1, %w2
+ [ r , r , K ; logics_imm ] ands\t%w0, %w1, %2
+ }
)
(define_insn "*and_<SHIFT:optab><mode>3_compare0"
@@ -4759,14 +4752,13 @@
)
(define_insn "one_cmpl<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r,w")
- (not:GPI (match_operand:GPI 1 "register_operand" "r,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (not:GPI (match_operand:GPI 1 "register_operand")))]
""
- "@
- mvn\\t%<w>0, %<w>1
- mvn\\t%0.8b, %1.8b"
- [(set_attr "type" "logic_reg,neon_logic")
- (set_attr "arch" "*,simd")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ r , r ; logic_reg , * ] mvn\t%<w>0, %<w>1
+ [ w , w ; neon_logic , simd ] mvn\t%0.8b, %1.8b
+ }
)
(define_insn "*one_cmpl_zero_extend"
@@ -4794,15 +4786,14 @@
;; Binary logical operators negating one operand, i.e. (a & !b), (a | !b).
(define_insn "*<NLOGICAL:optab>_one_cmpl<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r,w")
- (NLOGICAL:GPI (not:GPI (match_operand:GPI 1 "register_operand" "r,w"))
- (match_operand:GPI 2 "register_operand" "r,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (NLOGICAL:GPI (not:GPI (match_operand:GPI 1 "register_operand"))
+ (match_operand:GPI 2 "register_operand")))]
""
- "@
- <NLOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1
- <NLOGICAL:nlogical>\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
- [(set_attr "type" "logic_reg,neon_logic")
- (set_attr "arch" "*,simd")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ r , r , r ; logic_reg , * ] <NLOGICAL:nlogical>\t%<w>0, %<w>2, %<w>1
+ [ w , w , w ; neon_logic , simd ] <NLOGICAL:nlogical>\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+ }
)
(define_insn "*<NLOGICAL:optab>_one_cmplsidi3_ze"
@@ -5141,14 +5132,14 @@
(define_insn "*and<mode>3nr_compare0"
[(set (reg:CC_NZV CC_REGNUM)
(compare:CC_NZV
- (and:GPI (match_operand:GPI 0 "register_operand" "%r,r")
- (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>"))
+ (and:GPI (match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_logical_operand"))
(const_int 0)))]
""
- "@
- tst\\t%<w>0, %<w>1
- tst\\t%<w>0, %1"
- [(set_attr "type" "logics_reg,logics_imm")]
+ {@ [ cons: 0 , 1 ; attrs: type ]
+ [ %r , r ; logics_reg ] tst\t%<w>0, %<w>1
+ [ r , <lconst> ; logics_imm ] tst\t%<w>0, %1
+ }
)
(define_split
@@ -5431,36 +5422,33 @@
;; Logical left shift using SISD or Integer instruction
(define_insn "*aarch64_ashl_sisd_or_int_<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r,r,w,w")
+ [(set (match_operand:GPI 0 "register_operand")
(ashift:GPI
- (match_operand:GPI 1 "register_operand" "r,r,w,w")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,r,Us<cmode>,w")))]
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>")))]
""
- "@
- lsl\t%<w>0, %<w>1, %2
- lsl\t%<w>0, %<w>1, %<w>2
- shl\t%<rtn>0<vas>, %<rtn>1<vas>, %2
- ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>"
- [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>, neon_shift_reg<q>")
- (set_attr "arch" "*,*,simd,simd")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ r , r , Us<cmode> ; bfx , * ] lsl\t%<w>0, %<w>1, %2
+ [ r , r , r ; shift_reg , * ] lsl\t%<w>0, %<w>1, %<w>2
+ [ w , w , Us<cmode> ; neon_shift_imm<q> , simd ] shl\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+ [ w , w , w ; neon_shift_reg<q> , simd ] ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
+ }
)
;; Logical right shift using SISD or Integer instruction
(define_insn "*aarch64_lshr_sisd_or_int_<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r,r,w,&w,&w")
+ [(set (match_operand:GPI 0 "register_operand")
(lshiftrt:GPI
- (match_operand:GPI 1 "register_operand" "r,r,w,w,w")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>"
- "Us<cmode>,r,Us<cmode_simd>,w,0")))]
- ""
- "@
- lsr\t%<w>0, %<w>1, %2
- lsr\t%<w>0, %<w>1, %<w>2
- ushr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
- #
- #"
- [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")
- (set_attr "arch" "*,*,simd,simd,simd")]
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>")))]
+ ""
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ r , r , Us<cmode> ; bfx , * ] lsr\t%<w>0, %<w>1, %2
+ [ r , r , r ; shift_reg , * ] lsr\t%<w>0, %<w>1, %<w>2
+ [ w , w , Us<cmode_simd> ; neon_shift_imm<q> , simd ] ushr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+ [ &w , w , w ; neon_shift_reg<q> , simd ] #
+ [ &w , w , 0 ; neon_shift_reg<q> , simd ] #
+ }
)
(define_split
@@ -5495,20 +5483,18 @@
;; Arithmetic right shift using SISD or Integer instruction
(define_insn "*aarch64_ashr_sisd_or_int_<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r,r,w,&w,&w")
+ [(set (match_operand:GPI 0 "register_operand")
(ashiftrt:GPI
- (match_operand:GPI 1 "register_operand" "r,r,w,w,w")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_di"
- "Us<cmode>,r,Us<cmode_simd>,w,0")))]
- ""
- "@
- asr\t%<w>0, %<w>1, %2
- asr\t%<w>0, %<w>1, %<w>2
- sshr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
- #
- #"
- [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")
- (set_attr "arch" "*,*,simd,simd,simd")]
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_di")))]
+ ""
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ r , r , Us<cmode> ; bfx , * ] asr\t%<w>0, %<w>1, %2
+ [ r , r , r ; shift_reg , * ] asr\t%<w>0, %<w>1, %<w>2
+ [ w , w , Us<cmode_simd> ; neon_shift_imm<q> , simd ] sshr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+ [ &w , w , w ; neon_shift_reg<q> , simd ] #
+ [ &w , w , 0 ; neon_shift_reg<q> , simd ] #
+ }
)
(define_split
@@ -5592,15 +5578,15 @@
;; Rotate right
(define_insn "*ror<mode>3_insn"
- [(set (match_operand:GPI 0 "register_operand" "=r,r")
+ [(set (match_operand:GPI 0 "register_operand")
(rotatert:GPI
- (match_operand:GPI 1 "register_operand" "r,r")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,r")))]
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>")))]
""
- "@
- ror\\t%<w>0, %<w>1, %2
- ror\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "type" "rotate_imm,shift_reg")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , r , Us<cmode> ; rotate_imm ] ror\t%<w>0, %<w>1, %2
+ [ r , r , r ; shift_reg ] ror\t%<w>0, %<w>1, %<w>2
+ }
)
(define_insn "*rol<mode>3_insn"
@@ -5617,15 +5603,15 @@
;; zero_extend version of shifts
(define_insn "*<optab>si3_insn_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r,r")
+ [(set (match_operand:DI 0 "register_operand")
(zero_extend:DI (SHIFT_no_rotate:SI
- (match_operand:SI 1 "register_operand" "r,r")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "Uss,r"))))]
+ (match_operand:SI 1 "register_operand")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_si"))))]
""
- "@
- <shift>\\t%w0, %w1, %2
- <shift>\\t%w0, %w1, %w2"
- [(set_attr "type" "bfx,shift_reg")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , r , Uss ; bfx ] <shift>\t%w0, %w1, %2
+ [ r , r , r ; shift_reg ] <shift>\t%w0, %w1, %w2
+ }
)
;; zero_extend version of rotate right
@@ -6490,14 +6476,13 @@
;; and making r = w more expensive
(define_insn "<optab>_trunc<fcvt_target><GPI:mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=w,?r")
- (FIXUORS:GPI (match_operand:<FCVT_TARGET> 1 "register_operand" "w,w")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (FIXUORS:GPI (match_operand:<FCVT_TARGET> 1 "register_operand")))]
"TARGET_FLOAT"
- "@
- fcvtz<su>\t%<s>0, %<s>1
- fcvtz<su>\t%<w>0, %<s>1"
- [(set_attr "type" "neon_fp_to_int_s,f_cvtf2i")
- (set_attr "arch" "simd,fp")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , w ; neon_fp_to_int_s , simd ] fcvtz<su>\t%<s>0, %<s>1
+ [ ?r , w ; f_cvtf2i , fp ] fcvtz<su>\t%<w>0, %<s>1
+ }
)
;; Convert HF -> SI or DI
@@ -6570,14 +6555,13 @@
;; Equal width integer to fp conversion.
(define_insn "<optab><fcvt_target><GPF:mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w,w")
- (FLOATUORS:GPF (match_operand:<FCVT_TARGET> 1 "register_operand" "w,?r")))]
+ [(set (match_operand:GPF 0 "register_operand")
+ (FLOATUORS:GPF (match_operand:<FCVT_TARGET> 1 "register_operand")))]
"TARGET_FLOAT"
- "@
- <su_optab>cvtf\t%<GPF:s>0, %<s>1
- <su_optab>cvtf\t%<GPF:s>0, %<w1>1"
- [(set_attr "type" "neon_int_to_fp_<Vetype>,f_cvti2f")
- (set_attr "arch" "simd,fp")]
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , w ; neon_int_to_fp_<Vetype> , simd ] <su_optab>cvtf\t%<GPF:s>0, %<s>1
+ [ w , ?r ; f_cvti2f , fp ] <su_optab>cvtf\t%<GPF:s>0, %<w1>1
+ }
)
;; Unequal width integer to fp conversions.
@@ -6654,29 +6638,27 @@
;; Convert between fixed-point and floating-point (scalar modes)
(define_insn "<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3"
- [(set (match_operand:<GPF:FCVT_TARGET> 0 "register_operand" "=r, w")
- (unspec:<GPF:FCVT_TARGET> [(match_operand:GPF 1 "register_operand" "w, w")
- (match_operand:SI 2 "immediate_operand" "i, i")]
+ [(set (match_operand:<GPF:FCVT_TARGET> 0 "register_operand")
+ (unspec:<GPF:FCVT_TARGET> [(match_operand:GPF 1 "register_operand")
+ (match_operand:SI 2 "immediate_operand")]
FCVT_F2FIXED))]
""
- "@
- <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:w1>0, %<GPF:s>1, #%2
- <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:s>0, %<GPF:s>1, #%2"
- [(set_attr "type" "f_cvtf2i, neon_fp_to_int_<GPF:Vetype>")
- (set_attr "arch" "fp,simd")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ r , w , i ; f_cvtf2i , fp ] <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:w1>0, %<GPF:s>1, #%2
+ [ w , w , i ; neon_fp_to_int_<GPF:Vetype> , simd ] <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:s>0, %<GPF:s>1, #%2
+ }
)
(define_insn "<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3"
- [(set (match_operand:<GPI:FCVT_TARGET> 0 "register_operand" "=w, w")
- (unspec:<GPI:FCVT_TARGET> [(match_operand:GPI 1 "register_operand" "r, w")
- (match_operand:SI 2 "immediate_operand" "i, i")]
+ [(set (match_operand:<GPI:FCVT_TARGET> 0 "register_operand")
+ (unspec:<GPI:FCVT_TARGET> [(match_operand:GPI 1 "register_operand")
+ (match_operand:SI 2 "immediate_operand")]
FCVT_FIXED2F))]
""
- "@
- <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:w>1, #%2
- <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:v>1, #%2"
- [(set_attr "type" "f_cvti2f, neon_int_to_fp_<GPI:Vetype>")
- (set_attr "arch" "fp,simd")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
+ [ w , r , i ; f_cvti2f , fp ] <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:w>1, #%2
+ [ w , w , i ; neon_int_to_fp_<GPI:Vetype> , simd ] <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:v>1, #%2
+ }
)
(define_insn "<FCVT_F2FIXED:fcvt_fixed_insn>hf<mode>3"
@@ -6849,14 +6831,14 @@
)
(define_insn "*aarch64_<optab><mode>3_cssc"
- [(set (match_operand:GPI 0 "register_operand" "=r,r")
- (MAXMIN:GPI (match_operand:GPI 1 "register_operand" "r,r")
- (match_operand:GPI 2 "aarch64_<su>minmax_operand" "r,U<su>m")))]
+ [(set (match_operand:GPI 0 "register_operand")
+ (MAXMIN:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_<su>minmax_operand")))]
"TARGET_CSSC"
- "@
- <optab>\\t%<w>0, %<w>1, %<w>2
- <optab>\\t%<w>0, %<w>1, %2"
- [(set_attr "type" "alu_sreg,alu_imm")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+ [ r , r , r ; alu_sreg ] <optab>\t%<w>0, %<w>1, %<w>2
+ [ r , r , U<su>m ; alu_imm ] <optab>\t%<w>0, %<w>1, %2
+ }
)
(define_insn "*aarch64_<optab><mode>3_zero"
@@ -6949,18 +6931,18 @@
)
(define_insn "copysign<GPF:mode>3_insn"
- [(set (match_operand:GPF 0 "register_operand" "=w,w,w,r")
- (unspec:GPF [(match_operand:GPF 1 "register_operand" "w,0,w,r")
- (match_operand:GPF 2 "register_operand" "w,w,0,0")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "0,w,w,X")]
+ [(set (match_operand:GPF 0 "register_operand")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand")
+ (match_operand:GPF 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
UNSPEC_COPYSIGN))]
"TARGET_SIMD"
- "@
- bsl\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
- bit\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
- bif\\t%0.<Vbtype>, %1.<Vbtype>, %3.<Vbtype>
- bfxil\\t%<w1>0, %<w1>1, #0, <sizem1>"
- [(set_attr "type" "neon_bsl<q>,neon_bsl<q>,neon_bsl<q>,bfm")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: type ]
+ [ w , w , w , 0 ; neon_bsl<q> ] bsl\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+ [ w , 0 , w , w ; neon_bsl<q> ] bit\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+ [ w , w , 0 , w ; neon_bsl<q> ] bif\t%0.<Vbtype>, %1.<Vbtype>, %3.<Vbtype>
+ [ r , r , 0 , X ; bfm ] bfxil\t%<w1>0, %<w1>1, #0, <sizem1>
+ }
)
@@ -6971,31 +6953,18 @@
;; EOR v0.8B, v0.8B, v3.8B
;;
-(define_expand "xorsign<mode>3"
+(define_expand "@xorsign<mode>3"
[(match_operand:GPF 0 "register_operand")
(match_operand:GPF 1 "register_operand")
(match_operand:GPF 2 "register_operand")]
"TARGET_SIMD"
{
-
- machine_mode imode = <V_INT_EQUIV>mode;
- rtx mask = gen_reg_rtx (imode);
- rtx op1x = gen_reg_rtx (imode);
- rtx op2x = gen_reg_rtx (imode);
-
- int bits = GET_MODE_BITSIZE (<MODE>mode) - 1;
- emit_move_insn (mask, GEN_INT (trunc_int_for_mode (HOST_WIDE_INT_M1U << bits,
- imode)));
-
- emit_insn (gen_and<v_int_equiv>3 (op2x, mask,
- lowpart_subreg (imode, operands[2],
- <MODE>mode)));
- emit_insn (gen_xor<v_int_equiv>3 (op1x,
- lowpart_subreg (imode, operands[1],
- <MODE>mode),
- op2x));
+ rtx tmp = gen_reg_rtx (<VCONQ>mode);
+ rtx op1 = lowpart_subreg (<VCONQ>mode, operands[1], <MODE>mode);
+ rtx op2 = lowpart_subreg (<VCONQ>mode, operands[2], <MODE>mode);
+ emit_insn (gen_xorsign3 (<VCONQ>mode, tmp, op1, op2));
emit_move_insn (operands[0],
- lowpart_subreg (<MODE>mode, op1x, imode));
+ lowpart_subreg (<MODE>mode, tmp, <VCONQ>mode));
DONE;
}
)
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
index 2101c5a..f5a5182 100644
--- a/gcc/config/aarch64/aarch64.opt
+++ b/gcc/config/aarch64/aarch64.opt
@@ -339,39 +339,24 @@ Target Joined UInteger Var(aarch64_vect_unroll_limit) Init(4) Param
Limit how much the autovectorizer may unroll a loop.
-param=aarch64-ldp-policy=
-Target Joined Var(aarch64_ldp_policy_param) Enum(aarch64_ldp_policy) Init(LDP_POLICY_DEFAULT) Param
+Target Joined Var(aarch64_ldp_policy_param) Enum(aarch64_ldp_stp_policy) Init(AARCH64_LDP_STP_POLICY_DEFAULT) Param
--param=aarch64-ldp-policy=[default|always|never|aligned] Fine-grained policy for load pairs.
-Enum
-Name(aarch64_ldp_policy) Type(enum aarch64_ldp_policy) UnknownError(unknown aarch64_ldp_policy mode %qs)
-
-EnumValue
-Enum(aarch64_ldp_policy) String(default) Value(LDP_POLICY_DEFAULT)
-
-EnumValue
-Enum(aarch64_ldp_policy) String(always) Value(LDP_POLICY_ALWAYS)
-
-EnumValue
-Enum(aarch64_ldp_policy) String(never) Value(LDP_POLICY_NEVER)
-
-EnumValue
-Enum(aarch64_ldp_policy) String(aligned) Value(LDP_POLICY_ALIGNED)
-
-param=aarch64-stp-policy=
-Target Joined Var(aarch64_stp_policy_param) Enum(aarch64_stp_policy) Init(STP_POLICY_DEFAULT) Param
+Target Joined Var(aarch64_stp_policy_param) Enum(aarch64_ldp_stp_policy) Init(AARCH64_LDP_STP_POLICY_DEFAULT) Param
--param=aarch64-stp-policy=[default|always|never|aligned] Fine-grained policy for store pairs.
Enum
-Name(aarch64_stp_policy) Type(enum aarch64_stp_policy) UnknownError(unknown aarch64_stp_policy mode %qs)
+Name(aarch64_ldp_stp_policy) Type(enum aarch64_ldp_stp_policy) UnknownError(unknown LDP/STP policy %qs)
EnumValue
-Enum(aarch64_stp_policy) String(default) Value(STP_POLICY_DEFAULT)
+Enum(aarch64_ldp_stp_policy) String(default) Value(AARCH64_LDP_STP_POLICY_DEFAULT)
EnumValue
-Enum(aarch64_stp_policy) String(always) Value(STP_POLICY_ALWAYS)
+Enum(aarch64_ldp_stp_policy) String(always) Value(AARCH64_LDP_STP_POLICY_ALWAYS)
EnumValue
-Enum(aarch64_stp_policy) String(never) Value(STP_POLICY_NEVER)
+Enum(aarch64_ldp_stp_policy) String(never) Value(AARCH64_LDP_STP_POLICY_NEVER)
EnumValue
-Enum(aarch64_stp_policy) String(aligned) Value(STP_POLICY_ALIGNED)
+Enum(aarch64_ldp_stp_policy) String(aligned) Value(AARCH64_LDP_STP_POLICY_ALIGNED)
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 9398d71..2451d8c 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1428,7 +1428,8 @@
(V4HF "V8HF") (V8HF "V8HF")
(V2SF "V4SF") (V4SF "V4SF")
(V2DF "V2DF") (SI "V4SI")
- (HI "V8HI") (QI "V16QI")])
+ (HI "V8HI") (QI "V16QI")
+ (SF "V4SF") (DF "V2DF")])
;; Half modes of all vector modes.
(define_mode_attr VHALF [(V8QI "V4QI") (V16QI "V8QI")
diff --git a/gcc/config/arc/arc-passes.def b/gcc/config/arc/arc-passes.def
index 0cb5d56..3f9222a 100644
--- a/gcc/config/arc/arc-passes.def
+++ b/gcc/config/arc/arc-passes.def
@@ -17,12 +17,6 @@
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* First target dependent ARC if-conversion pass. */
-INSERT_PASS_AFTER (pass_delay_slots, 1, pass_arc_ifcvt);
-
-/* Second target dependent ARC if-conversion pass. */
-INSERT_PASS_BEFORE (pass_shorten_branches, 1, pass_arc_ifcvt);
-
/* Find annulled delay insns and convert them to use the appropriate
predicate. This allows branch shortening to size up these
instructions properly. */
diff --git a/gcc/config/arc/arc-protos.h b/gcc/config/arc/arc-protos.h
index 4f2db7f..026ea99 100644
--- a/gcc/config/arc/arc-protos.h
+++ b/gcc/config/arc/arc-protos.h
@@ -35,7 +35,7 @@ extern const char *arc_output_libcall (const char *);
extern int arc_output_commutative_cond_exec (rtx *operands, bool);
extern bool arc_expand_cpymem (rtx *operands);
extern bool prepare_move_operands (rtx *operands, machine_mode mode);
-extern void emit_shift (enum rtx_code, rtx, rtx, rtx);
+extern bool arc_pre_reload_split (void);
extern void arc_expand_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
extern void arc_split_compare_and_swap (rtx *);
extern void arc_expand_compare_and_swap (rtx *);
@@ -52,8 +52,6 @@ extern bool arc_can_use_return_insn (void);
extern bool arc_split_move_p (rtx *);
#endif /* RTX_CODE */
-extern bool arc_ccfsm_branch_deleted_p (void);
-extern void arc_ccfsm_record_branch_deleted (void);
void arc_asm_output_aligned_decl_local (FILE *, tree, const char *,
unsigned HOST_WIDE_INT,
@@ -67,7 +65,6 @@ extern bool arc_raw_symbolic_reference_mentioned_p (rtx, bool);
extern bool arc_is_longcall_p (rtx);
extern bool arc_is_shortcall_p (rtx);
extern bool valid_brcc_with_delay_p (rtx *);
-extern bool arc_ccfsm_cond_exec_p (void);
extern rtx disi_highpart (rtx);
extern int arc_adjust_insn_length (rtx_insn *, int, bool);
extern int arc_corereg_hazard (rtx, rtx);
@@ -76,15 +73,10 @@ extern int arc_write_ext_corereg (rtx);
extern rtx gen_acc1 (void);
extern rtx gen_acc2 (void);
extern bool arc_branch_size_unknown_p (void);
-struct arc_ccfsm;
-extern void arc_ccfsm_record_condition (rtx, bool, rtx_insn *,
- struct arc_ccfsm *);
extern void arc_expand_prologue (void);
extern void arc_expand_epilogue (int);
extern void arc_init_expanders (void);
extern int arc_check_millicode (rtx op, int offset, int load_p);
-extern void arc_clear_unalign (void);
-extern void arc_toggle_unalign (void);
extern void split_subsi (rtx *);
extern void arc_split_move (rtx *);
extern const char *arc_short_long (rtx_insn *insn, const char *, const char *);
@@ -106,5 +98,4 @@ extern bool arc_is_jli_call_p (rtx);
extern void arc_file_end (void);
extern bool arc_is_secure_call_p (rtx);
-rtl_opt_pass * make_pass_arc_ifcvt (gcc::context *ctxt);
rtl_opt_pass * make_pass_arc_predicate_delay_insns (gcc::context *ctxt);
diff --git a/gcc/config/arc/arc.cc b/gcc/config/arc/arc.cc
index f8c9bf1..00427d8 100644
--- a/gcc/config/arc/arc.cc
+++ b/gcc/config/arc/arc.cc
@@ -101,16 +101,6 @@ HARD_REG_SET overrideregs;
/* Array of valid operand punctuation characters. */
char arc_punct_chars[256];
-/* State used by arc_ccfsm_advance to implement conditional execution. */
-struct GTY (()) arc_ccfsm
-{
- int state;
- int cc;
- rtx cond;
- rtx_insn *target_insn;
- int target_label;
-};
-
/* Status of the IRQ_CTRL_AUX register. */
typedef struct irq_ctrl_saved_t
{
@@ -143,36 +133,6 @@ static irq_ctrl_saved_t irq_ctrl_saved;
/* Number of registers in second bank for FIRQ support. */
static int rgf_banked_register_count;
-#define arc_ccfsm_current cfun->machine->ccfsm_current
-
-#define ARC_CCFSM_BRANCH_DELETED_P(STATE) \
- ((STATE)->state == 1 || (STATE)->state == 2)
-
-/* Indicate we're conditionalizing insns now. */
-#define ARC_CCFSM_RECORD_BRANCH_DELETED(STATE) \
- ((STATE)->state += 2)
-
-#define ARC_CCFSM_COND_EXEC_P(STATE) \
- ((STATE)->state == 3 || (STATE)->state == 4 || (STATE)->state == 5 \
- || current_insn_predicate)
-
-/* Check if INSN has a 16 bit opcode considering struct arc_ccfsm *STATE. */
-#define CCFSM_ISCOMPACT(INSN,STATE) \
- (ARC_CCFSM_COND_EXEC_P (STATE) \
- ? (get_attr_iscompact (INSN) == ISCOMPACT_TRUE \
- || get_attr_iscompact (INSN) == ISCOMPACT_TRUE_LIMM) \
- : get_attr_iscompact (INSN) != ISCOMPACT_FALSE)
-
-/* Likewise, but also consider that INSN might be in a delay slot of JUMP. */
-#define CCFSM_DBR_ISCOMPACT(INSN,JUMP,STATE) \
- ((ARC_CCFSM_COND_EXEC_P (STATE) \
- || (JUMP_P (JUMP) \
- && INSN_ANNULLED_BRANCH_P (JUMP) \
- && (TARGET_AT_DBR_CONDEXEC || INSN_FROM_TARGET_P (INSN)))) \
- ? (get_attr_iscompact (INSN) == ISCOMPACT_TRUE \
- || get_attr_iscompact (INSN) == ISCOMPACT_TRUE_LIMM) \
- : get_attr_iscompact (INSN) != ISCOMPACT_FALSE)
-
/* Start enter/leave register range. */
#define ENTER_LEAVE_START_REG 13
@@ -218,11 +178,6 @@ static int rgf_banked_register_count;
/* ARC600 MULHI register. */
#define AUX_MULHI 0x12
-/* A nop is needed between a 4 byte insn that sets the condition codes and
- a branch that uses them (the same isn't true for an 8 byte insn that sets
- the condition codes). Set by arc_ccfsm_advance. Used by
- arc_print_operand. */
-
static int get_arc_condition_code (rtx);
static tree arc_handle_interrupt_attribute (tree *, tree, tree, int, bool *);
@@ -423,11 +378,6 @@ typedef struct GTY (()) machine_function
{
unsigned int fn_type;
struct arc_frame_info frame_info;
- /* To keep track of unalignment caused by short insns. */
- int unalign;
- struct arc_ccfsm ccfsm_current;
- /* Map from uid to ccfsm state during branch shortening. */
- rtx ccfsm_current_insn;
char arc_reorg_started;
char prescan_initialized;
} machine_function;
@@ -964,53 +914,6 @@ arc_secondary_reload_conv (rtx reg, rtx mem, rtx scratch, bool store_p)
return;
}
-static unsigned arc_ifcvt (void);
-
-namespace {
-
-const pass_data pass_data_arc_ifcvt =
-{
- RTL_PASS,
- "arc_ifcvt", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- TV_IFCVT2, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_df_finish /* todo_flags_finish */
-};
-
-class pass_arc_ifcvt : public rtl_opt_pass
-{
- public:
- pass_arc_ifcvt (gcc::context *ctxt)
- : rtl_opt_pass (pass_data_arc_ifcvt, ctxt)
- {}
-
- /* opt_pass methods: */
- opt_pass * clone ()
- {
- return new pass_arc_ifcvt (m_ctxt);
- }
- virtual unsigned int execute (function *)
- {
- return arc_ifcvt ();
- }
- virtual bool gate (function *)
- {
- return (optimize > 1 && !TARGET_NO_COND_EXEC);
- }
-};
-
-} // anon namespace
-
-rtl_opt_pass *
-make_pass_arc_ifcvt (gcc::context *ctxt)
-{
- return new pass_arc_ifcvt (ctxt);
-}
-
static unsigned arc_predicate_delay_insns (void);
namespace {
@@ -1128,12 +1031,9 @@ arc_init (void)
/* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
memset (arc_punct_chars, 0, sizeof (arc_punct_chars));
- arc_punct_chars['#'] = 1;
arc_punct_chars['*'] = 1;
arc_punct_chars['?'] = 1;
arc_punct_chars['!'] = 1;
- arc_punct_chars['^'] = 1;
- arc_punct_chars['&'] = 1;
arc_punct_chars['+'] = 1;
arc_punct_chars['_'] = 1;
}
@@ -1662,7 +1562,7 @@ arc_select_cc_mode (enum rtx_code op, rtx x, rtx y)
/* add.f for if (a+b) */
if (mode == SImode
- && GET_CODE (y) == NEG
+ && GET_CODE (x) == NEG
&& (op == EQ || op == NE))
return CC_ZNmode;
@@ -4239,18 +4139,16 @@ arc_unspec_offset (rtx loc, int unspec)
unspec));
}
-/* !TARGET_BARREL_SHIFTER support. */
-/* Emit a shift insn to set OP0 to OP1 shifted by OP2; CODE specifies what
- kind of shift. */
+/* Predicate for pre-reload splitters with associated instructions,
+ which can match any time before the split1 pass (usually combine),
+ then are unconditionally split in that pass and should not be
+ matched again afterwards. */
-void
-emit_shift (enum rtx_code code, rtx op0, rtx op1, rtx op2)
+bool
+arc_pre_reload_split (void)
{
- rtx shift = gen_rtx_fmt_ee (code, SImode, op1, op2);
- rtx pat
- = ((shift4_operator (shift, SImode) ? gen_shift_si3 : gen_shift_si3_loop)
- (op0, op1, op2, shift));
- emit_insn (pat);
+ return (can_create_pseudo_p ()
+ && !(cfun->curr_properties & PROP_rtl_split_insns));
}
/* Output the assembler code for doing a shift.
@@ -4516,11 +4414,8 @@ static int output_sdata = 0;
'S': Scalled immediate, to be used in pair with 's'.
'N': Negative immediate, to be used in pair with 's'.
'x': size of bit field
- '#': condbranch delay slot suffix
'*': jump delay slot suffix
'?' : nonjump-insn suffix for conditional execution or short instruction
- '!' : jump / call suffix for conditional execution or short instruction
- '`': fold constant inside unary o-perator, re-recognize, and emit.
'd'
'D'
'R': Second word
@@ -4534,7 +4429,6 @@ static int output_sdata = 0;
'V': cache bypass indicator for volatile
'P'
'F'
- '^'
'O': Operator
'o': original symbol - no @ prepending. */
@@ -4647,48 +4541,26 @@ arc_print_operand (FILE *file, rtx x, int code)
output_operand_lossage ("invalid operand to %%s code");
return;
- case '#' :
- /* Conditional branches depending on condition codes.
- Note that this is only for branches that were known to depend on
- condition codes before delay slot scheduling;
- out-of-range brcc / bbit expansions should use '*'.
- This distinction is important because of the different
- allowable delay slot insns and the output of the delay suffix
- for TARGET_AT_DBR_COND_EXEC. */
case '*' :
/* Unconditional branches / branches not depending on condition codes.
This could also be a CALL_INSN.
Output the appropriate delay slot suffix. */
if (final_sequence && final_sequence->len () != 1)
{
- rtx_insn *jump = final_sequence->insn (0);
rtx_insn *delay = final_sequence->insn (1);
/* For TARGET_PAD_RETURN we might have grabbed the delay insn. */
if (delay->deleted ())
return;
- if (JUMP_P (jump) && INSN_ANNULLED_BRANCH_P (jump))
- fputs (INSN_FROM_TARGET_P (delay) ? ".d"
- : TARGET_AT_DBR_CONDEXEC && code == '#' ? ".d"
- : get_attr_type (jump) == TYPE_RETURN && code == '#' ? ""
- : ".nd",
- file);
- else
- fputs (".d", file);
+ fputs (".d", file);
}
return;
+
case '?' : /* with leading "." */
case '!' : /* without leading "." */
- /* This insn can be conditionally executed. See if the ccfsm machinery
- says it should be conditionalized.
- If it shouldn't, we'll check the compact attribute if this insn
- has a short variant, which may be used depending on code size and
- alignment considerations. */
if (current_insn_predicate)
- arc_ccfsm_current.cc
- = get_arc_condition_code (current_insn_predicate);
- if (ARC_CCFSM_COND_EXEC_P (&arc_ccfsm_current))
{
+ int cc = get_arc_condition_code (current_insn_predicate);
/* Is this insn in a delay slot sequence? */
if (!final_sequence || XVECLEN (final_sequence, 0) < 2
|| current_insn_predicate
@@ -4698,19 +4570,16 @@ arc_print_operand (FILE *file, rtx x, int code)
/* This insn isn't in a delay slot sequence, or conditionalized
independently of its position in a delay slot. */
fprintf (file, "%s%s",
- code == '?' ? "." : "",
- arc_condition_codes[arc_ccfsm_current.cc]);
+ code == '?' ? "." : "", arc_condition_codes[cc]);
/* If this is a jump, there are still short variants. However,
only beq_s / bne_s have the same offset range as b_s,
and the only short conditional returns are jeq_s and jne_s. */
if (code == '!'
- && (arc_ccfsm_current.cc == ARC_CC_EQ
- || arc_ccfsm_current.cc == ARC_CC_NE
- || 0 /* FIXME: check if branch in 7 bit range. */))
+ && (cc == ARC_CC_EQ || cc == ARC_CC_NE))
output_short_suffix (file);
}
else if (code == '!') /* Jump with delay slot. */
- fputs (arc_condition_codes[arc_ccfsm_current.cc], file);
+ fputs (arc_condition_codes[cc], file);
else /* An Instruction in a delay slot of a jump or call. */
{
rtx jump = XVECEXP (final_sequence, 0, 0);
@@ -4723,27 +4592,24 @@ arc_print_operand (FILE *file, rtx x, int code)
if (INSN_FROM_TARGET_P (insn))
fprintf (file, "%s%s",
code == '?' ? "." : "",
- arc_condition_codes[ARC_INVERSE_CONDITION_CODE (arc_ccfsm_current.cc)]);
+ arc_condition_codes[ARC_INVERSE_CONDITION_CODE (cc)]);
else
fprintf (file, "%s%s",
code == '?' ? "." : "",
- arc_condition_codes[arc_ccfsm_current.cc]);
- if (arc_ccfsm_current.state == 5)
- arc_ccfsm_current.state = 0;
+ arc_condition_codes[cc]);
}
else
- /* This insn is executed for either path, so don't
- conditionalize it at all. */
- output_short_suffix (file);
-
+ {
+ /* This insn is executed for either path, so don't
+ conditionalize it at all. */
+ output_short_suffix (file);
+ }
}
}
else
output_short_suffix (file);
return;
- case'`':
- /* FIXME: fold constant inside unary operator, re-recognize, and emit. */
- gcc_unreachable ();
+
case 'd' :
fputs (arc_condition_codes[get_arc_condition_code (x)], file);
return;
@@ -4958,14 +4824,7 @@ arc_print_operand (FILE *file, rtx x, int code)
case 'F':
fputs (reg_names[REGNO (x)]+1, file);
return;
- case '^':
- /* This punctuation character is needed because label references are
- printed in the output template using %l. This is a front end
- character, and when we want to emit a '@' before it, we have to use
- this '^'. */
- fputc('@',file);
- return;
case 'O':
/* Output an operator. */
switch (GET_CODE (x))
@@ -5013,10 +4872,7 @@ arc_print_operand (FILE *file, rtx x, int code)
return;
}
break;
- case '&':
- if (TARGET_ANNOTATE_ALIGN)
- fprintf (file, "; unalign: %d", cfun->machine->unalign);
- return;
+
case '+':
if (TARGET_V2)
fputs ("m", file);
@@ -5187,504 +5043,12 @@ arc_print_operand_address (FILE *file , rtx addr)
}
}
-/* Conditional execution support.
-
- This is based on the ARM port but for now is much simpler.
-
- A finite state machine takes care of noticing whether or not instructions
- can be conditionally executed, and thus decrease execution time and code
- size by deleting branch instructions. The fsm is controlled by
- arc_ccfsm_advance (called by arc_final_prescan_insn), and controls the
- actions of PRINT_OPERAND. The patterns in the .md file for the branch
- insns also have a hand in this. */
-/* The way we leave dealing with non-anulled or annull-false delay slot
- insns to the consumer is awkward. */
-
-/* The state of the fsm controlling condition codes are:
- 0: normal, do nothing special
- 1: don't output this insn
- 2: don't output this insn
- 3: make insns conditional
- 4: make insns conditional
- 5: make insn conditional (only for outputting anulled delay slot insns)
-
- special value for cfun->machine->uid_ccfsm_state:
- 6: return with but one insn before it since function start / call
-
- State transitions (state->state by whom, under what condition):
- 0 -> 1 arc_ccfsm_advance, if insn is a conditional branch skipping over
- some instructions.
- 0 -> 2 arc_ccfsm_advance, if insn is a conditional branch followed
- by zero or more non-jump insns and an unconditional branch with
- the same target label as the condbranch.
- 1 -> 3 branch patterns, after having not output the conditional branch
- 2 -> 4 branch patterns, after having not output the conditional branch
- 0 -> 5 branch patterns, for anulled delay slot insn.
- 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL, if the `target' label is reached
- (the target label has CODE_LABEL_NUMBER equal to
- arc_ccfsm_target_label).
- 4 -> 0 arc_ccfsm_advance, if `target' unconditional branch is reached
- 3 -> 1 arc_ccfsm_advance, finding an 'else' jump skipping over some insns.
- 5 -> 0 when outputting the delay slot insn
-
- If the jump clobbers the conditions then we use states 2 and 4.
-
- A similar thing can be done with conditional return insns.
-
- We also handle separating branches from sets of the condition code.
- This is done here because knowledge of the ccfsm state is required,
- we may not be outputting the branch. */
-
-/* arc_final_prescan_insn calls arc_ccfsm_advance to adjust arc_ccfsm_current,
- before letting final output INSN. */
-
-static void
-arc_ccfsm_advance (rtx_insn *insn, struct arc_ccfsm *state)
-{
- /* BODY will hold the body of INSN. */
- rtx body;
-
- /* This will be 1 if trying to repeat the trick (ie: do the `else' part of
- an if/then/else), and things need to be reversed. */
- int reverse = 0;
-
- /* If we start with a return insn, we only succeed if we find another one. */
- int seeking_return = 0;
-
- /* START_INSN will hold the insn from where we start looking. This is the
- first insn after the following code_label if REVERSE is true. */
- rtx_insn *start_insn = insn;
-
- /* Type of the jump_insn. Brcc insns don't affect ccfsm changes,
- since they don't rely on a cmp preceding the. */
- enum attr_type jump_insn_type;
-
- /* Allow -mdebug-ccfsm to turn this off so we can see how well it does.
- We can't do this in macro FINAL_PRESCAN_INSN because its called from
- final_scan_insn which has `optimize' as a local. */
- if (optimize < 2 || TARGET_NO_COND_EXEC)
- return;
-
- /* Ignore notes and labels. */
- if (!INSN_P (insn))
- return;
- body = PATTERN (insn);
- /* If in state 4, check if the target branch is reached, in order to
- change back to state 0. */
- if (state->state == 4)
- {
- if (insn == state->target_insn)
- {
- state->target_insn = NULL;
- state->state = 0;
- }
- return;
- }
-
- /* If in state 3, it is possible to repeat the trick, if this insn is an
- unconditional branch to a label, and immediately following this branch
- is the previous target label which is only used once, and the label this
- branch jumps to is not too far off. Or in other words "we've done the
- `then' part, see if we can do the `else' part." */
- if (state->state == 3)
- {
- if (simplejump_p (insn))
- {
- start_insn = next_nonnote_insn (start_insn);
- if (GET_CODE (start_insn) == BARRIER)
- {
- /* ??? Isn't this always a barrier? */
- start_insn = next_nonnote_insn (start_insn);
- }
- if (GET_CODE (start_insn) == CODE_LABEL
- && CODE_LABEL_NUMBER (start_insn) == state->target_label
- && LABEL_NUSES (start_insn) == 1)
- reverse = TRUE;
- else
- return;
- }
- else if (GET_CODE (body) == SIMPLE_RETURN)
- {
- start_insn = next_nonnote_insn (start_insn);
- if (GET_CODE (start_insn) == BARRIER)
- start_insn = next_nonnote_insn (start_insn);
- if (GET_CODE (start_insn) == CODE_LABEL
- && CODE_LABEL_NUMBER (start_insn) == state->target_label
- && LABEL_NUSES (start_insn) == 1)
- {
- reverse = TRUE;
- seeking_return = 1;
- }
- else
- return;
- }
- else
- return;
- }
-
- if (GET_CODE (insn) != JUMP_INSN
- || GET_CODE (PATTERN (insn)) == ADDR_VEC
- || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
- return;
-
- /* We can't predicate BRCC or loop ends.
- Also, when generating PIC code, and considering a medium range call,
- we can't predicate the call. */
- jump_insn_type = get_attr_type (insn);
- if (jump_insn_type == TYPE_BRCC
- || jump_insn_type == TYPE_BRCC_NO_DELAY_SLOT
- || jump_insn_type == TYPE_LOOP_END
- || (jump_insn_type == TYPE_CALL && !get_attr_predicable (insn)))
- return;
-
- /* This jump might be paralleled with a clobber of the condition codes,
- the jump should always come first. */
- if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
- body = XVECEXP (body, 0, 0);
-
- if (reverse
- || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
- && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
- {
- int insns_skipped = 0, fail = FALSE, succeed = FALSE;
- /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
- int then_not_else = TRUE;
- /* Nonzero if next insn must be the target label. */
- int next_must_be_target_label_p;
- rtx_insn *this_insn = start_insn;
- rtx label = 0;
-
- /* Register the insn jumped to. */
- if (reverse)
- {
- if (!seeking_return)
- label = XEXP (SET_SRC (body), 0);
- }
- else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
- label = XEXP (XEXP (SET_SRC (body), 1), 0);
- else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
- {
- label = XEXP (XEXP (SET_SRC (body), 2), 0);
- then_not_else = FALSE;
- }
- else if (GET_CODE (XEXP (SET_SRC (body), 1)) == SIMPLE_RETURN)
- seeking_return = 1;
- else if (GET_CODE (XEXP (SET_SRC (body), 2)) == SIMPLE_RETURN)
- {
- seeking_return = 1;
- then_not_else = FALSE;
- }
- else
- gcc_unreachable ();
-
- /* If this is a non-annulled branch with a delay slot, there is
- no need to conditionalize the delay slot. */
- if ((GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) == SEQUENCE)
- && state->state == 0 && !INSN_ANNULLED_BRANCH_P (insn))
- {
- this_insn = NEXT_INSN (this_insn);
- }
- /* See how many insns this branch skips, and what kind of insns. If all
- insns are okay, and the label or unconditional branch to the same
- label is not too far away, succeed. */
- for (insns_skipped = 0, next_must_be_target_label_p = FALSE;
- !fail && !succeed && insns_skipped < MAX_INSNS_SKIPPED;
- insns_skipped++)
- {
- rtx scanbody;
-
- this_insn = next_nonnote_insn (this_insn);
- if (!this_insn)
- break;
-
- if (next_must_be_target_label_p)
- {
- if (GET_CODE (this_insn) == BARRIER)
- continue;
- if (GET_CODE (this_insn) == CODE_LABEL
- && this_insn == label)
- {
- state->state = 1;
- succeed = TRUE;
- }
- else
- fail = TRUE;
- break;
- }
-
- switch (GET_CODE (this_insn))
- {
- case CODE_LABEL:
- /* Succeed if it is the target label, otherwise fail since
- control falls in from somewhere else. */
- if (this_insn == label)
- {
- state->state = 1;
- succeed = TRUE;
- }
- else
- fail = TRUE;
- break;
-
- case BARRIER:
- /* Succeed if the following insn is the target label.
- Otherwise fail.
- If return insns are used then the last insn in a function
- will be a barrier. */
- next_must_be_target_label_p = TRUE;
- break;
-
- case CALL_INSN:
- /* Can handle a call insn if there are no insns after it.
- IE: The next "insn" is the target label. We don't have to
- worry about delay slots as such insns are SEQUENCE's inside
- INSN's. ??? It is possible to handle such insns though. */
- if (get_attr_cond (this_insn) == COND_CANUSE)
- next_must_be_target_label_p = TRUE;
- else
- fail = TRUE;
- break;
-
- case JUMP_INSN:
- scanbody = PATTERN (this_insn);
-
- /* If this is an unconditional branch to the same label, succeed.
- If it is to another label, do nothing. If it is conditional,
- fail. */
- /* ??? Probably, the test for the SET and the PC are
- unnecessary. */
-
- if (GET_CODE (scanbody) == SET
- && GET_CODE (SET_DEST (scanbody)) == PC)
- {
- if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
- && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
- {
- state->state = 2;
- succeed = TRUE;
- }
- else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
- fail = TRUE;
- else if (get_attr_cond (this_insn) != COND_CANUSE)
- fail = TRUE;
- }
- else if (GET_CODE (scanbody) == SIMPLE_RETURN
- && seeking_return)
- {
- state->state = 2;
- succeed = TRUE;
- }
- else if (GET_CODE (scanbody) == PARALLEL)
- {
- if (get_attr_cond (this_insn) != COND_CANUSE)
- fail = TRUE;
- }
- break;
-
- case INSN:
- scanbody = PATTERN (this_insn);
-
- /* We can only do this with insns that can use the condition
- codes (and don't set them). */
- if (GET_CODE (scanbody) == SET
- || GET_CODE (scanbody) == PARALLEL)
- {
- if (get_attr_cond (this_insn) != COND_CANUSE)
- fail = TRUE;
- }
- /* We can't handle other insns like sequences. */
- else
- fail = TRUE;
- break;
-
- default:
- break;
- }
- }
-
- if (succeed)
- {
- if ((!seeking_return) && (state->state == 1 || reverse))
- state->target_label = CODE_LABEL_NUMBER (label);
- else if (seeking_return || state->state == 2)
- {
- while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
- {
- this_insn = next_nonnote_insn (this_insn);
-
- gcc_assert (!this_insn ||
- (GET_CODE (this_insn) != BARRIER
- && GET_CODE (this_insn) != CODE_LABEL));
- }
- if (!this_insn)
- {
- /* Oh dear! we ran off the end, give up. */
- extract_insn_cached (insn);
- state->state = 0;
- state->target_insn = NULL;
- return;
- }
- state->target_insn = this_insn;
- }
- else
- gcc_unreachable ();
-
- /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
- what it was. */
- if (!reverse)
- {
- state->cond = XEXP (SET_SRC (body), 0);
- state->cc = get_arc_condition_code (XEXP (SET_SRC (body), 0));
- }
-
- if (reverse || then_not_else)
- state->cc = ARC_INVERSE_CONDITION_CODE (state->cc);
- }
-
- /* Restore recog_operand. Getting the attributes of other insns can
- destroy this array, but final.cc assumes that it remains intact
- across this call; since the insn has been recognized already we
- call insn_extract direct. */
- extract_insn_cached (insn);
- }
-}
-
-/* Record that we are currently outputting label NUM with prefix PREFIX.
- It it's the label we're looking for, reset the ccfsm machinery.
-
- Called from ASM_OUTPUT_INTERNAL_LABEL. */
-
-static void
-arc_ccfsm_at_label (const char *prefix, int num, struct arc_ccfsm *state)
-{
- if (state->state == 3 && state->target_label == num
- && !strcmp (prefix, "L"))
- {
- state->state = 0;
- state->target_insn = NULL;
- }
-}
-
-/* We are considering a conditional branch with the condition COND.
- Check if we want to conditionalize a delay slot insn, and if so modify
- the ccfsm state accordingly.
- REVERSE says branch will branch when the condition is false. */
-void
-arc_ccfsm_record_condition (rtx cond, bool reverse, rtx_insn *jump,
- struct arc_ccfsm *state)
-{
- rtx_insn *seq_insn = NEXT_INSN (PREV_INSN (jump));
- if (!state)
- state = &arc_ccfsm_current;
-
- gcc_assert (state->state == 0);
- if (seq_insn != jump)
- {
- rtx insn = XVECEXP (PATTERN (seq_insn), 0, 1);
-
- if (!as_a<rtx_insn *> (insn)->deleted ()
- && INSN_ANNULLED_BRANCH_P (jump)
- && (TARGET_AT_DBR_CONDEXEC || INSN_FROM_TARGET_P (insn)))
- {
- state->cond = cond;
- state->cc = get_arc_condition_code (cond);
- if (!reverse)
- arc_ccfsm_current.cc
- = ARC_INVERSE_CONDITION_CODE (state->cc);
- rtx pat = PATTERN (insn);
- if (GET_CODE (pat) == COND_EXEC)
- gcc_assert ((INSN_FROM_TARGET_P (insn)
- ? ARC_INVERSE_CONDITION_CODE (state->cc) : state->cc)
- == get_arc_condition_code (XEXP (pat, 0)));
- else
- state->state = 5;
- }
- }
-}
-
-/* Update *STATE as we would when we emit INSN. */
-
-static void
-arc_ccfsm_post_advance (rtx_insn *insn, struct arc_ccfsm *state)
-{
- enum attr_type type;
-
- if (LABEL_P (insn))
- arc_ccfsm_at_label ("L", CODE_LABEL_NUMBER (insn), state);
- else if (JUMP_P (insn)
- && GET_CODE (PATTERN (insn)) != ADDR_VEC
- && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
- && ((type = get_attr_type (insn)) == TYPE_BRANCH
- || ((type == TYPE_UNCOND_BRANCH
- || type == TYPE_RETURN)
- && ARC_CCFSM_BRANCH_DELETED_P (state))))
- {
- if (ARC_CCFSM_BRANCH_DELETED_P (state))
- ARC_CCFSM_RECORD_BRANCH_DELETED (state);
- else
- {
- rtx src = SET_SRC (PATTERN (insn));
- arc_ccfsm_record_condition (XEXP (src, 0), XEXP (src, 1) == pc_rtx,
- insn, state);
- }
- }
- else if (arc_ccfsm_current.state == 5)
- arc_ccfsm_current.state = 0;
-}
-
-/* Return true if the current insn, which is a conditional branch, is to be
- deleted. */
-
-bool
-arc_ccfsm_branch_deleted_p (void)
-{
- return ARC_CCFSM_BRANCH_DELETED_P (&arc_ccfsm_current);
-}
-
-/* Record a branch isn't output because subsequent insns can be
- conditionalized. */
-
-void
-arc_ccfsm_record_branch_deleted (void)
-{
- ARC_CCFSM_RECORD_BRANCH_DELETED (&arc_ccfsm_current);
-}
-
-/* During insn output, indicate if the current insn is predicated. */
-
-bool
-arc_ccfsm_cond_exec_p (void)
-{
- return (cfun->machine->prescan_initialized
- && ARC_CCFSM_COND_EXEC_P (&arc_ccfsm_current));
-}
-
-/* When deciding if an insn should be output short, we want to know something
- about the following insns:
- - if another insn follows which we know we can output as a short insn
- before an alignment-sensitive point, we can output this insn short:
- the decision about the eventual alignment can be postponed.
- - if a to-be-aligned label comes next, we should output this insn such
- as to get / preserve 4-byte alignment.
- - if a likely branch without delay slot insn, or a call with an immediately
- following short insn comes next, we should out output this insn such as to
- get / preserve 2 mod 4 unalignment.
- - do the same for a not completely unlikely branch with a short insn
- following before any other branch / label.
- - in order to decide if we are actually looking at a branch, we need to
- call arc_ccfsm_advance.
- - in order to decide if we are looking at a short insn, we should know
- if it is conditionalized. To a first order of approximation this is
- the case if the state from arc_ccfsm_advance from before this insn
- indicates the insn is conditionalized. However, a further refinement
- could be to not conditionalize an insn if the destination register(s)
- is/are dead in the non-executed case. */
/* Return non-zero if INSN should be output as a short insn. UNALIGN is
zero if the current insn is aligned to a 4-byte-boundary, two otherwise.
If CHECK_ATTR is greater than 0, check the iscompact attribute first. */
static int
-arc_verify_short (rtx_insn *insn, int, int check_attr)
+arc_verify_short (rtx_insn *insn, int check_attr)
{
enum attr_iscompact iscompact;
@@ -5699,8 +5063,7 @@ arc_verify_short (rtx_insn *insn, int, int check_attr)
}
/* When outputting an instruction (alternative) that can potentially be short,
- output the short suffix if the insn is in fact short, and update
- cfun->machine->unalign accordingly. */
+ output the short suffix if the insn is in fact short. */
static void
output_short_suffix (FILE *file)
@@ -5709,10 +5072,9 @@ output_short_suffix (FILE *file)
if (!insn)
return;
- if (arc_verify_short (insn, cfun->machine->unalign, 1))
+ if (arc_verify_short (insn, 1))
{
fprintf (file, "_s");
- cfun->machine->unalign ^= 2;
}
/* Restore recog_operand. */
extract_insn_cached (insn);
@@ -5726,14 +5088,6 @@ arc_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
{
if (TARGET_DUMPISIZE)
fprintf (asm_out_file, "\n; at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
-
- if (!cfun->machine->prescan_initialized)
- {
- /* Clear lingering state from branch shortening. */
- memset (&arc_ccfsm_current, 0, sizeof arc_ccfsm_current);
- cfun->machine->prescan_initialized = 1;
- }
- arc_ccfsm_advance (insn, &arc_ccfsm_current);
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
@@ -5866,8 +5220,6 @@ arc_encode_section_info (tree decl, rtx rtl, int first)
static void arc_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
- if (cfun)
- arc_ccfsm_at_label (prefix, labelno, &arc_ccfsm_current);
default_internal_label (stream, prefix, labelno);
}
@@ -8546,17 +7898,7 @@ arc_reorg (void)
jli_call_scan ();
pad_return ();
-/* FIXME: should anticipate ccfsm action, generate special patterns for
- to-be-deleted branches that have no delay slot and have at least the
- length of the size increase forced on other insns that are conditionalized.
- This can also have an insn_list inside that enumerates insns which are
- not actually conditionalized because the destinations are dead in the
- not-execute case.
- Could also tag branches that we want to be unaligned if they get no delay
- slot, or even ones that we don't want to do delay slot sheduling for
- because we can unalign them.
-
- However, there are cases when conditional execution is only possible after
+/* There are cases when conditional execution is only possible after
delay slot scheduling:
- If a delay slot is filled with a nocond/set insn from above, the previous
@@ -8585,22 +7927,8 @@ arc_reorg (void)
init_insn_lengths();
changed = 0;
- if (optimize > 1 && !TARGET_NO_COND_EXEC)
- {
- arc_ifcvt ();
- unsigned int flags = pass_data_arc_ifcvt.todo_flags_finish;
- df_finish_pass ((flags & TODO_df_verify) != 0);
-
- if (dump_file)
- {
- fprintf (dump_file, ";; After if conversion:\n\n");
- print_rtl (dump_file, get_insns ());
- }
- }
-
/* Call shorten_branches to calculate the insn lengths. */
shorten_branches (get_insns());
- cfun->machine->ccfsm_current_insn = NULL_RTX;
if (!INSN_ADDRESSES_SET_P())
fatal_error (input_location,
@@ -9469,8 +8797,7 @@ arc_output_libcall (const char *fname)
static char buf[64];
gcc_assert (len < sizeof buf - 35);
- if (TARGET_LONG_CALLS_SET
- || (TARGET_MEDIUM_CALLS && arc_ccfsm_cond_exec_p ()))
+ if (TARGET_LONG_CALLS_SET)
{
if (flag_pic)
sprintf (buf, "add r12,pcl,@%s@pcl\n\tjl%%!%%* [r12]", fname);
@@ -9570,31 +8897,6 @@ arc_adjust_insn_length (rtx_insn *insn, int len, bool)
return len;
}
-/* Return a copy of COND from *STATEP, inverted if that is indicated by the
- CC field of *STATEP. */
-
-static rtx
-arc_get_ccfsm_cond (struct arc_ccfsm *statep, bool reverse)
-{
- rtx cond = statep->cond;
- int raw_cc = get_arc_condition_code (cond);
- if (reverse)
- raw_cc = ARC_INVERSE_CONDITION_CODE (raw_cc);
-
- if (statep->cc == raw_cc)
- return copy_rtx (cond);
-
- gcc_assert (ARC_INVERSE_CONDITION_CODE (raw_cc) == statep->cc);
-
- machine_mode ccm = GET_MODE (XEXP (cond, 0));
- enum rtx_code code = reverse_condition (GET_CODE (cond));
- if (code == UNKNOWN || ccm == CC_FP_GTmode || ccm == CC_FP_GEmode)
- code = reverse_condition_maybe_unordered (GET_CODE (cond));
-
- return gen_rtx_fmt_ee (code, GET_MODE (cond),
- copy_rtx (XEXP (cond, 0)), copy_rtx (XEXP (cond, 1)));
-}
-
/* Return version of PAT conditionalized with COND, which is part of INSN.
ANNULLED indicates if INSN is an annulled delay-slot insn.
Register further changes if necessary. */
@@ -9639,125 +8941,6 @@ conditionalize_nonjump (rtx pat, rtx cond, rtx insn, bool annulled)
return pat;
}
-/* Use the ccfsm machinery to do if conversion. */
-
-static unsigned
-arc_ifcvt (void)
-{
- struct arc_ccfsm *statep = &cfun->machine->ccfsm_current;
-
- memset (statep, 0, sizeof *statep);
- for (rtx_insn *insn = get_insns (); insn; insn = next_insn (insn))
- {
- arc_ccfsm_advance (insn, statep);
-
- switch (statep->state)
- {
- case 0:
- break;
- case 1: case 2:
- {
- /* Deleted branch. */
- arc_ccfsm_post_advance (insn, statep);
- gcc_assert (!IN_RANGE (statep->state, 1, 2));
- rtx_insn *seq = NEXT_INSN (PREV_INSN (insn));
- if (GET_CODE (PATTERN (seq)) == SEQUENCE)
- {
- rtx slot = XVECEXP (PATTERN (seq), 0, 1);
- rtx pat = PATTERN (slot);
- if (INSN_ANNULLED_BRANCH_P (insn))
- {
- rtx cond
- = arc_get_ccfsm_cond (statep, INSN_FROM_TARGET_P (slot));
- pat = gen_rtx_COND_EXEC (VOIDmode, cond, pat);
- }
- if (!validate_change (seq, &PATTERN (seq), pat, 0))
- gcc_unreachable ();
- PUT_CODE (slot, NOTE);
- NOTE_KIND (slot) = NOTE_INSN_DELETED;
- }
- else
- {
- set_insn_deleted (insn);
- }
- continue;
- }
- case 3:
- if (LABEL_P (insn)
- && statep->target_label == CODE_LABEL_NUMBER (insn))
- {
- arc_ccfsm_post_advance (insn, statep);
- if (--LABEL_NUSES (insn) == 0)
- delete_insn (insn);
- continue;
- }
- /* Fall through. */
- case 4: case 5:
- if (!NONDEBUG_INSN_P (insn))
- break;
-
- /* Conditionalized insn. */
-
- rtx_insn *prev, *pprev;
- rtx *patp, pat, cond;
- bool annulled; annulled = false;
-
- /* If this is a delay slot insn in a non-annulled branch,
- don't conditionalize it. N.B., this should be fine for
- conditional return too. However, don't do this for
- unconditional branches, as these would be encountered when
- processing an 'else' part. */
- prev = PREV_INSN (insn);
- pprev = PREV_INSN (prev);
- if (pprev && NEXT_INSN (NEXT_INSN (pprev)) == NEXT_INSN (insn)
- && JUMP_P (prev) && get_attr_cond (prev) == COND_USE)
- {
- if (!INSN_ANNULLED_BRANCH_P (prev))
- break;
- annulled = true;
- }
-
- patp = &PATTERN (insn);
- pat = *patp;
- cond = arc_get_ccfsm_cond (statep, INSN_FROM_TARGET_P (insn));
- if (NONJUMP_INSN_P (insn) || CALL_P (insn))
- {
- /* ??? don't conditionalize if all side effects are dead
- in the not-execute case. */
-
- pat = conditionalize_nonjump (pat, cond, insn, annulled);
- }
- else if (simplejump_p (insn))
- {
- patp = &SET_SRC (pat);
- pat = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, *patp, pc_rtx);
- }
- else if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
- {
- pat = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, pat, pc_rtx);
- pat = gen_rtx_SET (pc_rtx, pat);
- }
- else
- gcc_unreachable ();
- validate_change (insn, patp, pat, 1);
- if (!apply_change_group ())
- gcc_unreachable ();
- if (JUMP_P (insn))
- {
- rtx_insn *next = next_nonnote_insn (insn);
- if (GET_CODE (next) == BARRIER)
- delete_insn (next);
- if (statep->state == 3)
- continue;
- }
- break;
- default:
- gcc_unreachable ();
- }
- arc_ccfsm_post_advance (insn, statep);
- }
- return 0;
-}
/* Find annulled delay insns and convert them to use the appropriate predicate.
This allows branch shortening to size up these insns properly. */
@@ -10058,21 +9241,6 @@ arc_check_millicode (rtx op, int offset, int load_p)
return 1;
}
-/* Accessor functions for cfun->machine->unalign. */
-
-void
-arc_clear_unalign (void)
-{
- if (cfun)
- cfun->machine->unalign = 0;
-}
-
-void
-arc_toggle_unalign (void)
-{
- cfun->machine->unalign ^= 2;
-}
-
/* Operands 0..2 are the operands of a subsi which uses a 12 bit
constant in operand 1, but which would require a LIMM because of
operand mismatch.
@@ -10311,7 +9479,7 @@ arc_split_move (rtx *operands)
const char *
arc_short_long (rtx_insn *insn, const char *s_tmpl, const char *l_tmpl)
{
- int is_short = arc_verify_short (insn, cfun->machine->unalign, -1);
+ int is_short = arc_verify_short (insn, -1);
extract_constrain_insn_cached (insn);
return is_short ? s_tmpl : l_tmpl;
diff --git a/gcc/config/arc/arc.h b/gcc/config/arc/arc.h
index 8daae41..5877389 100644
--- a/gcc/config/arc/arc.h
+++ b/gcc/config/arc/arc.h
@@ -1312,20 +1312,6 @@ do { \
/* Defined to also emit an .align in elfos.h. We don't want that. */
#undef ASM_OUTPUT_CASE_LABEL
-/* ADDR_DIFF_VECs are in the text section and thus can affect the
- current alignment. */
-#define ASM_OUTPUT_CASE_END(FILE, NUM, JUMPTABLE) \
- do \
- { \
- if (GET_CODE (PATTERN (JUMPTABLE)) == ADDR_DIFF_VEC \
- && ((GET_MODE_SIZE (as_a <scalar_int_mode> \
- (GET_MODE (PATTERN (JUMPTABLE)))) \
- * XVECLEN (PATTERN (JUMPTABLE), 1) + 1) \
- & 2)) \
- arc_toggle_unalign (); \
- } \
- while (0)
-
#define JUMP_ALIGN(LABEL) (arc_size_opt_level < 2 ? 2 : 0)
#define LABEL_ALIGN_AFTER_BARRIER(LABEL) \
(JUMP_ALIGN(LABEL) \
@@ -1346,8 +1332,6 @@ do { \
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
do { \
if ((LOG) != 0) fprintf (FILE, "\t.align %d\n", 1 << (LOG)); \
- if ((LOG) > 1) \
- arc_clear_unalign (); \
} while (0)
/* ASM_OUTPUT_ALIGNED_DECL_LOCAL (STREAM, DECL, NAME, SIZE, ALIGNMENT)
diff --git a/gcc/config/arc/arc.md b/gcc/config/arc/arc.md
index d37ecbf..22af0bf 100644
--- a/gcc/config/arc/arc.md
+++ b/gcc/config/arc/arc.md
@@ -547,16 +547,6 @@
(const_string "false")]
(const_string "true")))
-;; Instructions that we can put into a delay slot and conditionalize.
-(define_attr "cond_delay_insn" "no,yes"
- (cond [(eq_attr "cond" "!canuse") (const_string "no")
- (eq_attr "type" "call,branch,uncond_branch,jump,brcc")
- (const_string "no")
- (match_test "find_reg_note (insn, REG_SAVE_NOTE, GEN_INT (2))")
- (const_string "no")
- (eq_attr "length" "2,4") (const_string "yes")]
- (const_string "no")))
-
(define_attr "in_ret_delay_slot" "no,yes"
(cond [(eq_attr "in_delay_slot" "false")
(const_string "no")
@@ -565,19 +555,6 @@
(const_string "no")]
(const_string "yes")))
-(define_attr "cond_ret_delay_insn" "no,yes"
- (cond [(eq_attr "in_ret_delay_slot" "no") (const_string "no")
- (eq_attr "cond_delay_insn" "no") (const_string "no")]
- (const_string "yes")))
-
-(define_attr "annul_ret_delay_insn" "no,yes"
- (cond [(eq_attr "cond_ret_delay_insn" "yes") (const_string "yes")
- (match_test "TARGET_AT_DBR_CONDEXEC") (const_string "no")
- (eq_attr "type" "!call,branch,uncond_branch,jump,brcc,return,sfunc")
- (const_string "yes")]
- (const_string "no")))
-
-
;; Delay slot definition for ARCompact ISA
;; ??? FIXME:
;; When outputting an annul-true insn elegible for cond-exec
@@ -590,14 +567,7 @@
(eq_attr "in_call_delay_slot" "true")
(nil)])
-(define_delay (and (match_test "!TARGET_AT_DBR_CONDEXEC")
- (eq_attr "type" "brcc"))
- [(eq_attr "in_delay_slot" "true")
- (eq_attr "in_delay_slot" "true")
- (nil)])
-
-(define_delay (and (match_test "TARGET_AT_DBR_CONDEXEC")
- (eq_attr "type" "brcc"))
+(define_delay (eq_attr "type" "brcc")
[(eq_attr "in_delay_slot" "true")
(nil)
(nil)])
@@ -605,39 +575,26 @@
(define_delay
(eq_attr "type" "return")
[(eq_attr "in_ret_delay_slot" "yes")
- (eq_attr "annul_ret_delay_insn" "yes")
- (eq_attr "cond_ret_delay_insn" "yes")])
+ (nil)
+ (nil)])
(define_delay (eq_attr "type" "loop_end")
[(eq_attr "in_delay_slot" "true")
- (eq_attr "in_delay_slot" "true")
+ (nil)
(nil)])
-;; For ARC600, unexposing the delay sloy incurs a penalty also in the
-;; non-taken case, so the only meaningful way to have an annull-true
+;; The only meaningful way to have an annull-true
;; filled delay slot is to conditionalize the delay slot insn.
-(define_delay (and (match_test "TARGET_AT_DBR_CONDEXEC")
- (eq_attr "type" "branch,uncond_branch,jump")
- (match_test "!optimize_size"))
- [(eq_attr "in_delay_slot" "true")
- (eq_attr "cond_delay_insn" "yes")
- (eq_attr "cond_delay_insn" "yes")])
-
-;; For ARC700, anything goes for annulled-true insns, since there is no
-;; penalty for the unexposed delay slot when the branch is not taken,
-;; however, we must avoid things that have a delay slot themselvese to
-;; avoid confusing gcc.
-(define_delay (and (match_test "!TARGET_AT_DBR_CONDEXEC")
- (eq_attr "type" "branch,uncond_branch,jump")
+(define_delay (and (eq_attr "type" "branch,uncond_branch,jump")
(match_test "!optimize_size"))
[(eq_attr "in_delay_slot" "true")
- (eq_attr "type" "!call,branch,uncond_branch,jump,brcc,return,sfunc")
- (eq_attr "cond_delay_insn" "yes")])
+ (nil)
+ (nil)])
;; -mlongcall -fpic sfuncs use r12 to load the function address
(define_delay (eq_attr "type" "sfunc")
[(eq_attr "in_sfunc_delay_slot" "true")
- (eq_attr "in_sfunc_delay_slot" "true")
+ (nil)
(nil)])
;; ??? need to use a working strategy for canuse_limm:
;; - either canuse_limm is not eligible for delay slots, and has no
@@ -712,19 +669,19 @@ archs4x, archs4xd"
|| (satisfies_constraint_Cm3 (operands[1])
&& memory_operand (operands[0], QImode))"
"@
- mov%? %0,%1%&
- mov%? %0,%1%&
- mov%? %0,%1%&
- mov%? %0,%1%&
- mov%? %0,%1%&
mov%? %0,%1
mov%? %0,%1
mov%? %0,%1
mov%? %0,%1
mov%? %0,%1
- ldb%? %0,%1%&
- stb%? %1,%0%&
- ldb%? %0,%1%&
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ ldb%? %0,%1
+ stb%? %1,%0
+ ldb%? %0,%1
xldb%U1 %0,%1
ldb%U1%V1 %0,%1
xstb%U0 %1,%0
@@ -756,19 +713,19 @@ archs4x, archs4xd"
|| (satisfies_constraint_Cm3 (operands[1])
&& memory_operand (operands[0], HImode))"
"@
- mov%? %0,%1%&
- mov%? %0,%1%&
- mov%? %0,%1%&
- mov%? %0,%1%&
- mov%? %0,%1%&
mov%? %0,%1
mov%? %0,%1
mov%? %0,%1
- mov%? %0,%1%&
mov%? %0,%1
mov%? %0,%1
- ld%_%? %0,%1%&
- st%_%? %1,%0%&
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ mov%? %0,%1
+ ld%_%? %0,%1
+ st%_%? %1,%0
xld%_%U1 %0,%1
ld%_%U1%V1 %0,%1
xst%_%U0 %1,%0
@@ -822,15 +779,15 @@ archs4x, archs4xd"
mov%?\\t%0,%j1 ;14
ld%?\\t%0,%1 ;15
st%?\\t%1,%0 ;16
- * return arc_short_long (insn, \"push%?\\t%1%&\", \"st%U0\\t%1,%0%&\");
- * return arc_short_long (insn, \"pop%?\\t%0%&\", \"ld%U1\\t%0,%1%&\");
+ * return arc_short_long (insn, \"push%?\\t%1\", \"st%U0\\t%1,%0\");
+ * return arc_short_long (insn, \"pop%?\\t%0\", \"ld%U1\\t%0,%1\");
ld%?\\t%0,%1 ;19
xld%U1\\t%0,%1 ;20
ld%?\\t%0,%1 ;21
ld%?\\t%0,%1 ;22
ld%U1%V1\\t%0,%1 ;23
xst%U0\\t%1,%0 ;24
- st%?\\t%1,%0%& ;25
+ st%?\\t%1,%0 ;25
st%U0%V0\\t%1,%0 ;26
st%U0%V0\\t%1,%0 ;37
st%U0%V0\\t%1,%0 ;28"
@@ -1034,9 +991,9 @@ archs4x, archs4xd"
case 1:
return \"btst%? %1,%z2\";
case 4:
- return \"bmsk%?.f 0,%1,%Z2%&\";
+ return \"bmsk%?.f 0,%1,%Z2\";
case 5:
- return \"bclr%?.f 0,%1,%M2%&\";
+ return \"bclr%?.f 0,%1,%M2\";
case 6:
return \"asr.f 0,%1,%p2\";
default:
@@ -1145,34 +1102,33 @@ archs4x, archs4xd"
; the combiner needs this pattern
(define_insn "*addsi_compare"
[(set (reg:CC_ZN CC_REG)
- (compare:CC_ZN (match_operand:SI 0 "register_operand" "c")
- (neg:SI (match_operand:SI 1 "register_operand" "c"))))]
+ (compare:CC_ZN (neg:SI
+ (match_operand:SI 0 "register_operand" "r"))
+ (match_operand:SI 1 "register_operand" "r")))]
""
- "add.f 0,%0,%1"
+ "add.f\\t0,%0,%1"
[(set_attr "cond" "set")
(set_attr "type" "compare")
(set_attr "length" "4")])
-; for flag setting 'add' instructions like if (a+b < a) { ...}
-; the combiner needs this pattern
(define_insn "addsi_compare_2"
[(set (reg:CC_C CC_REG)
- (compare:CC_C (plus:SI (match_operand:SI 0 "register_operand" "c,c")
- (match_operand:SI 1 "nonmemory_operand" "cL,Cal"))
- (match_dup 0)))]
+ (compare:CC_C (plus:SI (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "rL,Cal"))
+ (match_dup 0)))]
""
- "add.f 0,%0,%1"
+ "add.f\\t0,%0,%1"
[(set_attr "cond" "set")
(set_attr "type" "compare")
(set_attr "length" "4,8")])
(define_insn "*addsi_compare_3"
[(set (reg:CC_C CC_REG)
- (compare:CC_C (plus:SI (match_operand:SI 0 "register_operand" "c")
- (match_operand:SI 1 "register_operand" "c"))
- (match_dup 1)))]
+ (compare:CC_C (plus:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_dup 1)))]
""
- "add.f 0,%0,%1"
+ "add.f\\t0,%0,%1"
[(set_attr "cond" "set")
(set_attr "type" "compare")
(set_attr "length" "4")])
@@ -1960,7 +1916,7 @@ archs4x, archs4xd"
"@
sex%_%?\\t%0,%1
sex%_\\t%0,%1
- ldh%?.x\\t%0,%1%&
+ ldh%?.x\\t%0,%1
ld%_.x%U1%V1\\t%0,%1
ld%_.x%U1%V1\\t%0,%1"
[(set_attr "type" "unary,unary,load,load,load")
@@ -1988,7 +1944,7 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w,w")
(abs:SI (match_operand:SI 1 "nonmemory_operand" "q,cL,Cal")))]
""
- "abs%? %0,%1%&"
+ "abs%? %0,%1"
[(set_attr "type" "two_cycle_core")
(set_attr "length" "*,4,8")
(set_attr "iscompact" "true,false,false")])
@@ -2286,7 +2242,7 @@ archs4x, archs4xd"
(sign_extend:DI (match_operand:SI 0 "register_operand" "%q, c,c, c"))
(sign_extend:DI (match_operand:SI 1 "nonmemory_operand" "q,cL,L,C32"))))]
"TARGET_MUL64_SET"
- "mul64%? \t0, %0, %1%&"
+ "mul64%? \t0, %0, %1"
[(set_attr "length" "*,4,4,8")
(set_attr "iscompact" "maybe,false,false,false")
(set_attr "type" "multi,multi,multi,multi")
@@ -2321,7 +2277,7 @@ archs4x, archs4xd"
(zero_extend:DI (match_operand:SI 0 "register_operand" "%c,c,c"))
(zero_extend:DI (match_operand:SI 1 "nonmemory_operand" "cL,L,C32"))))]
"TARGET_MUL64_SET"
- "mulu64%? \t0, %0, %1%&"
+ "mulu64%? \t0, %0, %1"
[(set_attr "length" "4,4,8")
(set_attr "iscompact" "false")
(set_attr "type" "umulti")
@@ -2902,8 +2858,8 @@ archs4x, archs4xd"
"register_operand (operands[1], SImode)
|| register_operand (operands[2], SImode)"
"@
- sub%?\\t%0,%1,%2%&
- sub%?\\t%0,%1,%2%&
+ sub%?\\t%0,%1,%2
+ sub%?\\t%0,%1,%2
sub%?\\t%0,%1,%2
rsub%?\\t%0,%2,%1
sub\\t%0,%1,%2
@@ -3211,26 +3167,26 @@ archs4x, archs4xd"
switch (which_alternative)
{
case 0: case 5: case 10: case 11: case 16: case 17: case 18:
- return "and%? %0,%1,%2%&";
+ return "and%? %0,%1,%2";
case 1: case 6:
- return "and%? %0,%2,%1%&";
+ return "and%? %0,%2,%1";
case 2:
- return "bmsk%? %0,%1,%Z2%&";
+ return "bmsk%? %0,%1,%Z2";
case 7: case 12:
if (satisfies_constraint_C2p (operands[2]))
{
operands[2] = GEN_INT ((~INTVAL (operands[2])));
- return "bmskn%? %0,%1,%Z2%&";
+ return "bmskn%? %0,%1,%Z2";
}
else
{
- return "bmsk%? %0,%1,%Z2%&";
+ return "bmsk%? %0,%1,%Z2";
}
case 3: case 8: case 13:
- return "bclr%? %0,%1,%M2%&";
+ return "bclr%? %0,%1,%M2";
case 4:
return (INTVAL (operands[2]) == 0xff
- ? "extb%? %0,%1%&" : "ext%_%? %0,%1%&");
+ ? "extb%? %0,%1" : "ext%_%? %0,%1");
case 9: case 14: return \"bic%? %0,%1,%n2-1\";
case 15:
return "movb.cl %0,%1,%p2,%p2,%x2";
@@ -3288,7 +3244,7 @@ archs4x, archs4xd"
(match_operand:SI 2 "nonmemory_operand" "0,0,0,0,r,r,Cal")))]
""
"@
- bic%?\\t%0, %2, %1%& ;;constraint 0
+ bic%?\\t%0, %2, %1 ;;constraint 0
bic%?\\t%0,%2,%1 ;;constraint 1
bic\\t%0,%2,%1 ;;constraint 2, FIXME: will it ever get generated ???
bic%?\\t%0,%2,%1 ;;constraint 3, FIXME: will it ever get generated ???
@@ -3343,9 +3299,9 @@ archs4x, archs4xd"
switch (which_alternative)
{
case 0: case 2: case 5: case 6: case 8: case 9: case 10:
- return \"xor%?\\t%0,%1,%2%&\";
+ return \"xor%?\\t%0,%1,%2\";
case 1: case 3:
- return \"xor%?\\t%0,%2,%1%&\";
+ return \"xor%?\\t%0,%2,%1\";
case 4: case 7:
return \"bxor%?\\t%0,%1,%z2\";
default:
@@ -3362,7 +3318,7 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,q,r,r")
(neg:SI (match_operand:SI 1 "register_operand" "0,q,0,r")))]
""
- "neg%?\\t%0,%1%&"
+ "neg%?\\t%0,%1"
[(set_attr "type" "unary")
(set_attr "iscompact" "maybe,true,false,false")
(set_attr "predicable" "no,no,yes,no")])
@@ -3371,7 +3327,7 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w")
(not:SI (match_operand:SI 1 "register_operand" "q,c")))]
""
- "not%? %0,%1%&"
+ "not%? %0,%1"
[(set_attr "type" "unary,unary")
(set_attr "iscompact" "true,false")])
@@ -3401,70 +3357,19 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "dest_reg_operand" "")
(ashift:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")))]
- ""
- "
-{
- if (!TARGET_BARREL_SHIFTER)
- {
- emit_shift (ASHIFT, operands[0], operands[1], operands[2]);
- DONE;
- }
-}")
+ "")
(define_expand "ashrsi3"
[(set (match_operand:SI 0 "dest_reg_operand" "")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")))]
- ""
- "
-{
- if (!TARGET_BARREL_SHIFTER)
- {
- emit_shift (ASHIFTRT, operands[0], operands[1], operands[2]);
- DONE;
- }
-}")
+ "")
(define_expand "lshrsi3"
[(set (match_operand:SI 0 "dest_reg_operand" "")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")))]
- ""
- "
-{
- if (!TARGET_BARREL_SHIFTER)
- {
- emit_shift (LSHIFTRT, operands[0], operands[1], operands[2]);
- DONE;
- }
-}")
-
-(define_insn "shift_si3"
- [(set (match_operand:SI 0 "dest_reg_operand" "=r")
- (match_operator:SI 3 "shift4_operator"
- [(match_operand:SI 1 "register_operand" "0")
- (match_operand:SI 2 "const_int_operand" "n")]))
- (clobber (match_scratch:SI 4 "=&r"))
- (clobber (reg:CC CC_REG))
- ]
- "!TARGET_BARREL_SHIFTER"
- "* return output_shift (operands);"
- [(set_attr "type" "shift")
- (set_attr "length" "16")])
-
-(define_insn "shift_si3_loop"
- [(set (match_operand:SI 0 "dest_reg_operand" "=r,r")
- (match_operator:SI 3 "shift_operator"
- [(match_operand:SI 1 "register_operand" "0,0")
- (match_operand:SI 2 "nonmemory_operand" "rn,Cal")]))
- (clobber (match_scratch:SI 4 "=X,X"))
- (clobber (reg:SI LP_COUNT))
- (clobber (reg:CC CC_REG))
- ]
- "!TARGET_BARREL_SHIFTER"
- "* return output_shift (operands);"
- [(set_attr "type" "shift")
- (set_attr "length" "16,20")])
+ "")
; asl, asr, lsr patterns:
; There is no point in including an 'I' alternative since only the lowest 5
@@ -3499,18 +3404,215 @@ archs4x, archs4xd"
(set_attr "cond" "canuse,nocond,canuse,canuse,nocond,nocond")])
(define_insn "*lshrsi3_insn"
- [(set (match_operand:SI 0 "dest_reg_operand" "=q,q, q, r, r, r")
- (lshiftrt:SI (match_operand:SI 1 "nonmemory_operand" "!0,q, 0, 0, r,rCal")
- (match_operand:SI 2 "nonmemory_operand" "N,N,qM,rL,rL,rCal")))]
+ [(set (match_operand:SI 0 "dest_reg_operand" "=q, q, r, r, r")
+ (lshiftrt:SI (match_operand:SI 1 "nonmemory_operand" "q, 0, 0, r,rCal")
+ (match_operand:SI 2 "nonmemory_operand" "N,qM,rL,rL,rCal")))]
"TARGET_BARREL_SHIFTER
&& (register_operand (operands[1], SImode)
|| register_operand (operands[2], SImode))"
- "*return (which_alternative <= 1 && !arc_ccfsm_cond_exec_p ()
- ? \"lsr%?\\t%0,%1%&\" : \"lsr%?\\t%0,%1,%2%&\");"
+ "@
+ lsr_s\\t%0,%1
+ lsr_s\\t%0,%1,%2
+ lsr%?\\t%0,%1,%2
+ lsr%?\\t%0,%1,%2
+ lsr%?\\t%0,%1,%2"
[(set_attr "type" "shift")
- (set_attr "iscompact" "maybe,maybe,maybe,false,false,false")
- (set_attr "predicable" "no,no,no,yes,no,no")
- (set_attr "cond" "canuse,nocond,canuse,canuse,nocond,nocond")])
+ (set_attr "iscompact" "maybe,maybe,false,false,false")
+ (set_attr "predicable" "no,no,yes,no,no")
+ (set_attr "cond" "nocond,canuse,canuse,nocond,nocond")])
+
+;; Split asl dst,1,src into bset dst,0,src.
+(define_insn_and_split "*ashlsi3_1"
+ [(set (match_operand:SI 0 "dest_reg_operand")
+ (ashift:SI (const_int 1)
+ (match_operand:SI 1 "nonmemory_operand")))]
+ "!TARGET_BARREL_SHIFTER
+ && arc_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (ior:SI (ashift:SI (const_int 1) (match_dup 1))
+ (const_int 0)))]
+ ""
+ [(set_attr "type" "shift")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*ashlsi3_nobs"
+ [(set (match_operand:SI 0 "dest_reg_operand")
+ (ashift:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")))]
+ "!TARGET_BARREL_SHIFTER
+ && operands[2] != const1_rtx
+ && arc_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ if (CONST_INT_P (operands[2]))
+ {
+ int n = INTVAL (operands[2]) & 0x1f;
+ if (n <= 9)
+ {
+ if (n == 0)
+ emit_move_insn (operands[0], operands[1]);
+ else if (n <= 2)
+ {
+ emit_insn (gen_ashlsi3_cnt1 (operands[0], operands[1]));
+ if (n == 2)
+ emit_insn (gen_ashlsi3_cnt1 (operands[0], operands[0]));
+ }
+ else
+ {
+ rtx zero = gen_reg_rtx (SImode);
+ emit_move_insn (zero, const0_rtx);
+ emit_insn (gen_add_shift (operands[0], operands[1],
+ GEN_INT (3), zero));
+ for (n -= 3; n >= 3; n -= 3)
+ emit_insn (gen_add_shift (operands[0], operands[0],
+ GEN_INT (3), zero));
+ if (n == 2)
+ emit_insn (gen_add_shift (operands[0], operands[0],
+ const2_rtx, zero));
+ else if (n)
+ emit_insn (gen_ashlsi3_cnt1 (operands[0], operands[0]));
+ }
+ DONE;
+ }
+ else if (n >= 29)
+ {
+ if (n < 31)
+ {
+ if (n == 29)
+ {
+ emit_insn (gen_andsi3_i (operands[0], operands[1],
+ GEN_INT (7)));
+ emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
+ }
+ else
+ emit_insn (gen_andsi3_i (operands[0], operands[1],
+ GEN_INT (3)));
+ emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
+ }
+ else
+ emit_insn (gen_andsi3_i (operands[0], operands[1], const1_rtx));
+ emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
+ DONE;
+ }
+ }
+
+ rtx shift = gen_rtx_fmt_ee (ASHIFT, SImode, operands[1], operands[2]);
+ emit_insn (gen_shift_si3_loop (operands[0], operands[1],
+ operands[2], shift));
+ DONE;
+})
+
+(define_insn_and_split "*ashlri3_nobs"
+ [(set (match_operand:SI 0 "dest_reg_operand")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")))]
+ "!TARGET_BARREL_SHIFTER
+ && operands[2] != const1_rtx
+ && arc_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ if (CONST_INT_P (operands[2]))
+ {
+ int n = INTVAL (operands[2]) & 0x1f;
+ if (n <= 4)
+ {
+ if (n != 0)
+ {
+ emit_insn (gen_ashrsi3_cnt1 (operands[0], operands[1]));
+ while (--n > 0)
+ emit_insn (gen_ashrsi3_cnt1 (operands[0], operands[0]));
+ }
+ else
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+ }
+
+ rtx pat;
+ rtx shift = gen_rtx_fmt_ee (ASHIFTRT, SImode, operands[1], operands[2]);
+ if (shiftr4_operator (shift, SImode))
+ pat = gen_shift_si3 (operands[0], operands[1], operands[2], shift);
+ else
+ pat = gen_shift_si3_loop (operands[0], operands[1], operands[2], shift);
+ emit_insn (pat);
+ DONE;
+})
+
+(define_insn_and_split "*lshrsi3_nobs"
+ [(set (match_operand:SI 0 "dest_reg_operand")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")))]
+ "!TARGET_BARREL_SHIFTER
+ && operands[2] != const1_rtx
+ && arc_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ if (CONST_INT_P (operands[2]))
+ {
+ int n = INTVAL (operands[2]) & 0x1f;
+ if (n <= 4)
+ {
+ if (n != 0)
+ {
+ emit_insn (gen_lshrsi3_cnt1 (operands[0], operands[1]));
+ while (--n > 0)
+ emit_insn (gen_lshrsi3_cnt1 (operands[0], operands[0]));
+ }
+ else
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+ }
+
+ rtx pat;
+ rtx shift = gen_rtx_fmt_ee (LSHIFTRT, SImode, operands[1], operands[2]);
+ if (shiftr4_operator (shift, SImode))
+ pat = gen_shift_si3 (operands[0], operands[1], operands[2], shift);
+ else
+ pat = gen_shift_si3_loop (operands[0], operands[1], operands[2], shift);
+ emit_insn (pat);
+ DONE;
+})
+
+;; shift_si3 appears after {ashr,lshr}si3_nobs
+(define_insn "shift_si3"
+ [(set (match_operand:SI 0 "dest_reg_operand" "=r")
+ (match_operator:SI 3 "shiftr4_operator"
+ [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "n")]))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (reg:CC CC_REG))
+ ]
+ "!TARGET_BARREL_SHIFTER
+ && operands[2] != const1_rtx"
+ "* return output_shift (operands);"
+ [(set_attr "type" "shift")
+ (set_attr "length" "16")])
+
+;; shift_si3_loop appears after {ashl,ashr,lshr}si3_nobs
+(define_insn "shift_si3_loop"
+ [(set (match_operand:SI 0 "dest_reg_operand" "=r,r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "rn,Cal")]))
+ (clobber (reg:SI LP_COUNT))
+ (clobber (reg:CC CC_REG))
+ ]
+ "!TARGET_BARREL_SHIFTER
+ && operands[2] != const1_rtx"
+ "* return output_shift (operands);"
+ [(set_attr "type" "shift")
+ (set_attr "length" "16,20")])
+
+;; Rotate instructions.
(define_insn "rotrsi3"
[(set (match_operand:SI 0 "dest_reg_operand" "=r, r, r")
@@ -3550,7 +3652,7 @@ archs4x, archs4xd"
(compare:CC (match_operand:SI 0 "register_operand" "q, q, h, c, c, q,c")
(match_operand:SI 1 "nonmemory_operand" "cO,hO,Cm1,cI,cL,Cal,Cal")))]
""
- "cmp%? %0,%B1%&"
+ "cmp%? %0,%B1"
[(set_attr "type" "compare")
(set_attr "iscompact" "true,true,true,false,false,true_limm,false")
(set_attr "predicable" "no,no,no,no,yes,no,yes")
@@ -3563,7 +3665,7 @@ archs4x, archs4xd"
(compare:CC_ZN (match_operand:SI 0 "register_operand" "q,c")
(const_int 0)))]
""
- "tst%? %0,%0%&"
+ "tst%? %0,%0"
[(set_attr "type" "compare,compare")
(set_attr "iscompact" "true,false")
(set_attr "predicable" "no,yes")
@@ -3592,7 +3694,7 @@ archs4x, archs4xd"
(match_operand:SI 1 "p2_immediate_operand" "O,n")))]
""
"@
- cmp%? %0,%1%&
+ cmp%? %0,%1
bxor.f 0,%0,%z1"
[(set_attr "type" "compare,compare")
(set_attr "iscompact" "true,false")
@@ -3604,7 +3706,7 @@ archs4x, archs4xd"
(compare:CC_C (match_operand:SI 0 "register_operand" "q, q, h, c, q, c")
(match_operand:SI 1 "nonmemory_operand" "cO,hO,Cm1,cI,Cal,Cal")))]
""
- "cmp%? %0,%1%&"
+ "cmp%? %0,%1"
[(set_attr "type" "compare")
(set_attr "iscompact" "true,true,true,false,true_limm,false")
(set_attr "cond" "set")
@@ -3658,12 +3760,24 @@ archs4x, archs4xd"
(define_expand "scc_insn"
[(set (match_operand:SI 0 "dest_reg_operand" "=w") (match_operand:SI 1 ""))])
+(define_mode_iterator CC_ltu [CC_C CC])
+
+(define_insn "scc_ltu_<mode>"
+ [(set (match_operand:SI 0 "dest_reg_operand" "=w")
+ (ltu:SI (reg:CC_ltu CC_REG) (const_int 0)))]
+ ""
+ "rlc\\t%0,0"
+ [(set_attr "type" "shift")
+ (set_attr "predicable" "no")
+ (set_attr "length" "4")])
+
(define_insn_and_split "*scc_insn"
[(set (match_operand:SI 0 "dest_reg_operand" "=w")
(match_operator:SI 1 "proper_comparison_operator" [(reg CC_REG) (const_int 0)]))]
""
"#"
- "reload_completed"
+ "reload_completed
+ && GET_CODE (operands[1]) != LTU"
[(set (match_dup 0) (const_int 1))
(cond_exec
(match_dup 1)
@@ -3787,19 +3901,10 @@ archs4x, archs4xd"
""
"*
{
- if (arc_ccfsm_branch_deleted_p ())
- {
- arc_ccfsm_record_branch_deleted ();
- return \"; branch deleted, next insns conditionalized\";
- }
- else
- {
- arc_ccfsm_record_condition (operands[1], false, insn, 0);
if (get_attr_length (insn) == 2)
- return \"b%d1%? %^%l0%&\";
+ return \"b%d1%?\\t%l0\";
else
- return \"b%d1%# %^%l0\";
- }
+ return \"b%d1%*\\t%l0\";
}"
[(set_attr "type" "branch")
(set
@@ -3835,22 +3940,7 @@ archs4x, archs4xd"
(pc)
(label_ref (match_operand 0 "" ""))))]
"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
- "*
-{
- if (arc_ccfsm_branch_deleted_p ())
- {
- arc_ccfsm_record_branch_deleted ();
- return \"; branch deleted, next insns conditionalized\";
- }
- else
- {
- arc_ccfsm_record_condition (operands[1], true, insn, 0);
- if (get_attr_length (insn) == 2)
- return \"b%D1%? %^%l0\";
- else
- return \"b%D1%# %^%l0\";
- }
-}"
+ "b%D1%?\\t%l0"
[(set_attr "type" "branch")
(set
(attr "length")
@@ -3888,7 +3978,7 @@ archs4x, archs4xd"
(define_insn "jump_i"
[(set (pc) (label_ref (match_operand 0 "" "")))]
"!TARGET_LONG_CALLS_SET || !CROSSING_JUMP_P (insn)"
- "b%!%* %^%l0%&"
+ "b%!%*\\t%l0"
[(set_attr "type" "uncond_branch")
(set (attr "iscompact")
(if_then_else (match_test "get_attr_length (insn) == 2")
@@ -3917,11 +4007,11 @@ archs4x, archs4xd"
[(set (pc) (match_operand:SI 0 "nonmemory_operand" "L,I,Cal,q,r"))]
""
"@
- j%!%* %0%&
- j%!%* %0%&
- j%!%* %0%&
- j%!%* [%0]%&
- j%!%* [%0]%&"
+ j%!%* %0
+ j%!%* %0
+ j%!%* %0
+ j%!%* [%0]
+ j%!%* [%0]"
[(set_attr "type" "jump")
(set_attr "iscompact" "false,false,false,maybe,false")
(set_attr "cond" "canuse,canuse_limm,canuse,canuse,canuse")])
@@ -4006,14 +4096,14 @@ archs4x, archs4xd"
switch (GET_MODE (diff_vec))
{
case E_SImode:
- return \"ld.as\\t%0,[%1,%2]%&\";
+ return \"ld.as\\t%0,[%1,%2]\";
case E_HImode:
if (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned)
return \"ld%_.as\\t%0,[%1,%2]\";
return \"ld%_.x.as\\t%0,[%1,%2]\";
case E_QImode:
if (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned)
- return \"ldb%?\\t%0,[%1,%2]%&\";
+ return \"ldb%?\\t%0,[%1,%2]\";
return \"ldb.x\\t%0,[%1,%2]\";
default:
gcc_unreachable ();
@@ -4049,7 +4139,7 @@ archs4x, archs4xd"
[(set (pc) (match_operand:SI 0 "register_operand" "Cal,q,c"))
(use (label_ref (match_operand 1 "" "")))]
""
- "j%!%* [%0]%&"
+ "j%!%* [%0]"
[(set_attr "type" "jump")
(set_attr "iscompact" "false,maybe,false")
(set_attr "cond" "canuse")])
@@ -4085,7 +4175,7 @@ archs4x, archs4xd"
(clobber (reg:SI 31))]
""
"@
- jl%!%* [%0]%&
+ jl%!%* [%0]
jl%!%* [%0]
jli_s %J0
sjli %J0
@@ -4129,7 +4219,7 @@ archs4x, archs4xd"
(clobber (reg:SI 31))]
""
"@
- jl%!%* [%1]%&
+ jl%!%* [%1]
jl%!%* [%1]
jli_s %J1
sjli %J1
@@ -4648,7 +4738,6 @@ archs4x, archs4xd"
{
if (which_alternative == 0)
{
- arc_toggle_unalign ();
return \"trap_s %0\";
}
@@ -4809,12 +4898,7 @@ archs4x, archs4xd"
[(reg CC_REG) (const_int 0)])
(simple_return) (pc)))]
"reload_completed"
-{
- output_asm_insn (\"j%d0%!%#\\t[blink]\", operands);
- /* record the condition in case there is a delay insn. */
- arc_ccfsm_record_condition (operands[0], false, insn, 0);
- return \"\";
-}
+ "j%d0%!%*\\t[blink]"
[(set_attr "type" "return")
(set_attr "cond" "use")
(set_attr "iscompact" "maybe" )
@@ -4853,13 +4937,13 @@ archs4x, archs4xd"
"*
switch (get_attr_length (insn))
{
- case 2: return \"br%d0%? %1, %2, %^%l3%&\";
- case 4: return \"br%d0%* %1, %B2, %^%l3\";
+ case 2: return \"br%d0%?\\t%1,%2,%l3\";
+ case 4: return \"br%d0%*\\t%1,%B2,%l3\";
case 8: if (!brcc_nolimm_operator (operands[0], VOIDmode))
- return \"br%d0%* %1, %B2, %^%l3\";
+ return \"br%d0%*\\t%1,%B2,%l3\";
/* FALLTHRU */
case 6: case 10:
- case 12:return \"cmp%? %1, %B2\\n\\tb%d0%* %^%l3%& ;br%d0 out of range\";
+ case 12:return \"cmp%? %1, %B2\\n\\tb%d0%*\\t%l3 ;br%d0 out of range\";
default: fprintf (stderr, \"unexpected length %d\\n\", get_attr_length (insn)); fflush (stderr); gcc_unreachable ();
}
"
@@ -5038,7 +5122,7 @@ archs4x, archs4xd"
(clobber (match_scratch:SI 2 "=X,r"))]
"TARGET_DBNZ"
"@
- dbnz%#\\t%0,%l1
+ dbnz%*\\t%0,%l1
#"
"TARGET_DBNZ && reload_completed && memory_operand (operands[0], SImode)"
[(set (match_dup 2) (match_dup 0))
@@ -5122,7 +5206,7 @@ archs4x, archs4xd"
[(set (match_operand:SF 0 "dest_reg_operand" "=q,r,r")
(abs:SF (match_operand:SF 1 "register_operand" "0,0,r")))]
""
- "bclr%?\\t%0,%1,31%&"
+ "bclr%?\\t%0,%1,31"
[(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false,false")
(set_attr "length" "2,4,4")
@@ -5911,7 +5995,7 @@ archs4x, archs4xd"
(zero_extract:SI (match_dup 1) (match_dup 5) (match_dup 7)))])
(match_dup 1)])
-(define_insn "*rotrsi3_cnt1"
+(define_insn "rotrsi3_cnt1"
[(set (match_operand:SI 0 "dest_reg_operand" "=r")
(rotatert:SI (match_operand:SI 1 "nonmemory_operand" "rL")
(const_int 1)))]
@@ -5931,15 +6015,15 @@ archs4x, archs4xd"
(set_attr "predicable" "no")
(set_attr "length" "4")])
-(define_insn "*ashlsi2_cnt1"
+(define_insn "ashlsi3_cnt1"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w")
(ashift:SI (match_operand:SI 1 "register_operand" "q,c")
(const_int 1)))]
""
- "asl%? %0,%1%&"
- [(set_attr "type" "shift")
+ "asl%? %0,%1"
+ [(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false")
- (set_attr "length" "4")
+ (set_attr "length" "*,4")
(set_attr "predicable" "no,no")])
(define_insn "*ashlsi2_cnt8"
@@ -5964,23 +6048,23 @@ archs4x, archs4xd"
(set_attr "length" "4")
(set_attr "predicable" "no")])
-(define_insn "*lshrsi3_cnt1"
+(define_insn "lshrsi3_cnt1"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "q,c")
(const_int 1)))]
""
- "lsr%? %0,%1%&"
- [(set_attr "type" "shift")
+ "lsr%? %0,%1"
+ [(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false")
(set_attr "predicable" "no,no")])
-(define_insn "*ashrsi3_cnt1"
+(define_insn "ashrsi3_cnt1"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "q,c")
(const_int 1)))]
""
- "asr%? %0,%1%&"
- [(set_attr "type" "shift")
+ "asr%? %0,%1"
+ [(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false")
(set_attr "predicable" "no,no")])
@@ -6330,7 +6414,7 @@ archs4x, archs4xd"
(set_attr "type" "multi")
(set_attr "predicable" "yes")])
-(define_insn "*add_shift"
+(define_insn "add_shift"
[(set (match_operand:SI 0 "register_operand" "=q,r,r")
(plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "q,r,r")
(match_operand:SI 2 "_1_2_3_operand" ""))
diff --git a/gcc/config/arc/arc.opt b/gcc/config/arc/arc.opt
index 4af901f..6542ce6 100644
--- a/gcc/config/arc/arc.opt
+++ b/gcc/config/arc/arc.opt
@@ -300,8 +300,8 @@ Target Var(TARGET_MEDIUM_CALLS) Init(TARGET_MMEDIUM_CALLS_DEFAULT)
Don't use less than 25 bit addressing range for calls.
mannotate-align
-Target Var(TARGET_ANNOTATE_ALIGN)
-Explain what alignment considerations lead to the decision to make an insn short or long.
+Target Ignore
+Does nothing. Preserved for backward compatibility.
malign-call
Target Ignore
diff --git a/gcc/config/arc/predicates.md b/gcc/config/arc/predicates.md
index 7650e47..e37d884 100644
--- a/gcc/config/arc/predicates.md
+++ b/gcc/config/arc/predicates.md
@@ -549,16 +549,6 @@
(match_code "ashiftrt, lshiftrt, ashift")
)
-;; Return true if OP is a left shift operator that can be implemented in
-;; four insn words or less without a barrel shifter or multiplier.
-(define_predicate "shiftl4_operator"
- (and (match_code "ashift")
- (match_test "const_int_operand (XEXP (op, 1), VOIDmode) ")
- (match_test "UINTVAL (XEXP (op, 1)) <= 9U
- || INTVAL (XEXP (op, 1)) == 29
- || INTVAL (XEXP (op, 1)) == 30
- || INTVAL (XEXP (op, 1)) == 31")))
-
;; Return true if OP is a right shift operator that can be implemented in
;; four insn words or less without a barrel shifter or multiplier.
(define_predicate "shiftr4_operator"
@@ -568,12 +558,6 @@
|| INTVAL (XEXP (op, 1)) == 30
|| INTVAL (XEXP (op, 1)) == 31")))
-;; Return true if OP is a shift operator that can be implemented in
-;; four insn words or less without a barrel shifter or multiplier.
-(define_predicate "shift4_operator"
- (ior (match_operand 0 "shiftl4_operator")
- (match_operand 0 "shiftr4_operator")))
-
(define_predicate "mult_operator"
(and (match_code "mult") (match_test "TARGET_MPY"))
)
diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md
index 05a4ebb..d7c4a1b 100644
--- a/gcc/config/arm/constraints.md
+++ b/gcc/config/arm/constraints.md
@@ -36,7 +36,7 @@
;; in Thumb-1 state: Pa, Pb, Pc, Pd, Pe
;; in Thumb-2 state: Ha, Pj, PJ, Ps, Pt, Pu, Pv, Pw, Px, Py, Pz, Rd, Rf, Rb, Ra,
;; Rg, Ri
-;; in all states: Pf, Pg
+;; in all states: Pg
;; The following memory constraints have been used:
;; in ARM/Thumb-2 state: Uh, Ut, Uv, Uy, Un, Um, Us, Up, Uf, Ux, Ul
@@ -239,13 +239,6 @@
(and (match_code "const_int")
(match_test "TARGET_THUMB1 && ival >= 256 && ival <= 510")))
-(define_constraint "Pf"
- "Memory models except relaxed, consume or release ones."
- (and (match_code "const_int")
- (match_test "!is_mm_relaxed (memmodel_from_int (ival))
- && !is_mm_consume (memmodel_from_int (ival))
- && !is_mm_release (memmodel_from_int (ival))")))
-
(define_constraint "Pg"
"@internal In Thumb-2 state a constant in range 1 to 32"
(and (match_code "const_int")
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 7626bf3..2210810 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -62,68 +62,110 @@
(set_attr "conds" "unconditional")
(set_attr "predicable" "no")])
-(define_insn "atomic_load<mode>"
- [(set (match_operand:QHSI 0 "register_operand" "=r,r,l")
+(define_insn "arm_atomic_load<mode>"
+ [(set (match_operand:QHSI 0 "register_operand" "=r,l")
(unspec_volatile:QHSI
- [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q,Q,Q")
- (match_operand:SI 2 "const_int_operand" "n,Pf,n")] ;; model
+ [(match_operand:QHSI 1 "memory_operand" "m,m")]
+ VUNSPEC_LDR))]
+ ""
+ "ldr<sync_sfx>\t%0, %1"
+ [(set_attr "arch" "32,any")])
+
+(define_insn "arm_atomic_load_acquire<mode>"
+ [(set (match_operand:QHSI 0 "register_operand" "=r")
+ (unspec_volatile:QHSI
+ [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q")]
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
- {
- if (aarch_mm_needs_acquire (operands[2]))
- {
- if (TARGET_THUMB1)
- return "lda<sync_sfx>\t%0, %1";
- else
- return "lda<sync_sfx>%?\t%0, %1";
- }
- else
- {
- if (TARGET_THUMB1)
- return "ldr<sync_sfx>\t%0, %1";
- else
- return "ldr<sync_sfx>%?\t%0, %1";
- }
- }
- [(set_attr "arch" "32,v8mb,any")
- (set_attr "predicable" "yes")])
+ "lda<sync_sfx>\t%0, %C1"
+)
-(define_insn "atomic_store<mode>"
- [(set (match_operand:QHSI 0 "memory_operand" "=Q,Q,Q")
+(define_insn "arm_atomic_store<mode>"
+ [(set (match_operand:QHSI 0 "memory_operand" "=m,m")
+ (unspec_volatile:QHSI
+ [(match_operand:QHSI 1 "register_operand" "r,l")]
+ VUNSPEC_STR))]
+ ""
+ "str<sync_sfx>\t%1, %0";
+ [(set_attr "arch" "32,any")])
+
+(define_insn "arm_atomic_store_release<mode>"
+ [(set (match_operand:QHSI 0 "arm_sync_memory_operand" "=Q")
(unspec_volatile:QHSI
- [(match_operand:QHSI 1 "general_operand" "r,r,l")
- (match_operand:SI 2 "const_int_operand" "n,Pf,n")] ;; model
+ [(match_operand:QHSI 1 "register_operand" "r")]
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
- {
- if (aarch_mm_needs_release (operands[2]))
- {
- if (TARGET_THUMB1)
- return "stl<sync_sfx>\t%1, %0";
- else
- return "stl<sync_sfx>%?\t%1, %0";
- }
- else
- {
- if (TARGET_THUMB1)
- return "str<sync_sfx>\t%1, %0";
- else
- return "str<sync_sfx>%?\t%1, %0";
- }
- }
- [(set_attr "arch" "32,v8mb,any")
- (set_attr "predicable" "yes")])
+ "stl<sync_sfx>\t%1, %C0")
+
+
+(define_expand "atomic_load<mode>"
+ [(match_operand:QHSI 0 "register_operand") ;; val out
+ (match_operand:QHSI 1 "arm_sync_memory_operand") ;; memory
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ memmodel model = memmodel_from_int (INTVAL (operands[2]));
+
+ if (TARGET_HAVE_LDACQ && !is_mm_relaxed (model))
+ {
+ emit_insn (gen_arm_atomic_load_acquire<mode> (operands[0], operands[1]));
+ DONE;
+ }
+
+ /* The seq_cst model needs a barrier before the load to block reordering with
+ earlier accesses. */
+ if (is_mm_seq_cst (model))
+ expand_mem_thread_fence (model);
+
+ emit_insn (gen_arm_atomic_load<mode> (operands[0], operands[1]));
+
+ /* All non-relaxed models need a barrier after the load when load-acquire
+ instructions are not available. */
+ if (!is_mm_relaxed (model))
+ expand_mem_thread_fence (model);
+
+ DONE;
+})
+
+(define_expand "atomic_store<mode>"
+ [(match_operand:QHSI 0 "arm_sync_memory_operand") ;; memory
+ (match_operand:QHSI 1 "register_operand") ;; store value
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ memmodel model = memmodel_from_int (INTVAL (operands[2]));
+
+ if (TARGET_HAVE_LDACQ && !is_mm_relaxed (model))
+ {
+ emit_insn (gen_arm_atomic_store_release<mode> (operands[0], operands[1]));
+ DONE;
+ }
+
+  /* All non-relaxed models need a barrier before the store when store-release
+     instructions are not available.  */
+ if (!is_mm_relaxed (model))
+ expand_mem_thread_fence (model);
+
+ emit_insn (gen_arm_atomic_store<mode> (operands[0], operands[1]));
+
+ /* The seq_cst model needs a barrier after the store to block reordering with
+ later accesses. */
+ if (is_mm_seq_cst (model))
+ expand_mem_thread_fence (model);
+
+ DONE;
+})
;; An LDRD instruction usable by the atomic_loaddi expander on LPAE targets
(define_insn "arm_atomic_loaddi2_ldrd"
[(set (match_operand:DI 0 "register_operand" "=r")
(unspec_volatile:DI
- [(match_operand:DI 1 "arm_sync_memory_operand" "Q")]
+ [(match_operand:DI 1 "memory_operand" "m")]
VUNSPEC_LDRD_ATOMIC))]
"ARM_DOUBLEWORD_ALIGN && TARGET_HAVE_LPAE"
- "ldrd%?\t%0, %H0, %C1"
- [(set_attr "predicable" "yes")])
+ "ldrd\t%0, %H0, %1"
+)
;; There are three ways to expand this depending on the architecture
;; features available. As for the barriers, a load needs a barrier
@@ -152,6 +194,11 @@
DONE;
}
+ /* The seq_cst model needs a barrier before the load to block reordering with
+ earlier accesses. */
+ if (is_mm_seq_cst (model))
+ expand_mem_thread_fence (model);
+
/* On LPAE targets LDRD and STRD accesses to 64-bit aligned
locations are 64-bit single-copy atomic. We still need barriers in the
appropriate places to implement the ordering constraints. */
@@ -160,7 +207,6 @@
else
emit_insn (gen_arm_load_exclusivedi (operands[0], operands[1]));
-
/* All non-relaxed models need a barrier after the load when load-acquire
instructions are not available. */
if (!is_mm_relaxed (model))
@@ -446,54 +492,42 @@
[(set_attr "arch" "32,v8mb")])
(define_insn "arm_load_exclusive<mode>"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
(zero_extend:SI
(unspec_volatile:NARROW
- [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
+ [(match_operand:NARROW 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LL)))]
"TARGET_HAVE_LDREXBH"
- "@
- ldrex<sync_sfx>%?\t%0, %C1
- ldrex<sync_sfx>\t%0, %C1"
- [(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")])
+ "ldrex<sync_sfx>\t%0, %C1"
+)
(define_insn "arm_load_acquire_exclusive<mode>"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
(zero_extend:SI
(unspec_volatile:NARROW
- [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
+ [(match_operand:NARROW 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LAX)))]
"TARGET_HAVE_LDACQ"
- "@
- ldaex<sync_sfx>%?\\t%0, %C1
- ldaex<sync_sfx>\\t%0, %C1"
- [(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")])
+ "ldaex<sync_sfx>\\t%0, %C1"
+)
(define_insn "arm_load_exclusivesi"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
(unspec_volatile:SI
- [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
+ [(match_operand:SI 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LL))]
"TARGET_HAVE_LDREX"
- "@
- ldrex%?\t%0, %C1
- ldrex\t%0, %C1"
- [(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")])
+ "ldrex\t%0, %C1"
+)
(define_insn "arm_load_acquire_exclusivesi"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
(unspec_volatile:SI
- [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
+ [(match_operand:SI 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LAX))]
"TARGET_HAVE_LDACQ"
- "@
- ldaex%?\t%0, %C1
- ldaex\t%0, %C1"
- [(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")])
+ "ldaex\t%0, %C1"
+)
(define_insn "arm_load_exclusivedi"
[(set (match_operand:DI 0 "s_register_operand" "=r")
@@ -501,8 +535,8 @@
[(match_operand:DI 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LL))]
"TARGET_HAVE_LDREXD"
- "ldrexd%?\t%0, %H0, %C1"
- [(set_attr "predicable" "yes")])
+ "ldrexd\t%0, %H0, %C1"
+)
(define_insn "arm_load_acquire_exclusivedi"
[(set (match_operand:DI 0 "s_register_operand" "=r")
@@ -510,8 +544,8 @@
[(match_operand:DI 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LAX))]
"TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
- "ldaexd%?\t%0, %H0, %C1"
- [(set_attr "predicable" "yes")])
+ "ldaexd\t%0, %H0, %C1"
+)
(define_insn "arm_store_exclusive<mode>"
[(set (match_operand:SI 0 "s_register_operand" "=&r")
@@ -530,14 +564,11 @@
Note that the 1st register always gets the
lowest word in memory. */
gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
- return "strexd%?\t%0, %2, %H2, %C1";
+ return "strexd\t%0, %2, %H2, %C1";
}
- if (TARGET_THUMB1)
- return "strex<sync_sfx>\t%0, %2, %C1";
- else
- return "strex<sync_sfx>%?\t%0, %2, %C1";
+ return "strex<sync_sfx>\t%0, %2, %C1";
}
- [(set_attr "predicable" "yes")])
+)
(define_insn "arm_store_release_exclusivedi"
[(set (match_operand:SI 0 "s_register_operand" "=&r")
@@ -550,20 +581,16 @@
{
/* See comment in arm_store_exclusive<mode> above. */
gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
- return "stlexd%?\t%0, %2, %H2, %C1";
+ return "stlexd\t%0, %2, %H2, %C1";
}
- [(set_attr "predicable" "yes")])
+)
(define_insn "arm_store_release_exclusive<mode>"
- [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ [(set (match_operand:SI 0 "s_register_operand" "=&r")
(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
- (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua,Ua")
+ (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua")
(unspec_volatile:QHSI
- [(match_operand:QHSI 2 "s_register_operand" "r,r")]
+ [(match_operand:QHSI 2 "s_register_operand" "r")]
VUNSPEC_SLX))]
"TARGET_HAVE_LDACQ"
- "@
- stlex<sync_sfx>%?\t%0, %2, %C1
- stlex<sync_sfx>\t%0, %2, %C1"
- [(set_attr "arch" "32,v8mb")
- (set_attr "predicable" "yes")])
+ "stlex<sync_sfx>\t%0, %2, %C1")
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index 6a5b1f8..4713ec8 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -221,8 +221,10 @@
VUNSPEC_SC ; Represent a store-register-exclusive.
VUNSPEC_LAX ; Represent a load-register-acquire-exclusive.
VUNSPEC_SLX ; Represent a store-register-release-exclusive.
- VUNSPEC_LDA ; Represent a store-register-acquire.
+ VUNSPEC_LDA ; Represent a load-register-acquire.
+ VUNSPEC_LDR ; Represent a load-register-relaxed.
VUNSPEC_STL ; Represent a store-register-release.
+ VUNSPEC_STR ; Represent a store-register-relaxed.
VUNSPEC_GET_FPSCR ; Represent fetch of FPSCR content.
VUNSPEC_SET_FPSCR ; Represent assign of FPSCR content.
VUNSPEC_SET_FPSCR_NZCVQC ; Represent assign of FPSCR_nzcvqc content.
diff --git a/gcc/config/darwin.cc b/gcc/config/darwin.cc
index d8c8607..a80b6ca 100644
--- a/gcc/config/darwin.cc
+++ b/gcc/config/darwin.cc
@@ -3073,7 +3073,35 @@ darwin_asm_output_dwarf_offset (FILE *file, int size, const char * lab,
void
darwin_file_start (void)
{
- /* Nothing to do. */
+#ifdef HAVE_AS_MMACOSX_VERSION_MIN_OPTION
+ /* This should not happen with a well-formed command line, but the user could
+ invoke cc1* directly without it. */
+ if (!darwin_macosx_version_min)
+ return;
+ /* This assumes that the version passed has been validated in the driver. */
+ unsigned maj, min, tiny;
+ int count = sscanf (darwin_macosx_version_min, "%u.%u.%u", &maj, &min, &tiny);
+ if (count < 0)
+ return;
+ if (count < 3)
+ tiny = 0;
+ if (count < 2)
+ min = 0;
+ const char *directive;
+#ifdef HAVE_AS_MACOS_BUILD_VERSION
+ /* We only handle macos, so far. */
+ if (generating_for_darwin_version >= 18)
+ directive = "build_version macos, ";
+ else
+#endif
+ directive = "macosx_version_min ";
+ if (count > 2 && tiny != 0)
+ fprintf (asm_out_file, "\t.%s %u, %u, %u\n", directive, maj, min, tiny);
+ else if (count > 1)
+ fprintf (asm_out_file, "\t.%s %u, %u\n", directive, maj, min);
+ else
+ fprintf (asm_out_file, "\t.%s %u, 0\n", directive, maj);
+#endif
}
/* Called for the TARGET_ASM_FILE_END hook.
@@ -3295,7 +3323,9 @@ darwin_override_options (void)
/* Keep track of which (major) version we're generating code for. */
if (darwin_macosx_version_min)
{
- if (strverscmp (darwin_macosx_version_min, "10.7") >= 0)
+ if (strverscmp (darwin_macosx_version_min, "10.14") >= 0)
+ generating_for_darwin_version = 18;
+ else if (strverscmp (darwin_macosx_version_min, "10.7") >= 0)
generating_for_darwin_version = 11;
else if (strverscmp (darwin_macosx_version_min, "10.6") >= 0)
generating_for_darwin_version = 10;
diff --git a/gcc/config/gcn/gcn-valu.md b/gcc/config/gcn/gcn-valu.md
index 284dda7..32b170e 100644
--- a/gcc/config/gcn/gcn-valu.md
+++ b/gcc/config/gcn/gcn-valu.md
@@ -457,23 +457,21 @@
(set_attr "length" "4,8")])
(define_insn "mov<mode>_exec"
- [(set (match_operand:V_1REG 0 "nonimmediate_operand" "=v, v, v, v, v, m")
+ [(set (match_operand:V_1REG 0 "nonimmediate_operand")
(vec_merge:V_1REG
- (match_operand:V_1REG 1 "general_operand" "vA, B, v,vA, m, v")
- (match_operand:V_1REG 2 "gcn_alu_or_unspec_operand"
- "U0,U0,vA,vA,U0,U0")
- (match_operand:DI 3 "register_operand" " e, e,cV,Sv, e, e")))
- (clobber (match_scratch:<VnDI> 4 "=X, X, X, X,&v,&v"))]
+ (match_operand:V_1REG 1 "general_operand")
+ (match_operand:V_1REG 2 "gcn_alu_or_unspec_operand")
+ (match_operand:DI 3 "register_operand")))
+ (clobber (match_scratch:<VnDI> 4))]
"!MEM_P (operands[0]) || REG_P (operands[1])"
- "@
- v_mov_b32\t%0, %1
- v_mov_b32\t%0, %1
- v_cndmask_b32\t%0, %2, %1, vcc
- v_cndmask_b32\t%0, %2, %1, %3
- #
- #"
- [(set_attr "type" "vop1,vop1,vop2,vop3a,*,*")
- (set_attr "length" "4,8,4,8,16,16")])
+ {@ [cons: =0, 1, 2, 3, =4; attrs: type, length]
+ [v,vA,U0,e ,X ;vop1 ,4 ] v_mov_b32\t%0, %1
+ [v,B ,U0,e ,X ;vop1 ,8 ] v_mov_b32\t%0, %1
+ [v,v ,vA,cV,X ;vop2 ,4 ] v_cndmask_b32\t%0, %2, %1, vcc
+ [v,vA,vA,Sv,X ;vop3a,8 ] v_cndmask_b32\t%0, %2, %1, %3
+ [v,m ,U0,e ,&v;* ,16] #
+ [m,v ,U0,e ,&v;* ,16] #
+ })
; This variant does not accept an unspec, but does permit MEM
; read/modify/write which is necessary for maskstore.
@@ -644,19 +642,18 @@
; flat_load v, vT
(define_insn "mov<mode>_sgprbase"
- [(set (match_operand:V_1REG 0 "nonimmediate_operand" "= v, v, v, m")
+ [(set (match_operand:V_1REG 0 "nonimmediate_operand")
(unspec:V_1REG
- [(match_operand:V_1REG 1 "general_operand" " vA,vB, m, v")]
+ [(match_operand:V_1REG 1 "general_operand")]
UNSPEC_SGPRBASE))
- (clobber (match_operand:<VnDI> 2 "register_operand" "=&v,&v,&v,&v"))]
+ (clobber (match_operand:<VnDI> 2 "register_operand"))]
"lra_in_progress || reload_completed"
- "@
- v_mov_b32\t%0, %1
- v_mov_b32\t%0, %1
- #
- #"
- [(set_attr "type" "vop1,vop1,*,*")
- (set_attr "length" "4,8,12,12")])
+ {@ [cons: =0, 1, =2; attrs: type, length]
+ [v,vA,&v;vop1,4 ] v_mov_b32\t%0, %1
+ [v,vB,&v;vop1,8 ] ^
+ [v,m ,&v;* ,12] #
+ [m,v ,&v;* ,12] #
+ })
(define_insn "mov<mode>_sgprbase"
[(set (match_operand:V_2REG 0 "nonimmediate_operand" "= v, v, m")
@@ -676,17 +673,17 @@
(set_attr "length" "8,12,12")])
(define_insn "mov<mode>_sgprbase"
- [(set (match_operand:V_4REG 0 "nonimmediate_operand" "= v, v, m")
+ [(set (match_operand:V_4REG 0 "nonimmediate_operand")
(unspec:V_4REG
- [(match_operand:V_4REG 1 "general_operand" "vDB, m, v")]
+ [(match_operand:V_4REG 1 "general_operand")]
UNSPEC_SGPRBASE))
- (clobber (match_operand:<VnDI> 2 "register_operand" "=&v,&v,&v"))]
+ (clobber (match_operand:<VnDI> 2 "register_operand"))]
"lra_in_progress || reload_completed"
- "v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\;v_mov_b32\t%J0, %J1\;v_mov_b32\t%K0, %K1
- #
- #"
- [(set_attr "type" "vmult,*,*")
- (set_attr "length" "8,12,12")])
+ {@ [cons: =0, 1, =2; attrs: type, length]
+ [v,vDB,&v;vmult,8 ] v_mov_b32\t%L0, %L1\;v_mov_b32\t%H0, %H1\;v_mov_b32\t%J0, %J1\;v_mov_b32\t%K0, %K1
+ [v,m ,&v;* ,12] #
+ [m,v ,&v;* ,12] #
+ })
; reload_in was once a standard name, but here it's only referenced by
; gcn_secondary_reload. It allows a reload with a scratch register.
diff --git a/gcc/config/gcn/gcn.cc b/gcc/config/gcn/gcn.cc
index f6cff65..ef3b647 100644
--- a/gcc/config/gcn/gcn.cc
+++ b/gcc/config/gcn/gcn.cc
@@ -6991,7 +6991,7 @@ print_operand_address (FILE *file, rtx mem)
void
print_operand (FILE *file, rtx x, int code)
{
- int xcode = x ? GET_CODE (x) : 0;
+ rtx_code xcode = x ? GET_CODE (x) : UNKNOWN;
bool invert = false;
switch (code)
{
diff --git a/gcc/config/gcn/gcn.md b/gcc/config/gcn/gcn.md
index 7065acf..30fe9e3 100644
--- a/gcc/config/gcn/gcn.md
+++ b/gcc/config/gcn/gcn.md
@@ -542,87 +542,76 @@
; 32bit move pattern
(define_insn "*mov<mode>_insn"
- [(set (match_operand:SISF 0 "nonimmediate_operand"
- "=SD,SD,SD,SD,RB,Sm,RS,v,Sg, v, v,RF,v,RLRG, v,SD, v,RM")
- (match_operand:SISF 1 "gcn_load_operand"
- "SSA, J, B,RB,Sm,RS,Sm,v, v,Sv,RF, v,B, v,RLRG, Y,RM, v"))]
- ""
- "@
- s_mov_b32\t%0, %1
- s_movk_i32\t%0, %1
- s_mov_b32\t%0, %1
- s_buffer_load%s0\t%0, s[0:3], %1\;s_waitcnt\tlgkmcnt(0)
- s_buffer_store%s1\t%1, s[0:3], %0
- s_load_dword\t%0, %A1\;s_waitcnt\tlgkmcnt(0)
- s_store_dword\t%1, %A0
- v_mov_b32\t%0, %1
- v_readlane_b32\t%0, %1, 0
- v_writelane_b32\t%0, %1, 0
- flat_load_dword\t%0, %A1%O1%g1\;s_waitcnt\t0
- flat_store_dword\t%A0, %1%O0%g0
- v_mov_b32\t%0, %1
- ds_write_b32\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
- ds_read_b32\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
- s_mov_b32\t%0, %1
- global_load_dword\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
- global_store_dword\t%A0, %1%O0%g0"
- [(set_attr "type" "sop1,sopk,sop1,smem,smem,smem,smem,vop1,vop3a,vop3a,flat,
- flat,vop1,ds,ds,sop1,flat,flat")
- (set_attr "exec" "*,*,*,*,*,*,*,*,none,none,*,*,*,*,*,*,*,*")
- (set_attr "length" "4,4,8,12,12,12,12,4,8,8,12,12,8,12,12,8,12,12")])
+ [(set (match_operand:SISF 0 "nonimmediate_operand")
+ (match_operand:SISF 1 "gcn_load_operand"))]
+ ""
+ {@ [cons: =0, 1; attrs: type, exec, length]
+ [SD ,SSA ;sop1 ,* ,4 ] s_mov_b32\t%0, %1
+ [SD ,J ;sopk ,* ,4 ] s_movk_i32\t%0, %1
+ [SD ,B ;sop1 ,* ,8 ] s_mov_b32\t%0, %1
+ [SD ,RB ;smem ,* ,12] s_buffer_load%s0\t%0, s[0:3], %1\;s_waitcnt\tlgkmcnt(0)
+ [RB ,Sm ;smem ,* ,12] s_buffer_store%s1\t%1, s[0:3], %0
+ [Sm ,RS ;smem ,* ,12] s_load_dword\t%0, %A1\;s_waitcnt\tlgkmcnt(0)
+ [RS ,Sm ;smem ,* ,12] s_store_dword\t%1, %A0
+ [v ,v ;vop1 ,* ,4 ] v_mov_b32\t%0, %1
+ [Sg ,v ;vop3a,none,8 ] v_readlane_b32\t%0, %1, 0
+ [v ,Sv ;vop3a,none,8 ] v_writelane_b32\t%0, %1, 0
+ [v ,RF ;flat ,* ,12] flat_load_dword\t%0, %A1%O1%g1\;s_waitcnt\t0
+ [RF ,v ;flat ,* ,12] flat_store_dword\t%A0, %1%O0%g0
+ [v ,B ;vop1 ,* ,8 ] v_mov_b32\t%0, %1
+ [RLRG,v ;ds ,* ,12] ds_write_b32\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
+ [v ,RLRG;ds ,* ,12] ds_read_b32\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
+ [SD ,Y ;sop1 ,* ,8 ] s_mov_b32\t%0, %1
+ [v ,RM ;flat ,* ,12] global_load_dword\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
+ [RM ,v ;flat ,* ,12] global_store_dword\t%A0, %1%O0%g0
+ })
; 8/16bit move pattern
; TODO: implement combined load and zero_extend, but *only* for -msram-ecc=on
(define_insn "*mov<mode>_insn"
- [(set (match_operand:QIHI 0 "nonimmediate_operand"
- "=SD,SD,SD,v,Sg, v, v,RF,v,RLRG, v, v,RM")
- (match_operand:QIHI 1 "gcn_load_operand"
- "SSA, J, B,v, v,Sv,RF, v,B, v,RLRG,RM, v"))]
+ [(set (match_operand:QIHI 0 "nonimmediate_operand")
+ (match_operand:QIHI 1 "gcn_load_operand"))]
"gcn_valid_move_p (<MODE>mode, operands[0], operands[1])"
- "@
- s_mov_b32\t%0, %1
- s_movk_i32\t%0, %1
- s_mov_b32\t%0, %1
- v_mov_b32\t%0, %1
- v_readlane_b32\t%0, %1, 0
- v_writelane_b32\t%0, %1, 0
- flat_load%o1\t%0, %A1%O1%g1\;s_waitcnt\t0
- flat_store%s0\t%A0, %1%O0%g0
- v_mov_b32\t%0, %1
- ds_write%b0\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
- ds_read%u1\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
- global_load%o1\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
- global_store%s0\t%A0, %1%O0%g0"
- [(set_attr "type"
- "sop1,sopk,sop1,vop1,vop3a,vop3a,flat,flat,vop1,ds,ds,flat,flat")
- (set_attr "exec" "*,*,*,*,none,none,*,*,*,*,*,*,*")
- (set_attr "length" "4,4,8,4,4,4,12,12,8,12,12,12,12")])
+ {@ [cons: =0, 1; attrs: type, exec, length]
+ [SD ,SSA ;sop1 ,* ,4 ] s_mov_b32\t%0, %1
+ [SD ,J ;sopk ,* ,4 ] s_movk_i32\t%0, %1
+ [SD ,B ;sop1 ,* ,8 ] s_mov_b32\t%0, %1
+ [v ,v ;vop1 ,* ,4 ] v_mov_b32\t%0, %1
+ [Sg ,v ;vop3a,none,4 ] v_readlane_b32\t%0, %1, 0
+ [v ,Sv ;vop3a,none,4 ] v_writelane_b32\t%0, %1, 0
+ [v ,RF ;flat ,* ,12] flat_load%o1\t%0, %A1%O1%g1\;s_waitcnt\t0
+ [RF ,v ;flat ,* ,12] flat_store%s0\t%A0, %1%O0%g0
+ [v ,B ;vop1 ,* ,8 ] v_mov_b32\t%0, %1
+ [RLRG,v ;ds ,* ,12] ds_write%b0\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
+ [v ,RLRG;ds ,* ,12] ds_read%u1\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
+ [v ,RM ;flat ,* ,12] global_load%o1\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
+ [RM ,v ;flat ,* ,12] global_store%s0\t%A0, %1%O0%g0
+ })
; 64bit move pattern
(define_insn_and_split "*mov<mode>_insn"
- [(set (match_operand:DIDF 0 "nonimmediate_operand"
- "=SD,SD,SD,RS,Sm,v, v,Sg, v, v,RF,RLRG, v, v,RM")
- (match_operand:DIDF 1 "general_operand"
- "SSA, C,DB,Sm,RS,v,DB, v,Sv,RF, v, v,RLRG,RM, v"))]
+ [(set (match_operand:DIDF 0 "nonimmediate_operand")
+ (match_operand:DIDF 1 "general_operand"))]
"GET_CODE(operands[1]) != SYMBOL_REF"
- "@
- s_mov_b64\t%0, %1
- s_mov_b64\t%0, %1
- #
- s_store_dwordx2\t%1, %A0
- s_load_dwordx2\t%0, %A1\;s_waitcnt\tlgkmcnt(0)
- #
- #
- #
- #
- flat_load_dwordx2\t%0, %A1%O1%g1\;s_waitcnt\t0
- flat_store_dwordx2\t%A0, %1%O0%g0
- ds_write_b64\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
- ds_read_b64\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
- global_load_dwordx2\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
- global_store_dwordx2\t%A0, %1%O0%g0"
+ {@ [cons: =0, 1; attrs: type, length]
+ [SD ,SSA ;sop1 ,4 ] s_mov_b64\t%0, %1
+ [SD ,C ;sop1 ,8 ] ^
+ [SD ,DB ;mult ,* ] #
+ [RS ,Sm ;smem ,12] s_store_dwordx2\t%1, %A0
+ [Sm ,RS ;smem ,12] s_load_dwordx2\t%0, %A1\;s_waitcnt\tlgkmcnt(0)
+ [v ,v ;vmult,* ] #
+ [v ,DB ;vmult,* ] #
+ [Sg ,v ;vmult,* ] #
+ [v ,Sv ;vmult,* ] #
+ [v ,RF ;flat ,12] flat_load_dwordx2\t%0, %A1%O1%g1\;s_waitcnt\t0
+ [RF ,v ;flat ,12] flat_store_dwordx2\t%A0, %1%O0%g0
+ [RLRG,v ;ds ,12] ds_write_b64\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
+ [v ,RLRG;ds ,12] ds_read_b64\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
+ [v ,RM ;flat ,12] global_load_dwordx2\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
+ [RM ,v ;flat ,12] global_store_dwordx2\t%A0, %1%O0%g0
+ }
"reload_completed
&& ((!MEM_P (operands[0]) && !MEM_P (operands[1])
&& !gcn_sgpr_move_p (operands[0], operands[1]))
@@ -651,32 +640,28 @@
operands[2] = outhi;
operands[3] = inhi;
}
- }
- [(set_attr "type" "sop1,sop1,mult,smem,smem,vmult,vmult,vmult,vmult,flat,
- flat,ds,ds,flat,flat")
- (set_attr "length" "4,8,*,12,12,*,*,*,*,12,12,12,12,12,12")])
+ })
; 128-bit move.
(define_insn_and_split "*movti_insn"
- [(set (match_operand:TI 0 "nonimmediate_operand"
- "=SD,RS,Sm,RF, v,v, v,SD,RM, v,RL, v")
- (match_operand:TI 1 "general_operand"
- "SSB,Sm,RS, v,RF,v,Sv, v, v,RM, v,RL"))]
- ""
- "@
- #
- s_store_dwordx4\t%1, %A0
- s_load_dwordx4\t%0, %A1\;s_waitcnt\tlgkmcnt(0)
- flat_store_dwordx4\t%A0, %1%O0%g0
- flat_load_dwordx4\t%0, %A1%O1%g1\;s_waitcnt\t0
- #
- #
- #
- global_store_dwordx4\t%A0, %1%O0%g0
- global_load_dwordx4\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
- ds_write_b128\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
- ds_read_b128\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)"
+ [(set (match_operand:TI 0 "nonimmediate_operand")
+ (match_operand:TI 1 "general_operand" ))]
+ ""
+ {@ [cons: =0, 1; attrs: type, delayeduse, length]
+ [SD,SSB;mult ,* ,* ] #
+ [RS,Sm ;smem ,* ,12] s_store_dwordx4\t%1, %A0
+ [Sm,RS ;smem ,yes,12] s_load_dwordx4\t%0, %A1\;s_waitcnt\tlgkmcnt(0)
+ [RF,v ;flat ,* ,12] flat_store_dwordx4\t%A0, %1%O0%g0
+ [v ,RF ;flat ,* ,12] flat_load_dwordx4\t%0, %A1%O1%g1\;s_waitcnt\t0
+ [v ,v ;vmult,* ,* ] #
+ [v ,Sv ;vmult,* ,* ] #
+ [SD,v ;vmult,* ,* ] #
+ [RM,v ;flat ,yes,12] global_store_dwordx4\t%A0, %1%O0%g0
+ [v ,RM ;flat ,* ,12] global_load_dwordx4\t%0, %A1%O1%g1\;s_waitcnt\tvmcnt(0)
+ [RL,v ;ds ,* ,12] ds_write_b128\t%A0, %1%O0\;s_waitcnt\tlgkmcnt(0)
+ [v ,RL ;ds ,* ,12] ds_read_b128\t%0, %A1%O1\;s_waitcnt\tlgkmcnt(0)
+ }
"reload_completed
&& REG_P (operands[0])
&& (REG_P (operands[1]) || GET_CODE (operands[1]) == CONST_INT)"
@@ -695,11 +680,7 @@
operands[3] = gcn_operand_part (TImode, operands[1], 1);
operands[0] = gcn_operand_part (TImode, operands[0], 0);
operands[1] = gcn_operand_part (TImode, operands[1], 0);
- }
- [(set_attr "type" "mult,smem,smem,flat,flat,vmult,vmult,vmult,flat,flat,\
- ds,ds")
- (set_attr "delayeduse" "*,*,yes,*,*,*,*,*,yes,*,*,*")
- (set_attr "length" "*,12,12,12,12,*,*,*,12,12,12,12")])
+ })
;; }}}
;; {{{ Prologue/Epilogue
diff --git a/gcc/config/i386/avx5124fmapsintrin.h b/gcc/config/i386/avx5124fmapsintrin.h
index 97dd77c..4c884a5 100644
--- a/gcc/config/i386/avx5124fmapsintrin.h
+++ b/gcc/config/i386/avx5124fmapsintrin.h
@@ -30,7 +30,7 @@
#ifndef __AVX5124FMAPS__
#pragma GCC push_options
-#pragma GCC target("avx5124fmaps")
+#pragma GCC target("avx5124fmaps,evex512")
#define __DISABLE_AVX5124FMAPS__
#endif /* __AVX5124FMAPS__ */
diff --git a/gcc/config/i386/avx5124vnniwintrin.h b/gcc/config/i386/avx5124vnniwintrin.h
index fd12958..795e481 100644
--- a/gcc/config/i386/avx5124vnniwintrin.h
+++ b/gcc/config/i386/avx5124vnniwintrin.h
@@ -30,7 +30,7 @@
#ifndef __AVX5124VNNIW__
#pragma GCC push_options
-#pragma GCC target("avx5124vnniw")
+#pragma GCC target("avx5124vnniw,evex512")
#define __DISABLE_AVX5124VNNIW__
#endif /* __AVX5124VNNIW__ */
diff --git a/gcc/config/i386/avx512bf16intrin.h b/gcc/config/i386/avx512bf16intrin.h
index 107f4a4..94ccbf6 100644
--- a/gcc/config/i386/avx512bf16intrin.h
+++ b/gcc/config/i386/avx512bf16intrin.h
@@ -34,13 +34,6 @@
#define __DISABLE_AVX512BF16__
#endif /* __AVX512BF16__ */
-/* Internal data types for implementing the intrinsics. */
-typedef __bf16 __v32bf __attribute__ ((__vector_size__ (64)));
-
-/* The Intel API is flexible enough that we must allow aliasing with other
- vector types, and their scalar components. */
-typedef __bf16 __m512bh __attribute__ ((__vector_size__ (64), __may_alias__));
-
/* Convert One BF16 Data to One Single Float Data. */
extern __inline float
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -49,6 +42,24 @@ _mm_cvtsbh_ss (__bf16 __A)
return __builtin_ia32_cvtbf2sf (__A);
}
+#ifdef __DISABLE_AVX512BF16__
+#undef __DISABLE_AVX512BF16__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512BF16__ */
+
+#if !defined (__AVX512BF16__) || !defined (__EVEX512__)
+#pragma GCC push_options
+#pragma GCC target("avx512bf16,evex512")
+#define __DISABLE_AVX512BF16_512__
+#endif /* __AVX512BF16_512__ */
+
+/* Internal data types for implementing the intrinsics. */
+typedef __bf16 __v32bf __attribute__ ((__vector_size__ (64)));
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef __bf16 __m512bh __attribute__ ((__vector_size__ (64), __may_alias__));
+
/* vcvtne2ps2bf16 */
extern __inline __m512bh
@@ -144,9 +155,9 @@ _mm512_mask_cvtpbh_ps (__m512 __S, __mmask16 __U, __m256bh __A)
(__m512i)_mm512_cvtepi16_epi32 ((__m256i)__A), 16)));
}
-#ifdef __DISABLE_AVX512BF16__
-#undef __DISABLE_AVX512BF16__
+#ifdef __DISABLE_AVX512BF16_512__
+#undef __DISABLE_AVX512BF16_512__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX512BF16__ */
+#endif /* __DISABLE_AVX512BF16_512__ */
#endif /* _AVX512BF16INTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512bitalgintrin.h b/gcc/config/i386/avx512bitalgintrin.h
index a1c7be1..af8514f 100644
--- a/gcc/config/i386/avx512bitalgintrin.h
+++ b/gcc/config/i386/avx512bitalgintrin.h
@@ -22,15 +22,15 @@
<http://www.gnu.org/licenses/>. */
#if !defined _IMMINTRIN_H_INCLUDED
-# error "Never use <avx512bitalgintrin.h> directly; include <x86intrin.h> instead."
+# error "Never use <avx512bitalgintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _AVX512BITALGINTRIN_H_INCLUDED
#define _AVX512BITALGINTRIN_H_INCLUDED
-#ifndef __AVX512BITALG__
+#if !defined (__AVX512BITALG__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512bitalg")
+#pragma GCC target("avx512bitalg,evex512")
#define __DISABLE_AVX512BITALG__
#endif /* __AVX512BITALG__ */
@@ -108,153 +108,4 @@ _mm512_mask_bitshuffle_epi64_mask (__mmask64 __M, __m512i __A, __m512i __B)
#pragma GCC pop_options
#endif /* __DISABLE_AVX512BITALG__ */
-#if !defined(__AVX512BITALG__) || !defined(__AVX512VL__)
-#pragma GCC push_options
-#pragma GCC target("avx512bitalg,avx512vl")
-#define __DISABLE_AVX512BITALGVL__
-#endif /* __AVX512BITALGVL__ */
-
-extern __inline __m256i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_popcnt_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
-{
- return (__m256i) __builtin_ia32_vpopcountb_v32qi_mask ((__v32qi) __A,
- (__v32qi) __W,
- (__mmask32) __U);
-}
-
-extern __inline __m256i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_popcnt_epi8 (__mmask32 __U, __m256i __A)
-{
- return (__m256i) __builtin_ia32_vpopcountb_v32qi_mask ((__v32qi) __A,
- (__v32qi)
- _mm256_setzero_si256 (),
- (__mmask32) __U);
-}
-
-extern __inline __mmask32
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_bitshuffle_epi64_mask (__m256i __A, __m256i __B)
-{
- return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__mmask32) -1);
-}
-
-extern __inline __mmask32
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_bitshuffle_epi64_mask (__mmask32 __M, __m256i __A, __m256i __B)
-{
- return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__mmask32) __M);
-}
-
-extern __inline __mmask16
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bitshuffle_epi64_mask (__m128i __A, __m128i __B)
-{
- return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__mmask16) -1);
-}
-
-extern __inline __mmask16
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_bitshuffle_epi64_mask (__mmask16 __M, __m128i __A, __m128i __B)
-{
- return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__mmask16) __M);
-}
-
-extern __inline __m256i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_popcnt_epi8 (__m256i __A)
-{
- return (__m256i) __builtin_ia32_vpopcountb_v32qi ((__v32qi) __A);
-}
-
-extern __inline __m256i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_popcnt_epi16 (__m256i __A)
-{
- return (__m256i) __builtin_ia32_vpopcountw_v16hi ((__v16hi) __A);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_popcnt_epi8 (__m128i __A)
-{
- return (__m128i) __builtin_ia32_vpopcountb_v16qi ((__v16qi) __A);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_popcnt_epi16 (__m128i __A)
-{
- return (__m128i) __builtin_ia32_vpopcountw_v8hi ((__v8hi) __A);
-}
-
-extern __inline __m256i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_popcnt_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
-{
- return (__m256i) __builtin_ia32_vpopcountw_v16hi_mask ((__v16hi) __A,
- (__v16hi) __W,
- (__mmask16) __U);
-}
-
-extern __inline __m256i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_popcnt_epi16 (__mmask16 __U, __m256i __A)
-{
- return (__m256i) __builtin_ia32_vpopcountw_v16hi_mask ((__v16hi) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_popcnt_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
-{
- return (__m128i) __builtin_ia32_vpopcountb_v16qi_mask ((__v16qi) __A,
- (__v16qi) __W,
- (__mmask16) __U);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_popcnt_epi8 (__mmask16 __U, __m128i __A)
-{
- return (__m128i) __builtin_ia32_vpopcountb_v16qi_mask ((__v16qi) __A,
- (__v16qi)
- _mm_setzero_si128 (),
- (__mmask16) __U);
-}
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_popcnt_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i) __builtin_ia32_vpopcountw_v8hi_mask ((__v8hi) __A,
- (__v8hi) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_popcnt_epi16 (__mmask8 __U, __m128i __A)
-{
- return (__m128i) __builtin_ia32_vpopcountw_v8hi_mask ((__v8hi) __A,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
-}
-#ifdef __DISABLE_AVX512BITALGVL__
-#undef __DISABLE_AVX512BITALGVL__
-#pragma GCC pop_options
-#endif /* __DISABLE_AVX512BITALGVL__ */
-
#endif /* _AVX512BITALGINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512bitalgvlintrin.h b/gcc/config/i386/avx512bitalgvlintrin.h
new file mode 100644
index 0000000..36d697d
--- /dev/null
+++ b/gcc/config/i386/avx512bitalgvlintrin.h
@@ -0,0 +1,180 @@
+/* Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if !defined _IMMINTRIN_H_INCLUDED
+# error "Never use <avx512bitalgvlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512BITALGVLINTRIN_H_INCLUDED
+#define _AVX512BITALGVLINTRIN_H_INCLUDED
+
+#if !defined(__AVX512BITALG__) || !defined(__AVX512VL__)
+#pragma GCC push_options
+#pragma GCC target("avx512bitalg,avx512vl")
+#define __DISABLE_AVX512BITALGVL__
+#endif /* __AVX512BITALGVL__ */
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_popcnt_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpopcountb_v32qi_mask ((__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_popcnt_epi8 (__mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpopcountb_v32qi_mask ((__v32qi) __A,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+extern __inline __mmask32
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_bitshuffle_epi64_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_bitshuffle_epi64_mask (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32) __M);
+}
+
+extern __inline __mmask16
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_bitshuffle_epi64_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_bitshuffle_epi64_mask (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_popcnt_epi8 (__m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpopcountb_v32qi ((__v32qi) __A);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_popcnt_epi16 (__m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpopcountw_v16hi ((__v16hi) __A);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_popcnt_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpopcountb_v16qi ((__v16qi) __A);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_popcnt_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpopcountw_v8hi ((__v8hi) __A);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_popcnt_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpopcountw_v16hi_mask ((__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_popcnt_epi16 (__mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpopcountw_v16hi_mask ((__v16hi) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_popcnt_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpopcountb_v16qi_mask ((__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_popcnt_epi8 (__mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpopcountb_v16qi_mask ((__v16qi) __A,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_popcnt_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpopcountw_v8hi_mask ((__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_popcnt_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpopcountw_v8hi_mask ((__v8hi) __A,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+#ifdef __DISABLE_AVX512BITALGVL__
+#undef __DISABLE_AVX512BITALGVL__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512BITALGVL__ */
+
+#endif /* _AVX512BITALGVLINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512bwintrin.h b/gcc/config/i386/avx512bwintrin.h
index d1cd549..925bae1 100644
--- a/gcc/config/i386/avx512bwintrin.h
+++ b/gcc/config/i386/avx512bwintrin.h
@@ -34,16 +34,6 @@
#define __DISABLE_AVX512BW__
#endif /* __AVX512BW__ */
-/* Internal data types for implementing the intrinsics. */
-typedef short __v32hi __attribute__ ((__vector_size__ (64)));
-typedef short __v32hi_u __attribute__ ((__vector_size__ (64), \
- __may_alias__, __aligned__ (1)));
-typedef char __v64qi __attribute__ ((__vector_size__ (64)));
-typedef char __v64qi_u __attribute__ ((__vector_size__ (64), \
- __may_alias__, __aligned__ (1)));
-
-typedef unsigned long long __mmask64;
-
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktest_mask32_u8 (__mmask32 __A, __mmask32 __B, unsigned char *__CF)
@@ -54,229 +44,292 @@ _ktest_mask32_u8 (__mmask32 __A, __mmask32 __B, unsigned char *__CF)
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_ktest_mask64_u8 (__mmask64 __A, __mmask64 __B, unsigned char *__CF)
+_ktestz_mask32_u8 (__mmask32 __A, __mmask32 __B)
{
- *__CF = (unsigned char) __builtin_ia32_ktestcdi (__A, __B);
- return (unsigned char) __builtin_ia32_ktestzdi (__A, __B);
+ return (unsigned char) __builtin_ia32_ktestzsi (__A, __B);
}
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_ktestz_mask32_u8 (__mmask32 __A, __mmask32 __B)
+_ktestc_mask32_u8 (__mmask32 __A, __mmask32 __B)
{
- return (unsigned char) __builtin_ia32_ktestzsi (__A, __B);
+ return (unsigned char) __builtin_ia32_ktestcsi (__A, __B);
}
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_ktestz_mask64_u8 (__mmask64 __A, __mmask64 __B)
+_kortest_mask32_u8 (__mmask32 __A, __mmask32 __B, unsigned char *__CF)
{
- return (unsigned char) __builtin_ia32_ktestzdi (__A, __B);
+ *__CF = (unsigned char) __builtin_ia32_kortestcsi (__A, __B);
+ return (unsigned char) __builtin_ia32_kortestzsi (__A, __B);
}
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_ktestc_mask32_u8 (__mmask32 __A, __mmask32 __B)
+_kortestz_mask32_u8 (__mmask32 __A, __mmask32 __B)
{
- return (unsigned char) __builtin_ia32_ktestcsi (__A, __B);
+ return (unsigned char) __builtin_ia32_kortestzsi (__A, __B);
}
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_ktestc_mask64_u8 (__mmask64 __A, __mmask64 __B)
+_kortestc_mask32_u8 (__mmask32 __A, __mmask32 __B)
{
- return (unsigned char) __builtin_ia32_ktestcdi (__A, __B);
+ return (unsigned char) __builtin_ia32_kortestcsi (__A, __B);
}
-extern __inline unsigned char
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortest_mask32_u8 (__mmask32 __A, __mmask32 __B, unsigned char *__CF)
+_kadd_mask32 (__mmask32 __A, __mmask32 __B)
{
- *__CF = (unsigned char) __builtin_ia32_kortestcsi (__A, __B);
- return (unsigned char) __builtin_ia32_kortestzsi (__A, __B);
+ return (__mmask32) __builtin_ia32_kaddsi ((__mmask32) __A, (__mmask32) __B);
}
-extern __inline unsigned char
+extern __inline unsigned int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortest_mask64_u8 (__mmask64 __A, __mmask64 __B, unsigned char *__CF)
+_cvtmask32_u32 (__mmask32 __A)
{
- *__CF = (unsigned char) __builtin_ia32_kortestcdi (__A, __B);
- return (unsigned char) __builtin_ia32_kortestzdi (__A, __B);
+ return (unsigned int) __builtin_ia32_kmovd ((__mmask32) __A);
}
-extern __inline unsigned char
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortestz_mask32_u8 (__mmask32 __A, __mmask32 __B)
+_cvtu32_mask32 (unsigned int __A)
{
- return (unsigned char) __builtin_ia32_kortestzsi (__A, __B);
+ return (__mmask32) __builtin_ia32_kmovd ((__mmask32) __A);
}
-extern __inline unsigned char
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortestz_mask64_u8 (__mmask64 __A, __mmask64 __B)
+_load_mask32 (__mmask32 *__A)
{
- return (unsigned char) __builtin_ia32_kortestzdi (__A, __B);
+ return (__mmask32) __builtin_ia32_kmovd (*__A);
}
-extern __inline unsigned char
+extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortestc_mask32_u8 (__mmask32 __A, __mmask32 __B)
+_store_mask32 (__mmask32 *__A, __mmask32 __B)
{
- return (unsigned char) __builtin_ia32_kortestcsi (__A, __B);
+ *(__mmask32 *) __A = __builtin_ia32_kmovd (__B);
}
-extern __inline unsigned char
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortestc_mask64_u8 (__mmask64 __A, __mmask64 __B)
+_knot_mask32 (__mmask32 __A)
{
- return (unsigned char) __builtin_ia32_kortestcdi (__A, __B);
+ return (__mmask32) __builtin_ia32_knotsi ((__mmask32) __A);
}
extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kadd_mask32 (__mmask32 __A, __mmask32 __B)
+_kor_mask32 (__mmask32 __A, __mmask32 __B)
{
- return (__mmask32) __builtin_ia32_kaddsi ((__mmask32) __A, (__mmask32) __B);
+ return (__mmask32) __builtin_ia32_korsi ((__mmask32) __A, (__mmask32) __B);
}
-extern __inline __mmask64
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kadd_mask64 (__mmask64 __A, __mmask64 __B)
+_kxnor_mask32 (__mmask32 __A, __mmask32 __B)
{
- return (__mmask64) __builtin_ia32_kadddi ((__mmask64) __A, (__mmask64) __B);
+ return (__mmask32) __builtin_ia32_kxnorsi ((__mmask32) __A, (__mmask32) __B);
}
-extern __inline unsigned int
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_cvtmask32_u32 (__mmask32 __A)
+_kxor_mask32 (__mmask32 __A, __mmask32 __B)
{
- return (unsigned int) __builtin_ia32_kmovd ((__mmask32) __A);
+ return (__mmask32) __builtin_ia32_kxorsi ((__mmask32) __A, (__mmask32) __B);
}
-extern __inline unsigned long long
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_cvtmask64_u64 (__mmask64 __A)
+_kand_mask32 (__mmask32 __A, __mmask32 __B)
{
- return (unsigned long long) __builtin_ia32_kmovq ((__mmask64) __A);
+ return (__mmask32) __builtin_ia32_kandsi ((__mmask32) __A, (__mmask32) __B);
}
extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_cvtu32_mask32 (unsigned int __A)
+_kandn_mask32 (__mmask32 __A, __mmask32 __B)
{
- return (__mmask32) __builtin_ia32_kmovd ((__mmask32) __A);
+ return (__mmask32) __builtin_ia32_kandnsi ((__mmask32) __A, (__mmask32) __B);
}
-extern __inline __mmask64
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_cvtu64_mask64 (unsigned long long __A)
+_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
{
- return (__mmask64) __builtin_ia32_kmovq ((__mmask64) __A);
+ return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+ (__mmask32) __B);
}
extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_load_mask32 (__mmask32 *__A)
+_kunpackw_mask32 (__mmask16 __A, __mmask16 __B)
{
- return (__mmask32) __builtin_ia32_kmovd (*__A);
+ return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+ (__mmask32) __B);
}
-extern __inline __mmask64
+#if __OPTIMIZE__
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_load_mask64 (__mmask64 *__A)
+_kshiftli_mask32 (__mmask32 __A, unsigned int __B)
{
- return (__mmask64) __builtin_ia32_kmovq (*(__mmask64 *) __A);
+ return (__mmask32) __builtin_ia32_kshiftlisi ((__mmask32) __A,
+ (__mmask8) __B);
}
-extern __inline void
+extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_store_mask32 (__mmask32 *__A, __mmask32 __B)
+_kshiftri_mask32 (__mmask32 __A, unsigned int __B)
{
- *(__mmask32 *) __A = __builtin_ia32_kmovd (__B);
+ return (__mmask32) __builtin_ia32_kshiftrisi ((__mmask32) __A,
+ (__mmask8) __B);
}
-extern __inline void
+#else
+#define _kshiftli_mask32(X, Y) \
+ ((__mmask32) __builtin_ia32_kshiftlisi ((__mmask32)(X), (__mmask8)(Y)))
+
+#define _kshiftri_mask32(X, Y) \
+ ((__mmask32) __builtin_ia32_kshiftrisi ((__mmask32)(X), (__mmask8)(Y)))
+
+#endif
+
+#ifdef __DISABLE_AVX512BW__
+#undef __DISABLE_AVX512BW__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512BW__ */
+
+#if !defined (__AVX512BW__) || !defined (__EVEX512__)
+#pragma GCC push_options
+#pragma GCC target("avx512bw,evex512")
+#define __DISABLE_AVX512BW_512__
+#endif /* __AVX512BW_512__ */
+
+/* Internal data types for implementing the intrinsics. */
+typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+typedef short __v32hi_u __attribute__ ((__vector_size__ (64), \
+ __may_alias__, __aligned__ (1)));
+typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+typedef char __v64qi_u __attribute__ ((__vector_size__ (64), \
+ __may_alias__, __aligned__ (1)));
+
+typedef unsigned long long __mmask64;
+
+extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_store_mask64 (__mmask64 *__A, __mmask64 __B)
+_ktest_mask64_u8 (__mmask64 __A, __mmask64 __B, unsigned char *__CF)
{
- *(__mmask64 *) __A = __builtin_ia32_kmovq (__B);
+ *__CF = (unsigned char) __builtin_ia32_ktestcdi (__A, __B);
+ return (unsigned char) __builtin_ia32_ktestzdi (__A, __B);
}
-extern __inline __mmask32
+extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_knot_mask32 (__mmask32 __A)
+_ktestz_mask64_u8 (__mmask64 __A, __mmask64 __B)
{
- return (__mmask32) __builtin_ia32_knotsi ((__mmask32) __A);
+ return (unsigned char) __builtin_ia32_ktestzdi (__A, __B);
}
-extern __inline __mmask64
+extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_knot_mask64 (__mmask64 __A)
+_ktestc_mask64_u8 (__mmask64 __A, __mmask64 __B)
{
- return (__mmask64) __builtin_ia32_knotdi ((__mmask64) __A);
+ return (unsigned char) __builtin_ia32_ktestcdi (__A, __B);
}
-extern __inline __mmask32
+extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kor_mask32 (__mmask32 __A, __mmask32 __B)
+_kortest_mask64_u8 (__mmask64 __A, __mmask64 __B, unsigned char *__CF)
{
- return (__mmask32) __builtin_ia32_korsi ((__mmask32) __A, (__mmask32) __B);
+ *__CF = (unsigned char) __builtin_ia32_kortestcdi (__A, __B);
+ return (unsigned char) __builtin_ia32_kortestzdi (__A, __B);
+}
+
+extern __inline unsigned char
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kortestz_mask64_u8 (__mmask64 __A, __mmask64 __B)
+{
+ return (unsigned char) __builtin_ia32_kortestzdi (__A, __B);
+}
+
+extern __inline unsigned char
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kortestc_mask64_u8 (__mmask64 __A, __mmask64 __B)
+{
+ return (unsigned char) __builtin_ia32_kortestcdi (__A, __B);
}
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kor_mask64 (__mmask64 __A, __mmask64 __B)
+_kadd_mask64 (__mmask64 __A, __mmask64 __B)
{
- return (__mmask64) __builtin_ia32_kordi ((__mmask64) __A, (__mmask64) __B);
+ return (__mmask64) __builtin_ia32_kadddi ((__mmask64) __A, (__mmask64) __B);
}
-extern __inline __mmask32
+extern __inline unsigned long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kxnor_mask32 (__mmask32 __A, __mmask32 __B)
+_cvtmask64_u64 (__mmask64 __A)
{
- return (__mmask32) __builtin_ia32_kxnorsi ((__mmask32) __A, (__mmask32) __B);
+ return (unsigned long long) __builtin_ia32_kmovq ((__mmask64) __A);
}
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kxnor_mask64 (__mmask64 __A, __mmask64 __B)
+_cvtu64_mask64 (unsigned long long __A)
{
- return (__mmask64) __builtin_ia32_kxnordi ((__mmask64) __A, (__mmask64) __B);
+ return (__mmask64) __builtin_ia32_kmovq ((__mmask64) __A);
}
-extern __inline __mmask32
+extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kxor_mask32 (__mmask32 __A, __mmask32 __B)
+_load_mask64 (__mmask64 *__A)
{
- return (__mmask32) __builtin_ia32_kxorsi ((__mmask32) __A, (__mmask32) __B);
+ return (__mmask64) __builtin_ia32_kmovq (*(__mmask64 *) __A);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_store_mask64 (__mmask64 *__A, __mmask64 __B)
+{
+ *(__mmask64 *) __A = __builtin_ia32_kmovq (__B);
}
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kxor_mask64 (__mmask64 __A, __mmask64 __B)
+_knot_mask64 (__mmask64 __A)
{
- return (__mmask64) __builtin_ia32_kxordi ((__mmask64) __A, (__mmask64) __B);
+ return (__mmask64) __builtin_ia32_knotdi ((__mmask64) __A);
}
-extern __inline __mmask32
+extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kand_mask32 (__mmask32 __A, __mmask32 __B)
+_kor_mask64 (__mmask64 __A, __mmask64 __B)
{
- return (__mmask32) __builtin_ia32_kandsi ((__mmask32) __A, (__mmask32) __B);
+ return (__mmask64) __builtin_ia32_kordi ((__mmask64) __A, (__mmask64) __B);
}
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kand_mask64 (__mmask64 __A, __mmask64 __B)
+_kxnor_mask64 (__mmask64 __A, __mmask64 __B)
{
- return (__mmask64) __builtin_ia32_kanddi ((__mmask64) __A, (__mmask64) __B);
+ return (__mmask64) __builtin_ia32_kxnordi ((__mmask64) __A, (__mmask64) __B);
}
-extern __inline __mmask32
+extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kandn_mask32 (__mmask32 __A, __mmask32 __B)
+_kxor_mask64 (__mmask64 __A, __mmask64 __B)
{
- return (__mmask32) __builtin_ia32_kandnsi ((__mmask32) __A, (__mmask32) __B);
+ return (__mmask64) __builtin_ia32_kxordi ((__mmask64) __A, (__mmask64) __B);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kand_mask64 (__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64) __builtin_ia32_kanddi ((__mmask64) __A, (__mmask64) __B);
}
extern __inline __mmask64
@@ -366,22 +419,6 @@ _mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
(__mmask64) __U);
}
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
-{
- return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
- (__mmask32) __B);
-}
-
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kunpackw_mask32 (__mmask16 __A, __mmask16 __B)
-{
- return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
- (__mmask32) __B);
-}
-
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
@@ -2776,14 +2813,6 @@ _mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
}
#ifdef __OPTIMIZE__
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kshiftli_mask32 (__mmask32 __A, unsigned int __B)
-{
- return (__mmask32) __builtin_ia32_kshiftlisi ((__mmask32) __A,
- (__mmask8) __B);
-}
-
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kshiftli_mask64 (__mmask64 __A, unsigned int __B)
@@ -2792,14 +2821,6 @@ _kshiftli_mask64 (__mmask64 __A, unsigned int __B)
(__mmask8) __B);
}
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kshiftri_mask32 (__mmask32 __A, unsigned int __B)
-{
- return (__mmask32) __builtin_ia32_kshiftrisi ((__mmask32) __A,
- (__mmask8) __B);
-}
-
extern __inline __mmask64
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kshiftri_mask64 (__mmask64 __A, unsigned int __B)
@@ -3145,15 +3166,9 @@ _mm512_bsrli_epi128 (__m512i __A, const int __N)
}
#else
-#define _kshiftli_mask32(X, Y) \
- ((__mmask32) __builtin_ia32_kshiftlisi ((__mmask32)(X), (__mmask8)(Y)))
-
#define _kshiftli_mask64(X, Y) \
((__mmask64) __builtin_ia32_kshiftlidi ((__mmask64)(X), (__mmask8)(Y)))
-#define _kshiftri_mask32(X, Y) \
- ((__mmask32) __builtin_ia32_kshiftrisi ((__mmask32)(X), (__mmask8)(Y)))
-
#define _kshiftri_mask64(X, Y) \
((__mmask64) __builtin_ia32_kshiftridi ((__mmask64)(X), (__mmask8)(Y)))
@@ -3328,9 +3343,9 @@ _mm512_bsrli_epi128 (__m512i __A, const int __N)
#endif
-#ifdef __DISABLE_AVX512BW__
-#undef __DISABLE_AVX512BW__
+#ifdef __DISABLE_AVX512BW_512__
+#undef __DISABLE_AVX512BW_512__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX512BW__ */
+#endif /* __DISABLE_AVX512BW_512__ */
#endif /* _AVX512BWINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512dqintrin.h b/gcc/config/i386/avx512dqintrin.h
index 93900a0..b6a1d49 100644
--- a/gcc/config/i386/avx512dqintrin.h
+++ b/gcc/config/i386/avx512dqintrin.h
@@ -184,6 +184,470 @@ _kandn_mask8 (__mmask8 __A, __mmask8 __B)
return (__mmask8) __builtin_ia32_kandnqi ((__mmask8) __A, (__mmask8) __B);
}
+#ifdef __OPTIMIZE__
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kshiftli_mask8 (__mmask8 __A, unsigned int __B)
+{
+ return (__mmask8) __builtin_ia32_kshiftliqi ((__mmask8) __A, (__mmask8) __B);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kshiftri_mask8 (__mmask8 __A, unsigned int __B)
+{
+ return (__mmask8) __builtin_ia32_kshiftriqi ((__mmask8) __A, (__mmask8) __B);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_sd (__m128d __A, __m128d __B, int __C)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, int __C)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) __W,
+ __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_ss (__m128 __A, __m128 __B, int __C)
+{
+ return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, int __C)
+{
+ return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) __W,
+ __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
+{
+ return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_sd (__m128d __A, __m128d __B, int __C)
+{
+ return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_range_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C)
+{
+ return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_range_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
+{
+ return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_ss (__m128 __A, __m128 __B, int __C)
+{
+ return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_range_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C)
+{
+ return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_range_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
+{
+ return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_range_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_range_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_range_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_range_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fpclass_ss_mask (__m128 __A, const int __imm)
+{
+ return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm,
+ (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fpclass_sd_mask (__m128d __A, const int __imm)
+{
+ return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm,
+ (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fpclass_ss_mask (__mmask8 __U, __m128 __A, const int __imm)
+{
+ return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fpclass_sd_mask (__mmask8 __U, __m128d __A, const int __imm)
+{
+ return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm, __U);
+}
+
+#else
+#define _kshiftli_mask8(X, Y) \
+ ((__mmask8) __builtin_ia32_kshiftliqi ((__mmask8)(X), (__mmask8)(Y)))
+
+#define _kshiftri_mask8(X, Y) \
+ ((__mmask8) __builtin_ia32_kshiftriqi ((__mmask8)(X), (__mmask8)(Y)))
+
+#define _mm_range_sd(A, B, C) \
+ ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8) -1, _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_range_sd(W, U, A, B, C) \
+ ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_range_sd(U, A, B, C) \
+ ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_range_ss(A, B, C) \
+ ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8) -1, _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_range_ss(W, U, A, B, C) \
+ ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_range_ss(U, A, B, C) \
+ ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_range_round_sd(A, B, C, R) \
+ ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8) -1, (R)))
+
+#define _mm_mask_range_round_sd(W, U, A, B, C, R) \
+ ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (R)))
+
+#define _mm_maskz_range_round_sd(U, A, B, C, R) \
+ ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8)(U), (R)))
+
+#define _mm_range_round_ss(A, B, C, R) \
+ ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8) -1, (R)))
+
+#define _mm_mask_range_round_ss(W, U, A, B, C, R) \
+ ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (R)))
+
+#define _mm_maskz_range_round_ss(U, A, B, C, R) \
+ ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8)(U), (R)))
+
+#define _mm_fpclass_ss_mask(X, C) \
+ ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \
+ (int) (C), (__mmask8) (-1))) \
+
+#define _mm_fpclass_sd_mask(X, C) \
+ ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \
+ (int) (C), (__mmask8) (-1))) \
+
+#define _mm_mask_fpclass_ss_mask(X, C, U) \
+ ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \
+ (int) (C), (__mmask8) (U)))
+
+#define _mm_mask_fpclass_sd_mask(X, C, U) \
+ ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \
+ (int) (C), (__mmask8) (U)))
+#define _mm_reduce_sd(A, B, C) \
+ ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8)-1))
+
+#define _mm_mask_reduce_sd(W, U, A, B, C) \
+ ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), (__mmask8)(U)))
+
+#define _mm_maskz_reduce_sd(U, A, B, C) \
+ ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8)(U)))
+
+#define _mm_reduce_round_sd(A, B, C, R) \
+ ((__m128d) __builtin_ia32_reducesd_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__mmask8)(U), (int)(R)))
+
+#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
+ ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
+ ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_reduce_ss(A, B, C) \
+ ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8)-1))
+
+#define _mm_mask_reduce_ss(W, U, A, B, C) \
+ ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), (__mmask8)(U)))
+
+#define _mm_maskz_reduce_ss(U, A, B, C) \
+ ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8)(U)))
+
+#define _mm_reduce_round_ss(A, B, C, R) \
+ ((__m128) __builtin_ia32_reducess_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__mmask8)(U), (int)(R)))
+
+#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
+ ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
+ ((__m128) __builtin_ia32_reducesd_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8)(U), (int)(R)))
+
+#endif
+
+#ifdef __DISABLE_AVX512DQ__
+#undef __DISABLE_AVX512DQ__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512DQ__ */
+
+#if !defined (__AVX512DQ__) || !defined (__EVEX512__)
+#pragma GCC push_options
+#pragma GCC target("avx512dq,evex512")
+#define __DISABLE_AVX512DQ_512__
+#endif /* __AVX512DQ_512__ */
+
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_f64x2 (__m128d __A)
@@ -1070,20 +1534,6 @@ _mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A)
}
#ifdef __OPTIMIZE__
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kshiftli_mask8 (__mmask8 __A, unsigned int __B)
-{
- return (__mmask8) __builtin_ia32_kshiftliqi ((__mmask8) __A, (__mmask8) __B);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kshiftri_mask8 (__mmask8 __A, unsigned int __B)
-{
- return (__mmask8) __builtin_ia32_kshiftriqi ((__mmask8) __A, (__mmask8) __B);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_range_pd (__m512d __A, __m512d __B, int __C)
@@ -1156,305 +1606,6 @@ _mm512_maskz_range_ps (__mmask16 __U, __m512 __A, __m512 __B, int __C)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_reduce_sd (__m128d __A, __m128d __B, int __C)
-{
- return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) -1);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_reduce_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
-{
- return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_reduce_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, int __C)
-{
- return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_reduce_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, int __C, const int __R)
-{
- return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df) __W,
- __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_reduce_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
-{
- return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_reduce_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- int __C, const int __R)
-{
- return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df)
- _mm_setzero_pd (),
- __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_reduce_ss (__m128 __A, __m128 __B, int __C)
-{
- return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) -1);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_reduce_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
-{
- return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_reduce_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, int __C)
-{
- return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_reduce_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, int __C, const int __R)
-{
- return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf) __W,
- __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_reduce_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
-{
- return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_reduce_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- int __C, const int __R)
-{
- return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf)
- _mm_setzero_ps (),
- __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_range_sd (__m128d __A, __m128d __B, int __C)
-{
- return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_range_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C)
-{
- return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_range_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
-{
- return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_range_ss (__m128 __A, __m128 __B, int __C)
-{
- return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_range_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C)
-{
- return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_range_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
-{
- return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_range_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
-{
- return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_range_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- int __C, const int __R)
-{
- return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_range_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C,
- const int __R)
-{
- return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
- (__v2df) __B, __C,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_range_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
-{
- return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_range_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- int __C, const int __R)
-{
- return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_range_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C,
- const int __R)
-{
- return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
- (__v4sf) __B, __C,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fpclass_ss_mask (__m128 __A, const int __imm)
-{
- return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm,
- (__mmask8) -1);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fpclass_sd_mask (__m128d __A, const int __imm)
-{
- return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm,
- (__mmask8) -1);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fpclass_ss_mask (__mmask8 __U, __m128 __A, const int __imm)
-{
- return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm, __U);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fpclass_sd_mask (__mmask8 __U, __m128d __A, const int __imm)
-{
- return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm, __U);
-}
-
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtt_roundpd_epi64 (__m512d __A, const int __R)
@@ -2395,72 +2546,6 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
}
#else
-#define _kshiftli_mask8(X, Y) \
- ((__mmask8) __builtin_ia32_kshiftliqi ((__mmask8)(X), (__mmask8)(Y)))
-
-#define _kshiftri_mask8(X, Y) \
- ((__mmask8) __builtin_ia32_kshiftriqi ((__mmask8)(X), (__mmask8)(Y)))
-
-#define _mm_range_sd(A, B, C) \
- ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8) -1, _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_range_sd(W, U, A, B, C) \
- ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_range_sd(U, A, B, C) \
- ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_range_ss(A, B, C) \
- ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8) -1, _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_range_ss(W, U, A, B, C) \
- ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_range_ss(U, A, B, C) \
- ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_range_round_sd(A, B, C, R) \
- ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8) -1, (R)))
-
-#define _mm_mask_range_round_sd(W, U, A, B, C, R) \
- ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
- (__mmask8)(U), (R)))
-
-#define _mm_maskz_range_round_sd(U, A, B, C, R) \
- ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8)(U), (R)))
-
-#define _mm_range_round_ss(A, B, C, R) \
- ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8) -1, (R)))
-
-#define _mm_mask_range_round_ss(W, U, A, B, C, R) \
- ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
- (__mmask8)(U), (R)))
-
-#define _mm_maskz_range_round_ss(U, A, B, C, R) \
- ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8)(U), (R)))
-
#define _mm512_cvtt_roundpd_epi64(A, B) \
((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di) \
_mm512_setzero_si512 (), \
@@ -2792,22 +2877,6 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
(__v16si)(__m512i)_mm512_setzero_si512 (),\
(__mmask16)(U)))
-#define _mm_fpclass_ss_mask(X, C) \
- ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \
- (int) (C), (__mmask8) (-1))) \
-
-#define _mm_fpclass_sd_mask(X, C) \
- ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \
- (int) (C), (__mmask8) (-1))) \
-
-#define _mm_mask_fpclass_ss_mask(X, C, U) \
- ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \
- (int) (C), (__mmask8) (U)))
-
-#define _mm_mask_fpclass_sd_mask(X, C, U) \
- ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \
- (int) (C), (__mmask8) (U)))
-
#define _mm512_mask_fpclass_pd_mask(u, X, C) \
((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \
(int) (C), (__mmask8)(u)))
@@ -2824,68 +2893,11 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (x),\
(int) (c),(__mmask16)-1))
-#define _mm_reduce_sd(A, B, C) \
- ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8)-1))
-
-#define _mm_mask_reduce_sd(W, U, A, B, C) \
- ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), (__mmask8)(U)))
-
-#define _mm_maskz_reduce_sd(U, A, B, C) \
- ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8)(U)))
-
-#define _mm_reduce_round_sd(A, B, C, R) \
- ((__m128d) __builtin_ia32_reducesd_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__mmask8)(U), (int)(R)))
-
-#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
- ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
- ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
- (__mmask8)(U), (int)(R)))
-
-#define _mm_reduce_ss(A, B, C) \
- ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8)-1))
-
-#define _mm_mask_reduce_ss(W, U, A, B, C) \
- ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), (__mmask8)(U)))
-
-#define _mm_maskz_reduce_ss(U, A, B, C) \
- ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8)(U)))
-
-#define _mm_reduce_round_ss(A, B, C, R) \
- ((__m128) __builtin_ia32_reducess_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__mmask8)(U), (int)(R)))
-
-#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
- ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
- ((__m128) __builtin_ia32_reducesd_mask_round ((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
- (__mmask8)(U), (int)(R)))
-
-
#endif
-#ifdef __DISABLE_AVX512DQ__
-#undef __DISABLE_AVX512DQ__
+#ifdef __DISABLE_AVX512DQ_512__
+#undef __DISABLE_AVX512DQ_512__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX512DQ__ */
+#endif /* __DISABLE_AVX512DQ_512__ */
#endif /* _AVX512DQINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512erintrin.h b/gcc/config/i386/avx512erintrin.h
index bd83b7f..5c7be9c 100644
--- a/gcc/config/i386/avx512erintrin.h
+++ b/gcc/config/i386/avx512erintrin.h
@@ -30,7 +30,7 @@
#ifndef __AVX512ER__
#pragma GCC push_options
-#pragma GCC target("avx512er")
+#pragma GCC target("avx512er,evex512")
#define __DISABLE_AVX512ER__
#endif /* __AVX512ER__ */
diff --git a/gcc/config/i386/avx512fintrin.h b/gcc/config/i386/avx512fintrin.h
index 517e787..85bf72d 100644
--- a/gcc/config/i386/avx512fintrin.h
+++ b/gcc/config/i386/avx512fintrin.h
@@ -34,6 +34,3748 @@
#define __DISABLE_AVX512F__
#endif /* __AVX512F__ */
+typedef unsigned char __mmask8;
+typedef unsigned short __mmask16;
+typedef unsigned int __mmask32;
+
+/* Constants for mantissa extraction */
+typedef enum
+{
+ _MM_MANT_NORM_1_2, /* interval [1, 2) */
+ _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */
+ _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */
+ _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */
+} _MM_MANTISSA_NORM_ENUM;
+
+typedef enum
+{
+ _MM_MANT_SIGN_src, /* sign = sign(SRC) */
+ _MM_MANT_SIGN_zero, /* sign = 0 */
+ _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */
+} _MM_MANTISSA_SIGN_ENUM;
+
+#ifdef __OPTIMIZE__
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+#else
+#define _mm_add_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_addsd_round(A, B, C)
+
+#define _mm_mask_add_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_addsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_add_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_addsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+#define _mm_add_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_addss_round(A, B, C)
+
+#define _mm_mask_add_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_addss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_add_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_addss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#define _mm_sub_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_subsd_round(A, B, C)
+
+#define _mm_mask_sub_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_subsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_sub_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_subsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+#define _mm_sub_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_subss_round(A, B, C)
+
+#define _mm_mask_sub_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_subss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_sub_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_subss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#endif
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp14_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __B,
+ (__v2df) __A);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __B,
+ (__v2df) __A,
+						(__v2df) _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp14_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __B,
+ (__v4sf) __A);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt14_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __B,
+ (__v2df) __A);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt14_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __B,
+ (__v4sf) __A);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_round_sd (__mmask8 __U, __m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_scalef_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_scalef_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_scalef_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_scalef_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_scalef_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_scalef_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+#else
+#define _mm_sqrt_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, \
+ (__v2df) _mm_setzero_pd (), -1, C)
+
+#define _mm_mask_sqrt_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, W, U, C)
+
+#define _mm_maskz_sqrt_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, \
+ (__v2df) _mm_setzero_pd (), U, C)
+
+#define _mm_sqrt_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_sqrtss_mask_round (B, A, \
+ (__v4sf) _mm_setzero_ps (), -1, C)
+
+#define _mm_mask_sqrt_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_sqrtss_mask_round (B, A, W, U, C)
+
+#define _mm_maskz_sqrt_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_sqrtss_mask_round (B, A, \
+ (__v4sf) _mm_setzero_ps (), U, C)
+
+#define _mm_mul_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_mulsd_round(A, B, C)
+
+#define _mm_mask_mul_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_mulsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_mul_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_mulsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+#define _mm_mul_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_mulss_round(A, B, C)
+
+#define _mm_mask_mul_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_mulss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_mul_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_mulss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#define _mm_div_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_divsd_round(A, B, C)
+
+#define _mm_mask_div_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_divsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_div_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_divsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+#define _mm_div_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_divss_round(A, B, C)
+
+#define _mm_mask_div_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_divss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_div_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_divss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#define _mm_scalef_round_sd(A, B, C) \
+ ((__m128d) \
+ __builtin_ia32_scalefsd_mask_round ((A), (B), \
+ (__v2df) _mm_undefined_pd (), \
+ -1, (C)))
+
+#define _mm_scalef_round_ss(A, B, C) \
+ ((__m128) \
+ __builtin_ia32_scalefss_mask_round ((A), (B), \
+ (__v4sf) _mm_undefined_ps (), \
+ -1, (C)))
+
+#define _mm_mask_scalef_round_sd(W, U, A, B, C) \
+ ((__m128d) \
+ __builtin_ia32_scalefsd_mask_round ((A), (B), (W), (U), (C)))
+
+#define _mm_mask_scalef_round_ss(W, U, A, B, C) \
+ ((__m128) \
+ __builtin_ia32_scalefss_mask_round ((A), (B), (W), (U), (C)))
+
+#define _mm_maskz_scalef_round_sd(U, A, B, C) \
+ ((__m128d) \
+ __builtin_ia32_scalefsd_mask_round ((A), (B), \
+ (__v2df) _mm_setzero_pd (), \
+ (U), (C)))
+
+#define _mm_maskz_scalef_round_ss(U, A, B, C) \
+ ((__m128) \
+ __builtin_ia32_scalefss_mask_round ((A), (B), \
+ (__v4sf) _mm_setzero_ps (), \
+ (U), (C)))
+#endif
+
+#define _mm_mask_sqrt_sd(W, U, A, B) \
+ _mm_mask_sqrt_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_sqrt_sd(U, A, B) \
+ _mm_maskz_sqrt_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_sqrt_ss(W, U, A, B) \
+ _mm_mask_sqrt_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_sqrt_ss(U, A, B) \
+ _mm_maskz_sqrt_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_scalef_sd(W, U, A, B) \
+ _mm_mask_scalef_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_scalef_sd(U, A, B) \
+ _mm_maskz_scalef_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_scalef_ss(W, U, A, B) \
+ _mm_mask_scalef_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_scalef_ss(U, A, B) \
+ _mm_maskz_scalef_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtu32_sd (__m128d __A, unsigned __B)
+{
+ return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
+}
+
+#ifdef __x86_64__
+#ifdef __OPTIMIZE__
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundu64_sd (__m128d __A, unsigned long long __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundi64_sd (__m128d __A, long long __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtsi2sd64 ((__v2df) __A, __B, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsi64_sd (__m128d __A, long long __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtsi2sd64 ((__v2df) __A, __B, __R);
+}
+#else
+#define _mm_cvt_roundu64_sd(A, B, C) \
+ (__m128d)__builtin_ia32_cvtusi2sd64(A, B, C)
+
+#define _mm_cvt_roundi64_sd(A, B, C) \
+ (__m128d)__builtin_ia32_cvtsi2sd64(A, B, C)
+
+#define _mm_cvt_roundsi64_sd(A, B, C) \
+ (__m128d)__builtin_ia32_cvtsi2sd64(A, B, C)
+#endif
+
+#endif
+
+#ifdef __OPTIMIZE__
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundu32_ss (__m128 __A, unsigned __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsi32_ss (__m128 __A, int __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss32 ((__v4sf) __A, __B, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundi32_ss (__m128 __A, int __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss32 ((__v4sf) __A, __B, __R);
+}
+#else
+#define _mm_cvt_roundu32_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtusi2ss32(A, B, C)
+
+#define _mm_cvt_roundi32_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtsi2ss32(A, B, C)
+
+#define _mm_cvt_roundsi32_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtsi2ss32(A, B, C)
+#endif
+
+#ifdef __x86_64__
+#ifdef __OPTIMIZE__
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundu64_ss (__m128 __A, unsigned long long __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsi64_ss (__m128 __A, long long __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss64 ((__v4sf) __A, __B, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundi64_ss (__m128 __A, long long __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss64 ((__v4sf) __A, __B, __R);
+}
+#else
+#define _mm_cvt_roundu64_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtusi2ss64(A, B, C)
+
+#define _mm_cvt_roundi64_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtsi2ss64(A, B, C)
+
+#define _mm_cvt_roundsi64_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtsi2ss64(A, B, C)
+#endif
+
+#endif
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float *__P)
+{
+ return (__m128) __builtin_ia32_loadss_mask (__P, (__v4sf) __W, __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_load_ss (__mmask8 __U, const float *__P)
+{
+ return (__m128) __builtin_ia32_loadss_mask (__P, (__v4sf) _mm_setzero_ps (),
+ __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double *__P)
+{
+ return (__m128d) __builtin_ia32_loadsd_mask (__P, (__v2df) __W, __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_load_sd (__mmask8 __U, const double *__P)
+{
+ return (__m128d) __builtin_ia32_loadsd_mask (__P, (__v2df) _mm_setzero_pd (),
+ __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movess_mask ((__v4sf) __A, (__v4sf) __B,
+ (__v4sf) __W, __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movess_mask ((__v4sf) __A, (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (), __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movesd_mask ((__v2df) __A, (__v2df) __B,
+ (__v2df) __W, __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movesd_mask ((__v2df) __A, (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_store_ss (float *__P, __mmask8 __U, __m128 __A)
+{
+ __builtin_ia32_storess_mask (__P, (__v4sf) __A, (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_store_sd (double *__P, __mmask8 __U, __m128d __A)
+{
+ __builtin_ia32_storesd_mask (__P, (__v2df) __A, (__mmask8) __U);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fixupimm_round_sd (__m128d __A, __m128d __B, __m128i __C,
+ const int __imm, const int __R)
+{
+ return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2di) __C, __imm,
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fixupimm_round_sd (__m128d __A, __mmask8 __U, __m128d __B,
+ __m128i __C, const int __imm, const int __R)
+{
+ return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2di) __C, __imm,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fixupimm_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ __m128i __C, const int __imm, const int __R)
+{
+ return (__m128d) __builtin_ia32_fixupimmsd_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2di) __C,
+ __imm,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fixupimm_round_ss (__m128 __A, __m128 __B, __m128i __C,
+ const int __imm, const int __R)
+{
+ return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4si) __C, __imm,
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fixupimm_round_ss (__m128 __A, __mmask8 __U, __m128 __B,
+ __m128i __C, const int __imm, const int __R)
+{
+ return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4si) __C, __imm,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fixupimm_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ __m128i __C, const int __imm, const int __R)
+{
+ return (__m128) __builtin_ia32_fixupimmss_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4si) __C, __imm,
+ (__mmask8) __U, __R);
+}
+
+#else
+#define _mm_fixupimm_round_sd(X, Y, Z, C, R) \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
+ (__mmask8)(-1), (R)))
+
+#define _mm_mask_fixupimm_round_sd(X, U, Y, Z, C, R) \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), (R)))
+
+#define _mm_maskz_fixupimm_round_sd(U, X, Y, Z, C, R) \
+ ((__m128d)__builtin_ia32_fixupimmsd_maskz ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), (R)))
+
+#define _mm_fixupimm_round_ss(X, Y, Z, C, R) \
+ ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
+ (__mmask8)(-1), (R)))
+
+#define _mm_mask_fixupimm_round_ss(X, U, Y, Z, C, R) \
+ ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), (R)))
+
+#define _mm_maskz_fixupimm_round_ss(U, X, Y, Z, C, R) \
+ ((__m128)__builtin_ia32_fixupimmss_maskz ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), (R)))
+
+#endif
+
+#ifdef __x86_64__
+#ifdef __OPTIMIZE__
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_u64 (__m128 __A, const int __R)
+{
+ return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_si64 (__m128 __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvtss2si64 ((__v4sf) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_i64 (__m128 __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvtss2si64 ((__v4sf) __A, __R);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundss_u64 (__m128 __A, const int __R)
+{
+ return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundss_i64 (__m128 __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundss_si64 (__m128 __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, __R);
+}
+#else
+#define _mm_cvt_roundss_u64(A, B) \
+ ((unsigned long long)__builtin_ia32_vcvtss2usi64(A, B))
+
+#define _mm_cvt_roundss_si64(A, B) \
+ ((long long)__builtin_ia32_vcvtss2si64(A, B))
+
+#define _mm_cvt_roundss_i64(A, B) \
+ ((long long)__builtin_ia32_vcvtss2si64(A, B))
+
+#define _mm_cvtt_roundss_u64(A, B) \
+ ((unsigned long long)__builtin_ia32_vcvttss2usi64(A, B))
+
+#define _mm_cvtt_roundss_i64(A, B) \
+ ((long long)__builtin_ia32_vcvttss2si64(A, B))
+
+#define _mm_cvtt_roundss_si64(A, B) \
+ ((long long)__builtin_ia32_vcvttss2si64(A, B))
+#endif
+#endif
+
+#ifdef __OPTIMIZE__
+/* Low-float of __A to 32-bit integer conversions with explicit
+   rounding operand __R; _cvt_ forms round per __R, _cvtt_ forms
+   truncate.  _i32 and _si32 names map to the same builtin.  */
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_u32 (__m128 __A, const int __R)
+{
+ return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_si32 (__m128 __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvtss2si32 ((__v4sf) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_i32 (__m128 __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvtss2si32 ((__v4sf) __A, __R);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundss_u32 (__m128 __A, const int __R)
+{
+ return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundss_i32 (__m128 __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundss_si32 (__m128 __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, __R);
+}
+#else
+/* Macro fallbacks for non-optimizing builds.  */
+#define _mm_cvt_roundss_u32(A, B) \
+ ((unsigned)__builtin_ia32_vcvtss2usi32(A, B))
+
+#define _mm_cvt_roundss_si32(A, B) \
+ ((int)__builtin_ia32_vcvtss2si32(A, B))
+
+#define _mm_cvt_roundss_i32(A, B) \
+ ((int)__builtin_ia32_vcvtss2si32(A, B))
+
+#define _mm_cvtt_roundss_u32(A, B) \
+ ((unsigned)__builtin_ia32_vcvttss2usi32(A, B))
+
+#define _mm_cvtt_roundss_si32(A, B) \
+ ((int)__builtin_ia32_vcvttss2si32(A, B))
+
+#define _mm_cvtt_roundss_i32(A, B) \
+ ((int)__builtin_ia32_vcvttss2si32(A, B))
+#endif
+
+#ifdef __x86_64__
+#ifdef __OPTIMIZE__
+/* Low-double of __A to 64-bit integer conversions with explicit
+   rounding operand __R (64-bit targets only); _cvtt_ forms truncate.
+   _i64 and _si64 names map to the same builtin.  */
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_u64 (__m128d __A, const int __R)
+{
+ return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_si64 (__m128d __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvtsd2si64 ((__v2df) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_i64 (__m128d __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvtsd2si64 ((__v2df) __A, __R);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsd_u64 (__m128d __A, const int __R)
+{
+ return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsd_si64 (__m128d __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, __R);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsd_i64 (__m128d __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, __R);
+}
+#else
+/* Macro fallbacks for non-optimizing builds.  */
+#define _mm_cvt_roundsd_u64(A, B) \
+ ((unsigned long long)__builtin_ia32_vcvtsd2usi64(A, B))
+
+#define _mm_cvt_roundsd_si64(A, B) \
+ ((long long)__builtin_ia32_vcvtsd2si64(A, B))
+
+#define _mm_cvt_roundsd_i64(A, B) \
+ ((long long)__builtin_ia32_vcvtsd2si64(A, B))
+
+#define _mm_cvtt_roundsd_u64(A, B) \
+ ((unsigned long long)__builtin_ia32_vcvttsd2usi64(A, B))
+
+#define _mm_cvtt_roundsd_si64(A, B) \
+ ((long long)__builtin_ia32_vcvttsd2si64(A, B))
+
+#define _mm_cvtt_roundsd_i64(A, B) \
+ ((long long)__builtin_ia32_vcvttsd2si64(A, B))
+#endif
+#endif
+
+#ifdef __OPTIMIZE__
+/* Low-double of __A to 32-bit integer conversions with explicit
+   rounding operand __R; _cvtt_ forms truncate.  This __OPTIMIZE__
+   region extends through the cvt/getexp/getmant/roundscale round
+   intrinsics that follow; the matching #else is further below.  */
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_u32 (__m128d __A, const int __R)
+{
+ return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_si32 (__m128d __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvtsd2si32 ((__v2df) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_i32 (__m128d __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvtsd2si32 ((__v2df) __A, __R);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsd_u32 (__m128d __A, const int __R)
+{
+ return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsd_i32 (__m128d __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsd_si32 (__m128d __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, __R);
+}
+
+/* Convert the low double of __B to float into the low lane of the
+   result, upper lanes taken from __A, with rounding operand __R.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_ss (__m128 __A, __m128d __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsd2ss_round ((__v4sf) __A,
+ (__v2df) __B,
+ __R);
+}
+
+/* Masked variant: low lane comes from __W when bit 0 of __U is
+   clear.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundsd_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128d __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A,
+ (__v2df) __B,
+ (__v4sf) __W,
+ __U,
+ __R);
+}
+
+/* Zero-masked variant: low lane zeroed when bit 0 of __U is clear.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundsd_ss (__mmask8 __U, __m128 __A,
+ __m128d __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A,
+ (__v2df) __B,
+ _mm_setzero_ps (),
+ __U,
+ __R);
+}
+
+/* Convert the low float of __B to double into the low lane of the
+   result, upper lane taken from __A.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_sd (__m128d __A, __m128 __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtss2sd_round ((__v2df) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundss_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128 __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A,
+ (__v4sf) __B,
+ (__v2df) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundss_sd (__mmask8 __U, __m128d __A,
+ __m128 __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A,
+ (__v4sf) __B,
+ _mm_setzero_pd (),
+ __U,
+ __R);
+}
+
+/* Extract the exponent of the low lane of __B into the low lane of
+   the result (upper lanes from __A), with SAE operand __R.  The
+   unmasked form uses the plain *128_round builtin; masked/zero-masked
+   forms use the *_mask_round builtin with __W or zero as the
+   pass-through source.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getexp_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_getexpss128_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getexp_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getexp_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+/* Double-precision counterparts of the getexp round intrinsics.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getexp_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getexp_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getexp_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+/* Extract the mantissa of the low lane of __B (upper lanes from __A).
+   The sign-control enum __D and normalization-interval enum __C are
+   packed into one immediate as (__D << 2) | __C before reaching the
+   builtin; __R is the SAE operand.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getmant_round_sd (__m128d __A, __m128d __B,
+ _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D, const int __R)
+{
+ return (__m128d) __builtin_ia32_getmantsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__D << 2) | __C,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getmant_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D, const int __R)
+{
+ return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__D << 2) | __C,
+ (__v2df) __W,
+ __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getmant_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D, const int __R)
+{
+ return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__D << 2) | __C,
+ (__v2df)
+ _mm_setzero_pd(),
+ __U, __R);
+}
+
+/* Single-precision counterparts, same immediate packing.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getmant_round_ss (__m128 __A, __m128 __B,
+ _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D, const int __R)
+{
+ return (__m128) __builtin_ia32_getmantss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__D << 2) | __C,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getmant_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D, const int __R)
+{
+ return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__D << 2) | __C,
+ (__v4sf) __W,
+ __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getmant_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D, const int __R)
+{
+ return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__D << 2) | __C,
+ (__v4sf)
+ _mm_setzero_ps(),
+ __U, __R);
+}
+
+/* Round the low lane of the second vector operand to the precision
+   selected by immediate __imm (upper lanes from the first operand).
+   All three variants go through the same *_mask_round builtin: the
+   unmasked form passes an all-ones mask ((__mmask8) -1) with a zero
+   pass-through source.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_roundscale_round_ss (__m128 __A, __m128 __B, const int __imm,
+ const int __R)
+{
+ return (__m128)
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __imm,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+/* Masked form: here __A is the pass-through source and __B the mask;
+   the data operands are __C and __D.  */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_roundscale_round_ss (__m128 __A, __mmask8 __B, __m128 __C,
+ __m128 __D, const int __imm, const int __R)
+{
+ return (__m128)
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) __C,
+ (__v4sf) __D, __imm,
+ (__v4sf) __A,
+ (__mmask8) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_roundscale_round_ss (__mmask8 __A, __m128 __B, __m128 __C,
+ const int __imm, const int __R)
+{
+ return (__m128)
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) __B,
+ (__v4sf) __C, __imm,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __A,
+ __R);
+}
+
+/* Double-precision counterparts.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_roundscale_round_sd (__m128d __A, __m128d __B, const int __imm,
+ const int __R)
+{
+ return (__m128d)
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __imm,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_roundscale_round_sd (__m128d __A, __mmask8 __B, __m128d __C,
+ __m128d __D, const int __imm, const int __R)
+{
+ return (__m128d)
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) __C,
+ (__v2df) __D, __imm,
+ (__v2df) __A,
+ (__mmask8) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_roundscale_round_sd (__mmask8 __A, __m128d __B, __m128d __C,
+ const int __imm, const int __R)
+{
+ return (__m128d)
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) __B,
+ (__v2df) __C, __imm,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __A,
+ __R);
+}
+
+#else
+/* Non-__OPTIMIZE__ macro fallbacks for everything in the preceding
+   __OPTIMIZE__ region: sd->32-bit conversions, sd<->ss conversions,
+   getmant, getexp and roundscale with explicit rounding operands.  */
+#define _mm_cvt_roundsd_u32(A, B) \
+ ((unsigned)__builtin_ia32_vcvtsd2usi32(A, B))
+
+#define _mm_cvt_roundsd_si32(A, B) \
+ ((int)__builtin_ia32_vcvtsd2si32(A, B))
+
+#define _mm_cvt_roundsd_i32(A, B) \
+ ((int)__builtin_ia32_vcvtsd2si32(A, B))
+
+#define _mm_cvtt_roundsd_u32(A, B) \
+ ((unsigned)__builtin_ia32_vcvttsd2usi32(A, B))
+
+#define _mm_cvtt_roundsd_si32(A, B) \
+ ((int)__builtin_ia32_vcvttsd2si32(A, B))
+
+#define _mm_cvtt_roundsd_i32(A, B) \
+ ((int)__builtin_ia32_vcvttsd2si32(A, B))
+
+#define _mm_cvt_roundsd_ss(A, B, C) \
+ (__m128)__builtin_ia32_cvtsd2ss_round(A, B, C)
+
+#define _mm_mask_cvt_roundsd_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), (W), (U), (C))
+
+#define _mm_maskz_cvt_roundsd_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), _mm_setzero_ps (), \
+ (U), (C))
+
+#define _mm_cvt_roundss_sd(A, B, C) \
+ (__m128d)__builtin_ia32_cvtss2sd_round(A, B, C)
+
+#define _mm_mask_cvt_roundss_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), (W), (U), (C))
+
+#define _mm_maskz_cvt_roundss_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), _mm_setzero_pd (), \
+ (U), (C))
+
+/* getmant macros pack the sign-control and normalization enums into
+   one immediate as ((D)<<2) | (C), mirroring the inline forms.  */
+#define _mm_getmant_round_sd(X, Y, C, D, R) \
+ ((__m128d)__builtin_ia32_getmantsd_round ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (R)))
+
+#define _mm_mask_getmant_round_sd(W, U, X, Y, C, D, R) \
+ ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U),\
+ (R)))
+
+#define _mm_maskz_getmant_round_sd(U, X, Y, C, D, R) \
+ ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)_mm_setzero_pd(), \
+ (__mmask8)(U),\
+ (R)))
+
+#define _mm_getmant_round_ss(X, Y, C, D, R) \
+ ((__m128)__builtin_ia32_getmantss_round ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (R)))
+
+#define _mm_mask_getmant_round_ss(W, U, X, Y, C, D, R) \
+ ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U),\
+ (R)))
+
+#define _mm_maskz_getmant_round_ss(U, X, Y, C, D, R) \
+ ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)_mm_setzero_ps(), \
+ (__mmask8)(U),\
+ (R)))
+
+#define _mm_getexp_round_ss(A, B, R) \
+ ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), R))
+
+#define _mm_mask_getexp_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_getexpss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_getexp_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#define _mm_getexp_round_sd(A, B, R) \
+ ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), R))
+
+#define _mm_mask_getexp_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_getexp_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+/* roundscale macros: unmasked forms pass an all-ones mask with a zero
+   pass-through source, like the inline forms above.  */
+#define _mm_roundscale_round_ss(A, B, I, R) \
+ ((__m128) \
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
+ (__v4sf) (__m128) (B), \
+ (int) (I), \
+ (__v4sf) _mm_setzero_ps (), \
+ (__mmask8) (-1), \
+ (int) (R)))
+#define _mm_mask_roundscale_round_ss(A, U, B, C, I, R) \
+ ((__m128) \
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (B), \
+ (__v4sf) (__m128) (C), \
+ (int) (I), \
+ (__v4sf) (__m128) (A), \
+ (__mmask8) (U), \
+ (int) (R)))
+#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
+ ((__m128) \
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
+ (__v4sf) (__m128) (B), \
+ (int) (I), \
+ (__v4sf) _mm_setzero_ps (), \
+ (__mmask8) (U), \
+ (int) (R)))
+#define _mm_roundscale_round_sd(A, B, I, R) \
+ ((__m128d) \
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
+ (__v2df) (__m128d) (B), \
+ (int) (I), \
+ (__v2df) _mm_setzero_pd (), \
+ (__mmask8) (-1), \
+ (int) (R)))
+#define _mm_mask_roundscale_round_sd(A, U, B, C, I, R) \
+ ((__m128d) \
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (B), \
+ (__v2df) (__m128d) (C), \
+ (int) (I), \
+ (__v2df) (__m128d) (A), \
+ (__mmask8) (U), \
+ (int) (R)))
+#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
+ ((__m128d) \
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
+ (__v2df) (__m128d) (B), \
+ (int) (I), \
+ (__v2df) _mm_setzero_pd (), \
+ (__mmask8) (U), \
+ (int) (R)))
+
+#endif
+
+/* Current-rounding-direction convenience wrappers over the masked
+   sd<->ss round-conversion intrinsics.  */
+#define _mm_mask_cvtss_sd(W, U, A, B) \
+ _mm_mask_cvt_roundss_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_cvtss_sd(U, A, B) \
+ _mm_maskz_cvt_roundss_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_cvtsd_ss(W, U, A, B) \
+ _mm_mask_cvt_roundsd_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_cvtsd_ss(U, A, B) \
+ _mm_maskz_cvt_roundsd_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#ifdef __OPTIMIZE__
+/* Shift a 16-bit mask register left by __B bits; the shift count is
+   passed to the builtin as an 8-bit immediate.  */
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kshiftli_mask16 (__mmask16 __A, unsigned int __B)
+{
+ return (__mmask16) __builtin_ia32_kshiftlihi ((__mmask16) __A,
+ (__mmask8) __B);
+}
+
+/* Shift a 16-bit mask register right by __B bits.  */
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kshiftri_mask16 (__mmask16 __A, unsigned int __B)
+{
+ return (__mmask16) __builtin_ia32_kshiftrihi ((__mmask16) __A,
+ (__mmask8) __B);
+}
+
+/* Compare low doubles of __X and __Y per predicate __P with SAE
+   operand __R; (__mmask8) -1 means no write-mask is applied.  */
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_round_sd_mask (__m128d __X, __m128d __Y, const int __P, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
+ (__v2df) __Y, __P,
+ (__mmask8) -1, __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_round_sd_mask (__mmask8 __M, __m128d __X, __m128d __Y,
+ const int __P, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
+ (__v2df) __Y, __P,
+ (__mmask8) __M, __R);
+}
+
+/* Single-precision counterparts.  */
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_round_ss_mask (__m128 __X, __m128 __Y, const int __P, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
+ (__v4sf) __Y, __P,
+ (__mmask8) -1, __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_round_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y,
+ const int __P, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
+ (__v4sf) __Y, __P,
+ (__mmask8) __M, __R);
+}
+
+#else
+/* Macro fallbacks for non-optimizing builds.  */
+#define _kshiftli_mask16(X, Y) \
+ ((__mmask16) __builtin_ia32_kshiftlihi ((__mmask16)(X), (__mmask8)(Y)))
+
+#define _kshiftri_mask16(X, Y) \
+ ((__mmask16) __builtin_ia32_kshiftrihi ((__mmask16)(X), (__mmask8)(Y)))
+
+#define _mm_cmp_round_sd_mask(X, Y, P, R) \
+ ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P),\
+ (__mmask8)-1, R))
+
+#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
+ ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P),\
+ (M), R))
+
+#define _mm_cmp_round_ss_mask(X, Y, P, R) \
+ ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, R))
+
+#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
+ ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (M), R))
+
+#endif
+
+/* KORTEST on two 16-bit masks: stores the carry-flag result (all-ones
+   OR) through *__CF and returns the zero-flag result (all-zeros OR).  */
+extern __inline unsigned char
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kortest_mask16_u8 (__mmask16 __A, __mmask16 __B, unsigned char *__CF)
+{
+ *__CF = (unsigned char) __builtin_ia32_kortestchi (__A, __B);
+ return (unsigned char) __builtin_ia32_kortestzhi (__A, __B);
+}
+
+/* ZF-only variant: nonzero iff (__A | __B) is all zeros.  */
+extern __inline unsigned char
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kortestz_mask16_u8 (__mmask16 __A, __mmask16 __B)
+{
+ return (unsigned char) __builtin_ia32_kortestzhi ((__mmask16) __A,
+ (__mmask16) __B);
+}
+
+/* CF-only variant: nonzero iff (__A | __B) is all ones.  */
+extern __inline unsigned char
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kortestc_mask16_u8 (__mmask16 __A, __mmask16 __B)
+{
+ return (unsigned char) __builtin_ia32_kortestchi ((__mmask16) __A,
+ (__mmask16) __B);
+}
+
+/* Moves between 16-bit mask registers and 32-bit integers, and
+   loads/stores through memory, all via KMOVW.  */
+extern __inline unsigned int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_cvtmask16_u32 (__mmask16 __A)
+{
+ return (unsigned int) __builtin_ia32_kmovw ((__mmask16 ) __A);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_cvtu32_mask16 (unsigned int __A)
+{
+ return (__mmask16) __builtin_ia32_kmovw ((__mmask16 ) __A);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_load_mask16 (__mmask16 *__A)
+{
+ return (__mmask16) __builtin_ia32_kmovw (*(__mmask16 *) __A);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_store_mask16 (__mmask16 *__A, __mmask16 __B)
+{
+ *(__mmask16 *) __A = __builtin_ia32_kmovw (__B);
+}
+
+/* Bitwise logic on 16-bit mask registers.  */
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kand_mask16 (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kandn_mask16 (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A,
+ (__mmask16) __B);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kor_mask16 (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kxnor_mask16 (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kxor_mask16 (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_knot_mask16 (__mmask16 __A)
+{
+ return (__mmask16) __builtin_ia32_knothi ((__mmask16) __A);
+}
+
+/* Concatenate two 8-bit masks into one 16-bit mask (KUNPCKBW); the
+   __mmask8 arguments are widened to __mmask16 for the builtin.  */
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_kunpackb_mask16 (__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+#ifdef __OPTIMIZE__
+/* Scalar max/min with explicit SAE operand __R.  Unmasked forms use
+   the plain *_round builtin; masked/zero-masked forms use the
+   *_mask_round builtin with __W or a zero vector as pass-through.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_round_sd (__m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_round_ss (__m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U, __R);
+}
+
+
+#else
+/* Macro fallbacks for the scalar max/min round intrinsics.  */
+#define _mm_max_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_maxsd_round(A, B, C)
+
+#define _mm_mask_max_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_maxsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_max_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_maxsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+#define _mm_max_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_maxss_round(A, B, C)
+
+#define _mm_mask_max_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_maxss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_max_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_maxss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#define _mm_min_round_sd(A, B, C) \
+ (__m128d)__builtin_ia32_minsd_round(A, B, C)
+
+#define _mm_mask_min_round_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_minsd_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_min_round_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_minsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
+
+#define _mm_min_round_ss(A, B, C) \
+ (__m128)__builtin_ia32_minss_round(A, B, C)
+
+#define _mm_mask_min_round_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_minss_mask_round(A, B, W, U, C)
+
+#define _mm_maskz_min_round_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_minss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
+
+#endif
+
+#ifdef __OPTIMIZE__
+/* Scalar fused multiply-add family with explicit rounding operand
+   __R.  All eight variants funnel through the single fmadd builtin;
+   fmsub/fnmadd/fnmsub are obtained by negating the appropriate vector
+   operand(s) before the call.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+/* fmsub = fmadd with the addend negated.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
+ (__v2df) __A,
+ -(__v2df) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
+ (__v4sf) __A,
+ -(__v4sf) __B,
+ __R);
+}
+
+/* fnmadd = fmadd with one multiplicand negated.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ __R);
+}
+
+/* fnmsub = fmadd with a multiplicand and the addend negated.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
+ -(__v2df) __A,
+ -(__v2df) __B,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
+ -(__v4sf) __A,
+ -(__v4sf) __B,
+ __R);
+}
+#else
+/* Macro fallbacks; the same operand-negation scheme as the inline
+   forms produces fmsub/fnmadd/fnmsub from the fmadd builtin.  */
+#define _mm_fmadd_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_round(A, B, C, R)
+
+#define _mm_fmadd_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_round(A, B, C, R)
+
+#define _mm_fmsub_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_round(A, B, -(C), R)
+
+#define _mm_fmsub_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_round(A, B, -(C), R)
+
+#define _mm_fnmadd_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_round(A, -(B), C, R)
+
+#define _mm_fnmadd_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_round(A, -(B), C, R)
+
+#define _mm_fnmsub_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_round(A, -(B), -(C), R)
+
+#define _mm_fnmsub_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_round(A, -(B), -(C), R)
+#endif
+
+/* Masked scalar FMA intrinsics using the current rounding direction
+   (_MM_FROUND_CUR_DIRECTION).  _mask keeps __W on a clear mask bit,
+   _mask3 keeps the third source, _maskz zeroes the low lane.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+/* mask_fmsub negates the addend and reuses the fmadd mask builtin.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ (__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+/* mask3_fmsub uses a dedicated vfmsub mask3 builtin rather than
+   operand negation — presumably because the mask3 pass-through must
+   keep the un-negated __B; confirm against the builtin definitions.  */
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ (__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ (__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ -(__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ -(__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ -(__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ -(__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ (__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ (__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ (__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ (__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
+ -(__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ -(__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
+ -(__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
+ -(__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
+ const int __R)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
+ -(__v2df) __A,
+ -(__v2df) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
+ -(__v4sf) __A,
+ -(__v4sf) __B,
+ (__mmask8) __U, __R);
+}
+#else
+#define _mm_mask_fmadd_round_sd(A, U, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_mask (A, B, C, U, R)
+
+#define _mm_mask_fmadd_round_ss(A, U, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_mask (A, B, C, U, R)
+
+#define _mm_mask3_fmadd_round_sd(A, B, C, U, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_mask3 (A, B, C, U, R)
+
+#define _mm_mask3_fmadd_round_ss(A, B, C, U, R) \
+ (__m128) __builtin_ia32_vfmaddss3_mask3 (A, B, C, U, R)
+
+#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, B, C, U, R)
+
+#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_maskz (A, B, C, U, R)
+
+#define _mm_mask_fmsub_round_sd(A, U, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_mask (A, B, -(C), U, R)
+
+#define _mm_mask_fmsub_round_ss(A, U, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_mask (A, B, -(C), U, R)
+
+#define _mm_mask3_fmsub_round_sd(A, B, C, U, R) \
+ (__m128d) __builtin_ia32_vfmsubsd3_mask3 (A, B, C, U, R)
+
+#define _mm_mask3_fmsub_round_ss(A, B, C, U, R) \
+ (__m128) __builtin_ia32_vfmsubss3_mask3 (A, B, C, U, R)
+
+#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, B, -(C), U, R)
+
+#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_maskz (A, B, -(C), U, R)
+
+#define _mm_mask_fnmadd_round_sd(A, U, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_mask (A, -(B), C, U, R)
+
+#define _mm_mask_fnmadd_round_ss(A, U, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_mask (A, -(B), C, U, R)
+
+#define _mm_mask3_fnmadd_round_sd(A, B, C, U, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_mask3 (A, -(B), C, U, R)
+
+#define _mm_mask3_fnmadd_round_ss(A, B, C, U, R) \
+ (__m128) __builtin_ia32_vfmaddss3_mask3 (A, -(B), C, U, R)
+
+#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, -(B), C, U, R)
+
+#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_maskz (A, -(B), C, U, R)
+
+#define _mm_mask_fnmsub_round_sd(A, U, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_mask (A, -(B), -(C), U, R)
+
+#define _mm_mask_fnmsub_round_ss(A, U, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_mask (A, -(B), -(C), U, R)
+
+#define _mm_mask3_fnmsub_round_sd(A, B, C, U, R) \
+ (__m128d) __builtin_ia32_vfmsubsd3_mask3 (A, -(B), C, U, R)
+
+#define _mm_mask3_fnmsub_round_ss(A, B, C, U, R) \
+ (__m128) __builtin_ia32_vfmsubss3_mask3 (A, -(B), C, U, R)
+
+#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
+ (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, -(B), -(C), U, R)
+
+#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
+ (__m128) __builtin_ia32_vfmaddss3_maskz (A, -(B), -(C), U, R)
+#endif
+
+#ifdef __OPTIMIZE__
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comi_round_ss (__m128 __A, __m128 __B, const int __P, const int __R)
+{
+ return __builtin_ia32_vcomiss ((__v4sf) __A, (__v4sf) __B, __P, __R);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comi_round_sd (__m128d __A, __m128d __B, const int __P, const int __R)
+{
+ return __builtin_ia32_vcomisd ((__v2df) __A, (__v2df) __B, __P, __R);
+}
+#else
+#define _mm_comi_round_ss(A, B, C, D)\
+__builtin_ia32_vcomiss(A, B, C, D)
+#define _mm_comi_round_sd(A, B, C, D)\
+__builtin_ia32_vcomisd(A, B, C, D)
+#endif
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B)
+{
+ return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B)
+{
+ return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B)
+{
+ return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B)
+{
+ return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_scalef_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_scalef_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __x86_64__
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
+{
+ return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
+{
+ return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#endif
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtu32_ss (__m128 __A, unsigned __B)
+{
+ return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fixupimm_sd (__m128d __A, __m128d __B, __m128i __C, const int __imm)
+{
+ return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2di) __C, __imm,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fixupimm_sd (__m128d __A, __mmask8 __U, __m128d __B,
+ __m128i __C, const int __imm)
+{
+ return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2di) __C, __imm,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fixupimm_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ __m128i __C, const int __imm)
+{
+ return (__m128d) __builtin_ia32_fixupimmsd_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2di) __C,
+ __imm,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fixupimm_ss (__m128 __A, __m128 __B, __m128i __C, const int __imm)
+{
+ return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4si) __C, __imm,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fixupimm_ss (__m128 __A, __mmask8 __U, __m128 __B,
+ __m128i __C, const int __imm)
+{
+ return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4si) __C, __imm,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fixupimm_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ __m128i __C, const int __imm)
+{
+ return (__m128) __builtin_ia32_fixupimmss_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4si) __C, __imm,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#else
+#define _mm_fixupimm_sd(X, Y, Z, C) \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
+ (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_fixupimm_sd(X, U, Y, Z, C) \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_sd(U, X, Y, Z, C) \
+ ((__m128d)__builtin_ia32_fixupimmsd_maskz ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_fixupimm_ss(X, Y, Z, C) \
+ ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
+ (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_fixupimm_ss(X, U, Y, Z, C) \
+ ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_ss(U, X, Y, Z, C) \
+ ((__m128)__builtin_ia32_fixupimmss_maskz ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
+ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#endif
+
+#ifdef __x86_64__
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_u64 (__m128 __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_u64 (__m128 __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_i64 (__m128 __A)
+{
+ return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#endif /* __x86_64__ */
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_u32 (__m128 __A)
+{
+ return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_u32 (__m128 __A)
+{
+ return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_i32 (__m128 __A)
+{
+ return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_i32 (__m128d __A)
+{
+ return (int) __builtin_ia32_cvtsd2si ((__v2df) __A);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_i32 (__m128 __A)
+{
+ return (int) __builtin_ia32_cvtss2si ((__v4sf) __A);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti32_sd (__m128d __A, int __B)
+{
+ return (__m128d) __builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti32_ss (__m128 __A, int __B)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
+}
+
+#ifdef __x86_64__
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_u64 (__m128d __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_u64 (__m128d __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_i64 (__m128d __A)
+{
+ return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_i64 (__m128d __A)
+{
+ return (long long) __builtin_ia32_cvtsd2si64 ((__v2df) __A);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_i64 (__m128 __A)
+{
+ return (long long) __builtin_ia32_cvtss2si64 ((__v4sf) __A);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti64_sd (__m128d __A, long long __B)
+{
+ return (__m128d) __builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti64_ss (__m128 __A, long long __B)
+{
+ return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
+}
+#endif /* __x86_64__ */
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_u32 (__m128d __A)
+{
+ return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_u32 (__m128d __A)
+{
+ return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_i32 (__m128d __A)
+{
+ return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getexp_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round ((__v4sf) __A,
+ (__v4sf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getexp_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round ((__v2df) __A,
+ (__v2df) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getmant_sd (__m128d __A, __m128d __B, _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D)
+{
+ return (__m128d) __builtin_ia32_getmantsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__D << 2) | __C,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getmant_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
+ _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
+{
+ return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__D << 2) | __C,
+ (__v2df) __W,
+ __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getmant_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
+{
+ return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
+ (__v2df) __B,
+ (__D << 2) | __C,
+ (__v2df)
+ _mm_setzero_pd(),
+ __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getmant_ss (__m128 __A, __m128 __B, _MM_MANTISSA_NORM_ENUM __C,
+ _MM_MANTISSA_SIGN_ENUM __D)
+{
+ return (__m128) __builtin_ia32_getmantss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__D << 2) | __C,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getmant_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+ _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
+{
+ return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__D << 2) | __C,
+ (__v4sf) __W,
+ __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getmant_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
+{
+ return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__D << 2) | __C,
+ (__v4sf)
+ _mm_setzero_ps(),
+ __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_roundscale_ss (__m128 __A, __m128 __B, const int __imm)
+{
+ return (__m128)
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __imm,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_roundscale_ss (__m128 __A, __mmask8 __B, __m128 __C, __m128 __D,
+ const int __imm)
+{
+ return (__m128)
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) __C,
+ (__v4sf) __D, __imm,
+ (__v4sf) __A,
+ (__mmask8) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_roundscale_ss (__mmask8 __A, __m128 __B, __m128 __C,
+ const int __imm)
+{
+ return (__m128)
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) __B,
+ (__v4sf) __C, __imm,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_roundscale_sd (__m128d __A, __m128d __B, const int __imm)
+{
+ return (__m128d)
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __imm,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_roundscale_sd (__m128d __A, __mmask8 __B, __m128d __C, __m128d __D,
+ const int __imm)
+{
+ return (__m128d)
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) __C,
+ (__v2df) __D, __imm,
+ (__v2df) __A,
+ (__mmask8) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_roundscale_sd (__mmask8 __A, __m128d __B, __m128d __C,
+ const int __imm)
+{
+ return (__m128d)
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) __B,
+ (__v2df) __C, __imm,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_sd_mask (__m128d __X, __m128d __Y, const int __P)
+{
+ return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
+ (__v2df) __Y, __P,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_sd_mask (__mmask8 __M, __m128d __X, __m128d __Y, const int __P)
+{
+ return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
+ (__v2df) __Y, __P,
+ (__mmask8) __M,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_ss_mask (__m128 __X, __m128 __Y, const int __P)
+{
+ return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
+ (__v4sf) __Y, __P,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, const int __P)
+{
+ return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
+ (__v4sf) __Y, __P,
+ (__mmask8) __M,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#else
+#define _mm_getmant_sd(X, Y, C, D) \
+ ((__m128d)__builtin_ia32_getmantsd_round ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (int)(((D)<<2) | (C)), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_getmant_sd(W, U, X, Y, C, D) \
+ ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U),\
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_getmant_sd(U, X, Y, C, D) \
+ ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U),\
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_getmant_ss(X, Y, C, D) \
+ ((__m128)__builtin_ia32_getmantss_round ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), \
+ (int)(((D)<<2) | (C)), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_getmant_ss(W, U, X, Y, C, D) \
+ ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U),\
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_getmant_ss(U, X, Y, C, D) \
+ ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U),\
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_getexp_ss(A, B) \
+ ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_getexp_ss(W, U, A, B) \
+ (__m128)__builtin_ia32_getexpss_mask_round(A, B, W, U,\
+ _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_getexp_ss(U, A, B) \
+ (__m128)__builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U,\
+ _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_getexp_sd(A, B) \
+ ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), (__v2df)(__m128d)(B),\
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_getexp_sd(W, U, A, B) \
+ (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, W, U,\
+ _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_getexp_sd(U, A, B) \
+ (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U,\
+ _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_roundscale_ss(A, B, I) \
+ ((__m128) \
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
+ (__v4sf) (__m128) (B), \
+ (int) (I), \
+ (__v4sf) _mm_setzero_ps (), \
+ (__mmask8) (-1), \
+ _MM_FROUND_CUR_DIRECTION))
+#define _mm_mask_roundscale_ss(A, U, B, C, I) \
+ ((__m128) \
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (B), \
+ (__v4sf) (__m128) (C), \
+ (int) (I), \
+ (__v4sf) (__m128) (A), \
+ (__mmask8) (U), \
+ _MM_FROUND_CUR_DIRECTION))
+#define _mm_maskz_roundscale_ss(U, A, B, I) \
+ ((__m128) \
+ __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
+ (__v4sf) (__m128) (B), \
+ (int) (I), \
+ (__v4sf) _mm_setzero_ps (), \
+ (__mmask8) (U), \
+ _MM_FROUND_CUR_DIRECTION))
+#define _mm_roundscale_sd(A, B, I) \
+ ((__m128d) \
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
+ (__v2df) (__m128d) (B), \
+ (int) (I), \
+ (__v2df) _mm_setzero_pd (), \
+ (__mmask8) (-1), \
+ _MM_FROUND_CUR_DIRECTION))
+#define _mm_mask_roundscale_sd(A, U, B, C, I) \
+ ((__m128d) \
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (B), \
+ (__v2df) (__m128d) (C), \
+ (int) (I), \
+ (__v2df) (__m128d) (A), \
+ (__mmask8) (U), \
+ _MM_FROUND_CUR_DIRECTION))
+#define _mm_maskz_roundscale_sd(U, A, B, I) \
+ ((__m128d) \
+ __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
+ (__v2df) (__m128d) (B), \
+ (int) (I), \
+ (__v2df) _mm_setzero_pd (), \
+ (__mmask8) (U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_cmp_sd_mask(X, Y, P) \
+ ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P),\
+ (__mmask8)-1,_MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
+ ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P),\
+ M,_MM_FROUND_CUR_DIRECTION))
+
+#define _mm_cmp_ss_mask(X, Y, P) \
+ ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1,_MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
+ ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ M,_MM_FROUND_CUR_DIRECTION))
+
+#endif
+
+#ifdef __DISABLE_AVX512F__
+#undef __DISABLE_AVX512F__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512F__ */
+
+#if !defined (__AVX512F__) || !defined (__EVEX512__)
+#pragma GCC push_options
+#pragma GCC target("avx512f,evex512")
+#define __DISABLE_AVX512F_512__
+#endif /* __AVX512F_512__ */
+
/* Internal data types for implementing the intrinsics. */
typedef double __v8df __attribute__ ((__vector_size__ (64)));
typedef float __v16sf __attribute__ ((__vector_size__ (64)));
@@ -57,9 +3799,6 @@ typedef float __m512_u __attribute__ ((__vector_size__ (64), __may_alias__, __al
typedef long long __m512i_u __attribute__ ((__vector_size__ (64), __may_alias__, __aligned__ (1)));
typedef double __m512d_u __attribute__ ((__vector_size__ (64), __may_alias__, __aligned__ (1)));
-typedef unsigned char __mmask8;
-typedef unsigned short __mmask16;
-
extern __inline __mmask16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_int2mask (int __M)
@@ -1498,174 +5237,6 @@ _mm512_maskz_sra_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
(__mmask16) __U);
}
-#ifdef __OPTIMIZE__
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_add_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_add_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_add_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_add_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_sub_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_sub_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_sub_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_sub_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-#else
-#define _mm_add_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_addsd_round(A, B, C)
-
-#define _mm_mask_add_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_addsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_add_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_addsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
-#define _mm_add_round_ss(A, B, C) \
- (__m128)__builtin_ia32_addss_round(A, B, C)
-
-#define _mm_mask_add_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_addss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_add_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_addss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
-#define _mm_sub_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_subsd_round(A, B, C)
-
-#define _mm_mask_sub_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_subsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_sub_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_subsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
-#define _mm_sub_round_ss(A, B, C) \
- (__m128)__builtin_ia32_subss_round(A, B, C)
-
-#define _mm_mask_sub_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_subss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_sub_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_subss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
-#endif
-
/* Constant helper to represent the ternary logic operations among
vector A, B and C. */
typedef enum
@@ -1856,62 +5427,6 @@ _mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp14_sd (__m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __B,
- (__v2df) __A);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __B,
- (__v2df) __A,
- (__v2df) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __B,
- (__v2df) __A,
- (__v2df) _mm_setzero_ps (),
- (__mmask8) __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp14_ss (__m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __B,
- (__v4sf) __A);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_rsqrt14_pd (__m512d __A)
@@ -1970,62 +5485,6 @@ _mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt14_sd (__m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __B,
- (__v2df) __A);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __B,
- (__v2df) __A,
- (__v2df) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __B,
- (__v2df) __A,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt14_ss (__m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __B,
- (__v4sf) __A);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf) __W,
- (__mmask8) __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U);
-}
-
#ifdef __OPTIMIZE__
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -2086,71 +5545,6 @@ _mm512_maskz_sqrt_round_ps (__mmask16 __U, __m512 __A, const int __R)
(__mmask16) __U, __R);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B,
- (__v2df) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_sqrt_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B,
- (__v2df) __A,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_sqrt_round_sd (__mmask8 __U, __m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B,
- (__v2df) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_sqrt_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_sqrt_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B,
- (__v4sf) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
#else
#define _mm512_sqrt_round_pd(A, C) \
(__m512d)__builtin_ia32_sqrtpd512_mask(A, (__v8df)_mm512_undefined_pd(), -1, C)
@@ -2170,41 +5564,8 @@ _mm_maskz_sqrt_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
#define _mm512_maskz_sqrt_round_ps(U, A, C) \
(__m512)__builtin_ia32_sqrtps512_mask(A, (__v16sf)_mm512_setzero_ps(), U, C)
-#define _mm_sqrt_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, \
- (__v2df) _mm_setzero_pd (), -1, C)
-
-#define _mm_mask_sqrt_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, W, U, C)
-
-#define _mm_maskz_sqrt_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, \
- (__v2df) _mm_setzero_pd (), U, C)
-
-#define _mm_sqrt_round_ss(A, B, C) \
- (__m128)__builtin_ia32_sqrtss_mask_round (B, A, \
- (__v4sf) _mm_setzero_ps (), -1, C)
-
-#define _mm_mask_sqrt_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_sqrtss_mask_round (B, A, W, U, C)
-
-#define _mm_maskz_sqrt_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_sqrtss_mask_round (B, A, \
- (__v4sf) _mm_setzero_ps (), U, C)
#endif
-#define _mm_mask_sqrt_sd(W, U, A, B) \
- _mm_mask_sqrt_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_sqrt_sd(U, A, B) \
- _mm_maskz_sqrt_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_sqrt_ss(W, U, A, B) \
- _mm_mask_sqrt_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_sqrt_ss(U, A, B) \
- _mm_maskz_sqrt_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepi8_epi32 (__m128i __A)
@@ -2802,134 +6163,6 @@ _mm512_maskz_div_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R)
(__mmask16) __U, __R);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_mul_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_mul_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_mul_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_mul_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_div_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_div_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_div_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_div_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
#else
#define _mm512_mul_round_pd(A, B, C) \
(__m512d)__builtin_ia32_mulpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C)
@@ -2967,42 +6200,6 @@ _mm_maskz_div_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
#define _mm512_maskz_div_round_ps(U, A, B, C) \
(__m512)__builtin_ia32_divps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C)
-#define _mm_mul_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_mulsd_round(A, B, C)
-
-#define _mm_mask_mul_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_mulsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_mul_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_mulsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
-#define _mm_mul_round_ss(A, B, C) \
- (__m128)__builtin_ia32_mulss_round(A, B, C)
-
-#define _mm_mask_mul_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_mulss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_mul_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_mulss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
-#define _mm_div_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_divsd_round(A, B, C)
-
-#define _mm_mask_div_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_divsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_div_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_divsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
-#define _mm_div_round_ss(A, B, C) \
- (__m128)__builtin_ia32_divss_round(A, B, C)
-
-#define _mm_mask_div_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_divss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_div_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_divss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
#endif
#ifdef __OPTIMIZE__
@@ -3246,72 +6443,6 @@ _mm512_maskz_scalef_round_ps (__mmask16 __U, __m512 __A, __m512 __B,
(__mmask16) __U, __R);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_scalef_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_scalef_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_scalef_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_scalef_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_scalef_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_scalef_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
#else
#define _mm512_scalef_round_pd(A, B, C) \
((__m512d) \
@@ -3343,51 +6474,8 @@ _mm_maskz_scalef_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
(__v16sf) _mm512_setzero_ps(), \
(U), (C)))
-#define _mm_scalef_round_sd(A, B, C) \
- ((__m128d) \
- __builtin_ia32_scalefsd_mask_round ((A), (B), \
- (__v2df) _mm_undefined_pd (), \
- -1, (C)))
-
-#define _mm_scalef_round_ss(A, B, C) \
- ((__m128) \
- __builtin_ia32_scalefss_mask_round ((A), (B), \
- (__v4sf) _mm_undefined_ps (), \
- -1, (C)))
-
-#define _mm_mask_scalef_round_sd(W, U, A, B, C) \
- ((__m128d) \
- __builtin_ia32_scalefsd_mask_round ((A), (B), (W), (U), (C)))
-
-#define _mm_mask_scalef_round_ss(W, U, A, B, C) \
- ((__m128) \
- __builtin_ia32_scalefss_mask_round ((A), (B), (W), (U), (C)))
-
-#define _mm_maskz_scalef_round_sd(U, A, B, C) \
- ((__m128d) \
- __builtin_ia32_scalefsd_mask_round ((A), (B), \
- (__v2df) _mm_setzero_pd (), \
- (U), (C)))
-
-#define _mm_maskz_scalef_round_ss(U, A, B, C) \
- ((__m128) \
- __builtin_ia32_scalefss_mask_round ((A), (B), \
- (__v4sf) _mm_setzero_ps (), \
- (U), (C)))
#endif
-#define _mm_mask_scalef_sd(W, U, A, B) \
- _mm_mask_scalef_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_scalef_sd(U, A, B) \
- _mm_maskz_scalef_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_scalef_ss(W, U, A, B) \
- _mm_mask_scalef_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_scalef_ss(U, A, B) \
- _mm_maskz_scalef_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
#ifdef __OPTIMIZE__
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -5188,115 +8276,6 @@ _mm512_maskz_cvt_roundps_epu32 (__mmask16 __U, __m512 __A, const int __R)
((__m512i)__builtin_ia32_cvtps2udq512_mask(A, (__v16si)_mm512_setzero_si512 (), U, B))
#endif
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtu32_sd (__m128d __A, unsigned __B)
-{
- return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
-}
-
-#ifdef __x86_64__
-#ifdef __OPTIMIZE__
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundu64_sd (__m128d __A, unsigned long long __B, const int __R)
-{
- return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundi64_sd (__m128d __A, long long __B, const int __R)
-{
- return (__m128d) __builtin_ia32_cvtsi2sd64 ((__v2df) __A, __B, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsi64_sd (__m128d __A, long long __B, const int __R)
-{
- return (__m128d) __builtin_ia32_cvtsi2sd64 ((__v2df) __A, __B, __R);
-}
-#else
-#define _mm_cvt_roundu64_sd(A, B, C) \
- (__m128d)__builtin_ia32_cvtusi2sd64(A, B, C)
-
-#define _mm_cvt_roundi64_sd(A, B, C) \
- (__m128d)__builtin_ia32_cvtsi2sd64(A, B, C)
-
-#define _mm_cvt_roundsi64_sd(A, B, C) \
- (__m128d)__builtin_ia32_cvtsi2sd64(A, B, C)
-#endif
-
-#endif
-
-#ifdef __OPTIMIZE__
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundu32_ss (__m128 __A, unsigned __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsi32_ss (__m128 __A, int __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsi2ss32 ((__v4sf) __A, __B, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundi32_ss (__m128 __A, int __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsi2ss32 ((__v4sf) __A, __B, __R);
-}
-#else
-#define _mm_cvt_roundu32_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtusi2ss32(A, B, C)
-
-#define _mm_cvt_roundi32_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtsi2ss32(A, B, C)
-
-#define _mm_cvt_roundsi32_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtsi2ss32(A, B, C)
-#endif
-
-#ifdef __x86_64__
-#ifdef __OPTIMIZE__
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundu64_ss (__m128 __A, unsigned long long __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsi64_ss (__m128 __A, long long __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsi2ss64 ((__v4sf) __A, __B, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundi64_ss (__m128 __A, long long __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsi2ss64 ((__v4sf) __A, __B, __R);
-}
-#else
-#define _mm_cvt_roundu64_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtusi2ss64(A, B, C)
-
-#define _mm_cvt_roundi64_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtsi2ss64(A, B, C)
-
-#define _mm_cvt_roundsi64_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtsi2ss64(A, B, C)
-#endif
-
-#endif
-
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepi32_epi8 (__m512i __A)
@@ -6394,83 +9373,6 @@ _mm512_mask_storeu_ps (void *__P, __mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float *__P)
-{
- return (__m128) __builtin_ia32_loadss_mask (__P, (__v4sf) __W, __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_load_ss (__mmask8 __U, const float *__P)
-{
- return (__m128) __builtin_ia32_loadss_mask (__P, (__v4sf) _mm_setzero_ps (),
- __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double *__P)
-{
- return (__m128d) __builtin_ia32_loadsd_mask (__P, (__v2df) __W, __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_load_sd (__mmask8 __U, const double *__P)
-{
- return (__m128d) __builtin_ia32_loadsd_mask (__P, (__v2df) _mm_setzero_pd (),
- __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_movess_mask ((__v4sf) __A, (__v4sf) __B,
- (__v4sf) __W, __U);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_movess_mask ((__v4sf) __A, (__v4sf) __B,
- (__v4sf) _mm_setzero_ps (), __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_movesd_mask ((__v2df) __A, (__v2df) __B,
- (__v2df) __W, __U);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_movesd_mask ((__v2df) __A, (__v2df) __B,
- (__v2df) _mm_setzero_pd (),
- __U);
-}
-
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_store_ss (float *__P, __mmask8 __U, __m128 __A)
-{
- __builtin_ia32_storess_mask (__P, (__v4sf) __A, (__mmask8) __U);
-}
-
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_store_sd (double *__P, __mmask8 __U, __m128d __A)
-{
- __builtin_ia32_storesd_mask (__P, (__v2df) __A, (__mmask8) __U);
-}
-
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_loadu_epi64 (void const *__P)
@@ -7273,73 +10175,6 @@ _mm512_maskz_fixupimm_round_ps (__mmask16 __U, __m512 __A, __m512 __B,
(__mmask16) __U, __R);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fixupimm_round_sd (__m128d __A, __m128d __B, __m128i __C,
- const int __imm, const int __R)
-{
- return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2di) __C, __imm,
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fixupimm_round_sd (__m128d __A, __mmask8 __U, __m128d __B,
- __m128i __C, const int __imm, const int __R)
-{
- return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2di) __C, __imm,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fixupimm_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- __m128i __C, const int __imm, const int __R)
-{
- return (__m128d) __builtin_ia32_fixupimmsd_maskz ((__v2df) __A,
- (__v2df) __B,
- (__v2di) __C,
- __imm,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fixupimm_round_ss (__m128 __A, __m128 __B, __m128i __C,
- const int __imm, const int __R)
-{
- return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4si) __C, __imm,
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fixupimm_round_ss (__m128 __A, __mmask8 __U, __m128 __B,
- __m128i __C, const int __imm, const int __R)
-{
- return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4si) __C, __imm,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fixupimm_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- __m128i __C, const int __imm, const int __R)
-{
- return (__m128) __builtin_ia32_fixupimmss_maskz ((__v4sf) __A,
- (__v4sf) __B,
- (__v4si) __C, __imm,
- (__mmask8) __U, __R);
-}
-
#else
#define _mm512_shuffle_pd(X, Y, C) \
((__m512d)__builtin_ia32_shufpd512_mask ((__v8df)(__m512d)(X), \
@@ -7407,35 +10242,6 @@ _mm_maskz_fixupimm_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \
(__mmask16)(U), (R)))
-#define _mm_fixupimm_round_sd(X, Y, Z, C, R) \
- ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
- (__mmask8)(-1), (R)))
-
-#define _mm_mask_fixupimm_round_sd(X, U, Y, Z, C, R) \
- ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), (R)))
-
-#define _mm_maskz_fixupimm_round_sd(U, X, Y, Z, C, R) \
- ((__m128d)__builtin_ia32_fixupimmsd_maskz ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), (R)))
-
-#define _mm_fixupimm_round_ss(X, Y, Z, C, R) \
- ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
- (__mmask8)(-1), (R)))
-
-#define _mm_mask_fixupimm_round_ss(X, U, Y, Z, C, R) \
- ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), (R)))
-
-#define _mm_maskz_fixupimm_round_ss(U, X, Y, Z, C, R) \
- ((__m128)__builtin_ia32_fixupimmss_maskz ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), (R)))
#endif
extern __inline __m512
@@ -8169,258 +10975,6 @@ _mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
(__mmask8) __U);
}
-#ifdef __x86_64__
-#ifdef __OPTIMIZE__
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_u64 (__m128 __A, const int __R)
-{
- return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_si64 (__m128 __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvtss2si64 ((__v4sf) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_i64 (__m128 __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvtss2si64 ((__v4sf) __A, __R);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundss_u64 (__m128 __A, const int __R)
-{
- return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundss_i64 (__m128 __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundss_si64 (__m128 __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, __R);
-}
-#else
-#define _mm_cvt_roundss_u64(A, B) \
- ((unsigned long long)__builtin_ia32_vcvtss2usi64(A, B))
-
-#define _mm_cvt_roundss_si64(A, B) \
- ((long long)__builtin_ia32_vcvtss2si64(A, B))
-
-#define _mm_cvt_roundss_i64(A, B) \
- ((long long)__builtin_ia32_vcvtss2si64(A, B))
-
-#define _mm_cvtt_roundss_u64(A, B) \
- ((unsigned long long)__builtin_ia32_vcvttss2usi64(A, B))
-
-#define _mm_cvtt_roundss_i64(A, B) \
- ((long long)__builtin_ia32_vcvttss2si64(A, B))
-
-#define _mm_cvtt_roundss_si64(A, B) \
- ((long long)__builtin_ia32_vcvttss2si64(A, B))
-#endif
-#endif
-
-#ifdef __OPTIMIZE__
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_u32 (__m128 __A, const int __R)
-{
- return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_si32 (__m128 __A, const int __R)
-{
- return (int) __builtin_ia32_vcvtss2si32 ((__v4sf) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_i32 (__m128 __A, const int __R)
-{
- return (int) __builtin_ia32_vcvtss2si32 ((__v4sf) __A, __R);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundss_u32 (__m128 __A, const int __R)
-{
- return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundss_i32 (__m128 __A, const int __R)
-{
- return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundss_si32 (__m128 __A, const int __R)
-{
- return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, __R);
-}
-#else
-#define _mm_cvt_roundss_u32(A, B) \
- ((unsigned)__builtin_ia32_vcvtss2usi32(A, B))
-
-#define _mm_cvt_roundss_si32(A, B) \
- ((int)__builtin_ia32_vcvtss2si32(A, B))
-
-#define _mm_cvt_roundss_i32(A, B) \
- ((int)__builtin_ia32_vcvtss2si32(A, B))
-
-#define _mm_cvtt_roundss_u32(A, B) \
- ((unsigned)__builtin_ia32_vcvttss2usi32(A, B))
-
-#define _mm_cvtt_roundss_si32(A, B) \
- ((int)__builtin_ia32_vcvttss2si32(A, B))
-
-#define _mm_cvtt_roundss_i32(A, B) \
- ((int)__builtin_ia32_vcvttss2si32(A, B))
-#endif
-
-#ifdef __x86_64__
-#ifdef __OPTIMIZE__
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_u64 (__m128d __A, const int __R)
-{
- return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_si64 (__m128d __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvtsd2si64 ((__v2df) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_i64 (__m128d __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvtsd2si64 ((__v2df) __A, __R);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsd_u64 (__m128d __A, const int __R)
-{
- return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsd_si64 (__m128d __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, __R);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsd_i64 (__m128d __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, __R);
-}
-#else
-#define _mm_cvt_roundsd_u64(A, B) \
- ((unsigned long long)__builtin_ia32_vcvtsd2usi64(A, B))
-
-#define _mm_cvt_roundsd_si64(A, B) \
- ((long long)__builtin_ia32_vcvtsd2si64(A, B))
-
-#define _mm_cvt_roundsd_i64(A, B) \
- ((long long)__builtin_ia32_vcvtsd2si64(A, B))
-
-#define _mm_cvtt_roundsd_u64(A, B) \
- ((unsigned long long)__builtin_ia32_vcvttsd2usi64(A, B))
-
-#define _mm_cvtt_roundsd_si64(A, B) \
- ((long long)__builtin_ia32_vcvttsd2si64(A, B))
-
-#define _mm_cvtt_roundsd_i64(A, B) \
- ((long long)__builtin_ia32_vcvttsd2si64(A, B))
-#endif
-#endif
-
-#ifdef __OPTIMIZE__
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_u32 (__m128d __A, const int __R)
-{
- return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_si32 (__m128d __A, const int __R)
-{
- return (int) __builtin_ia32_vcvtsd2si32 ((__v2df) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_i32 (__m128d __A, const int __R)
-{
- return (int) __builtin_ia32_vcvtsd2si32 ((__v2df) __A, __R);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsd_u32 (__m128d __A, const int __R)
-{
- return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsd_i32 (__m128d __A, const int __R)
-{
- return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsd_si32 (__m128d __A, const int __R)
-{
- return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, __R);
-}
-#else
-#define _mm_cvt_roundsd_u32(A, B) \
- ((unsigned)__builtin_ia32_vcvtsd2usi32(A, B))
-
-#define _mm_cvt_roundsd_si32(A, B) \
- ((int)__builtin_ia32_vcvtsd2si32(A, B))
-
-#define _mm_cvt_roundsd_i32(A, B) \
- ((int)__builtin_ia32_vcvtsd2si32(A, B))
-
-#define _mm_cvtt_roundsd_u32(A, B) \
- ((unsigned)__builtin_ia32_vcvttsd2usi32(A, B))
-
-#define _mm_cvtt_roundsd_si32(A, B) \
- ((int)__builtin_ia32_vcvttsd2si32(A, B))
-
-#define _mm_cvtt_roundsd_i32(A, B) \
- ((int)__builtin_ia32_vcvttsd2si32(A, B))
-#endif
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_movedup_pd (__m512d __A)
@@ -8741,71 +11295,6 @@ _mm512_maskz_cvt_roundpd_ps (__mmask8 __U, __m512d __A, const int __R)
(__mmask8) __U, __R);
}
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_ss (__m128 __A, __m128d __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsd2ss_round ((__v4sf) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvt_roundsd_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128d __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A,
- (__v2df) __B,
- (__v4sf) __W,
- __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvt_roundsd_ss (__mmask8 __U, __m128 __A,
- __m128d __B, const int __R)
-{
- return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A,
- (__v2df) __B,
- _mm_setzero_ps (),
- __U,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_sd (__m128d __A, __m128 __B, const int __R)
-{
- return (__m128d) __builtin_ia32_cvtss2sd_round ((__v2df) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvt_roundss_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128 __B, const int __R)
-{
- return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A,
- (__v4sf) __B,
- (__v2df) __W,
- __U,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvt_roundss_sd (__mmask8 __U, __m128d __A,
- __m128 __B, const int __R)
-{
- return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A,
- (__v4sf) __B,
- _mm_setzero_pd (),
- __U,
- __R);
-}
#else
#define _mm512_cvt_roundpd_ps(A, B) \
(__m256)__builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_undefined_ps(), -1, B)
@@ -8816,40 +11305,8 @@ _mm_maskz_cvt_roundss_sd (__mmask8 __U, __m128d __A,
#define _mm512_maskz_cvt_roundpd_ps(U, A, B) \
(__m256)__builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_setzero_ps(), U, B)
-#define _mm_cvt_roundsd_ss(A, B, C) \
- (__m128)__builtin_ia32_cvtsd2ss_round(A, B, C)
-
-#define _mm_mask_cvt_roundsd_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), (W), (U), (C))
-
-#define _mm_maskz_cvt_roundsd_ss(U, A, B, C) \
- (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), _mm_setzero_ps (), \
- (U), (C))
-
-#define _mm_cvt_roundss_sd(A, B, C) \
- (__m128d)__builtin_ia32_cvtss2sd_round(A, B, C)
-
-#define _mm_mask_cvt_roundss_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), (W), (U), (C))
-
-#define _mm_maskz_cvt_roundss_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), _mm_setzero_pd (), \
- (U), (C))
-
#endif
-#define _mm_mask_cvtss_sd(W, U, A, B) \
- _mm_mask_cvt_roundss_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_cvtss_sd(U, A, B) \
- _mm_maskz_cvt_roundss_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_cvtsd_ss(W, U, A, B) \
- _mm_mask_cvt_roundsd_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_cvtsd_ss(U, A, B) \
- _mm_maskz_cvt_roundsd_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_stream_si512 (__m512i * __P, __m512i __A)
@@ -8878,87 +11335,7 @@ _mm512_stream_load_si512 (void *__P)
return __builtin_ia32_movntdqa512 ((__v8di *)__P);
}
-/* Constants for mantissa extraction */
-typedef enum
-{
- _MM_MANT_NORM_1_2, /* interval [1, 2) */
- _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */
- _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */
- _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */
-} _MM_MANTISSA_NORM_ENUM;
-
-typedef enum
-{
- _MM_MANT_SIGN_src, /* sign = sign(SRC) */
- _MM_MANT_SIGN_zero, /* sign = 0 */
- _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */
-} _MM_MANTISSA_SIGN_ENUM;
-
#ifdef __OPTIMIZE__
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getexp_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_getexpss128_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getexp_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getexp_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getexp_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_getexpsd128_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getexp_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getexp_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_getexp_round_ps (__m512 __A, const int __R)
@@ -9091,84 +11468,6 @@ _mm512_maskz_getmant_round_ps (__mmask16 __U, __m512 __A,
__U, __R);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getmant_round_sd (__m128d __A, __m128d __B,
- _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D, const int __R)
-{
- return (__m128d) __builtin_ia32_getmantsd_round ((__v2df) __A,
- (__v2df) __B,
- (__D << 2) | __C,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getmant_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D, const int __R)
-{
- return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__D << 2) | __C,
- (__v2df) __W,
- __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getmant_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D, const int __R)
-{
- return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__D << 2) | __C,
- (__v2df)
- _mm_setzero_pd(),
- __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getmant_round_ss (__m128 __A, __m128 __B,
- _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D, const int __R)
-{
- return (__m128) __builtin_ia32_getmantss_round ((__v4sf) __A,
- (__v4sf) __B,
- (__D << 2) | __C,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getmant_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D, const int __R)
-{
- return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__D << 2) | __C,
- (__v4sf) __W,
- __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getmant_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D, const int __R)
-{
- return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__D << 2) | __C,
- (__v4sf)
- _mm_setzero_ps(),
- __U, __R);
-}
-
#else
#define _mm512_getmant_round_pd(X, B, C, R) \
((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \
@@ -9210,68 +11509,6 @@ _mm_maskz_getmant_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__v16sf)(__m512)_mm512_setzero_ps(), \
(__mmask16)(U),\
(R)))
-#define _mm_getmant_round_sd(X, Y, C, D, R) \
- ((__m128d)__builtin_ia32_getmantsd_round ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (int)(((D)<<2) | (C)), \
- (R)))
-
-#define _mm_mask_getmant_round_sd(W, U, X, Y, C, D, R) \
- ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U),\
- (R)))
-
-#define _mm_maskz_getmant_round_sd(U, X, Y, C, D, R) \
- ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)_mm_setzero_pd(), \
- (__mmask8)(U),\
- (R)))
-
-#define _mm_getmant_round_ss(X, Y, C, D, R) \
- ((__m128)__builtin_ia32_getmantss_round ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), \
- (int)(((D)<<2) | (C)), \
- (R)))
-
-#define _mm_mask_getmant_round_ss(W, U, X, Y, C, D, R) \
- ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U),\
- (R)))
-
-#define _mm_maskz_getmant_round_ss(U, X, Y, C, D, R) \
- ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)_mm_setzero_ps(), \
- (__mmask8)(U),\
- (R)))
-
-#define _mm_getexp_round_ss(A, B, R) \
- ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), R))
-
-#define _mm_mask_getexp_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_getexpss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_getexp_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
-#define _mm_getexp_round_sd(A, B, R) \
- ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), R))
-
-#define _mm_mask_getexp_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_getexp_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
#define _mm512_getexp_round_ps(A, R) \
((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
@@ -9363,88 +11600,6 @@ _mm512_maskz_roundscale_round_pd (__mmask8 __A, __m512d __B,
(__mmask8) __A, __R);
}
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_roundscale_round_ss (__m128 __A, __m128 __B, const int __imm,
- const int __R)
-{
- return (__m128)
- __builtin_ia32_rndscaless_mask_round ((__v4sf) __A,
- (__v4sf) __B, __imm,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_roundscale_round_ss (__m128 __A, __mmask8 __B, __m128 __C,
- __m128 __D, const int __imm, const int __R)
-{
- return (__m128)
- __builtin_ia32_rndscaless_mask_round ((__v4sf) __C,
- (__v4sf) __D, __imm,
- (__v4sf) __A,
- (__mmask8) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_roundscale_round_ss (__mmask8 __A, __m128 __B, __m128 __C,
- const int __imm, const int __R)
-{
- return (__m128)
- __builtin_ia32_rndscaless_mask_round ((__v4sf) __B,
- (__v4sf) __C, __imm,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __A,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_roundscale_round_sd (__m128d __A, __m128d __B, const int __imm,
- const int __R)
-{
- return (__m128d)
- __builtin_ia32_rndscalesd_mask_round ((__v2df) __A,
- (__v2df) __B, __imm,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_roundscale_round_sd (__m128d __A, __mmask8 __B, __m128d __C,
- __m128d __D, const int __imm, const int __R)
-{
- return (__m128d)
- __builtin_ia32_rndscalesd_mask_round ((__v2df) __C,
- (__v2df) __D, __imm,
- (__v2df) __A,
- (__mmask8) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_roundscale_round_sd (__mmask8 __A, __m128d __B, __m128d __C,
- const int __imm, const int __R)
-{
- return (__m128d)
- __builtin_ia32_rndscalesd_mask_round ((__v2df) __B,
- (__v2df) __C, __imm,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __A,
- __R);
-}
-
#else
#define _mm512_roundscale_round_ps(A, B, R) \
((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(A), (int)(B),\
@@ -9472,54 +11627,6 @@ _mm_maskz_roundscale_round_sd (__mmask8 __A, __m128d __B, __m128d __C,
(int)(C), \
(__v8df)_mm512_setzero_pd(),\
(__mmask8)(A), R))
-#define _mm_roundscale_round_ss(A, B, I, R) \
- ((__m128) \
- __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
- (__v4sf) (__m128) (B), \
- (int) (I), \
- (__v4sf) _mm_setzero_ps (), \
- (__mmask8) (-1), \
- (int) (R)))
-#define _mm_mask_roundscale_round_ss(A, U, B, C, I, R) \
- ((__m128) \
- __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (B), \
- (__v4sf) (__m128) (C), \
- (int) (I), \
- (__v4sf) (__m128) (A), \
- (__mmask8) (U), \
- (int) (R)))
-#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
- ((__m128) \
- __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
- (__v4sf) (__m128) (B), \
- (int) (I), \
- (__v4sf) _mm_setzero_ps (), \
- (__mmask8) (U), \
- (int) (R)))
-#define _mm_roundscale_round_sd(A, B, I, R) \
- ((__m128d) \
- __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
- (__v2df) (__m128d) (B), \
- (int) (I), \
- (__v2df) _mm_setzero_pd (), \
- (__mmask8) (-1), \
- (int) (R)))
-#define _mm_mask_roundscale_round_sd(A, U, B, C, I, R) \
- ((__m128d) \
- __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (B), \
- (__v2df) (__m128d) (C), \
- (int) (I), \
- (__v2df) (__m128d) (A), \
- (__mmask8) (U), \
- (int) (R)))
-#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
- ((__m128d) \
- __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
- (__v2df) (__m128d) (B), \
- (int) (I), \
- (__v2df) _mm_setzero_pd (), \
- (__mmask8) (U), \
- (int) (R)))
#endif
extern __inline __m512
@@ -10068,22 +12175,6 @@ _mm512_cmpneq_epu64_mask (__m512i __X, __m512i __Y)
#define _MM_CMPINT_GT 0x6
#ifdef __OPTIMIZE__
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kshiftli_mask16 (__mmask16 __A, unsigned int __B)
-{
- return (__mmask16) __builtin_ia32_kshiftlihi ((__mmask16) __A,
- (__mmask8) __B);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kshiftri_mask16 (__mmask16 __A, unsigned int __B)
-{
- return (__mmask16) __builtin_ia32_kshiftrihi ((__mmask16) __A,
- (__mmask8) __B);
-}
-
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cmp_epi64_mask (__m512i __X, __m512i __Y, const int __P)
@@ -10199,51 +12290,7 @@ _mm512_mask_cmp_round_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y,
(__mmask16) __U, __R);
}
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmp_round_sd_mask (__m128d __X, __m128d __Y, const int __P, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
- (__v2df) __Y, __P,
- (__mmask8) -1, __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cmp_round_sd_mask (__mmask8 __M, __m128d __X, __m128d __Y,
- const int __P, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
- (__v2df) __Y, __P,
- (__mmask8) __M, __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmp_round_ss_mask (__m128 __X, __m128 __Y, const int __P, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
- (__v4sf) __Y, __P,
- (__mmask8) -1, __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cmp_round_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y,
- const int __P, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
- (__v4sf) __Y, __P,
- (__mmask8) __M, __R);
-}
-
#else
-#define _kshiftli_mask16(X, Y) \
- ((__mmask16) __builtin_ia32_kshiftlihi ((__mmask16)(X), (__mmask8)(Y)))
-
-#define _kshiftri_mask16(X, Y) \
- ((__mmask16) __builtin_ia32_kshiftrihi ((__mmask16)(X), (__mmask8)(Y)))
-
#define _mm512_cmp_epi64_mask(X, Y, P) \
((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X), \
(__v8di)(__m512i)(Y), (int)(P),\
@@ -10304,25 +12351,6 @@ _mm_mask_cmp_round_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y,
(__v16sf)(__m512)(Y), (int)(P),\
(__mmask16)(M), R))
-#define _mm_cmp_round_sd_mask(X, Y, P, R) \
- ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P),\
- (__mmask8)-1, R))
-
-#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
- ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P),\
- (M), R))
-
-#define _mm_cmp_round_ss_mask(X, Y, P, R) \
- ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1, R))
-
-#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
- ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (M), R))
#endif
#ifdef __OPTIMIZE__
@@ -11110,66 +13138,6 @@ _mm512_maskz_expandloadu_epi32 (__mmask16 __U, void const *__P)
(), (__mmask16) __U);
}
-/* Mask arithmetic operations */
-#define _kand_mask16 _mm512_kand
-#define _kandn_mask16 _mm512_kandn
-#define _knot_mask16 _mm512_knot
-#define _kor_mask16 _mm512_kor
-#define _kxnor_mask16 _mm512_kxnor
-#define _kxor_mask16 _mm512_kxor
-
-extern __inline unsigned char
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortest_mask16_u8 (__mmask16 __A, __mmask16 __B, unsigned char *__CF)
-{
- *__CF = (unsigned char) __builtin_ia32_kortestchi (__A, __B);
- return (unsigned char) __builtin_ia32_kortestzhi (__A, __B);
-}
-
-extern __inline unsigned char
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortestz_mask16_u8 (__mmask16 __A, __mmask16 __B)
-{
- return (unsigned char) __builtin_ia32_kortestzhi ((__mmask16) __A,
- (__mmask16) __B);
-}
-
-extern __inline unsigned char
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kortestc_mask16_u8 (__mmask16 __A, __mmask16 __B)
-{
- return (unsigned char) __builtin_ia32_kortestchi ((__mmask16) __A,
- (__mmask16) __B);
-}
-
-extern __inline unsigned int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_cvtmask16_u32 (__mmask16 __A)
-{
- return (unsigned int) __builtin_ia32_kmovw ((__mmask16 ) __A);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_cvtu32_mask16 (unsigned int __A)
-{
- return (__mmask16) __builtin_ia32_kmovw ((__mmask16 ) __A);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_load_mask16 (__mmask16 *__A)
-{
- return (__mmask16) __builtin_ia32_kmovw (*(__mmask16 *) __A);
-}
-
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_store_mask16 (__mmask16 *__A, __mmask16 __B)
-{
- *(__mmask16 *) __A = __builtin_ia32_kmovw (__B);
-}
-
extern __inline __mmask16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_kand (__mmask16 __A, __mmask16 __B)
@@ -11236,13 +13204,6 @@ _mm512_kunpackb (__mmask16 __A, __mmask16 __B)
return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
}
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_kunpackb_mask16 (__mmask8 __A, __mmask8 __B)
-{
- return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
-}
-
#ifdef __OPTIMIZE__
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -11593,174 +13554,6 @@ _mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
(__mmask16) __U);
}
-#ifdef __OPTIMIZE__
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_max_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_max_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_max_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_max_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_round_sd (__m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_min_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_min_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_round_ss (__m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_min_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_min_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U, __R);
-}
-
-#else
-#define _mm_max_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_maxsd_round(A, B, C)
-
-#define _mm_mask_max_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_maxsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_max_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_maxsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
-#define _mm_max_round_ss(A, B, C) \
- (__m128)__builtin_ia32_maxss_round(A, B, C)
-
-#define _mm_mask_max_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_maxss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_max_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_maxss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
-#define _mm_min_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_minsd_round(A, B, C)
-
-#define _mm_mask_min_round_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_minsd_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_min_round_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_minsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
-
-#define _mm_min_round_ss(A, B, C) \
- (__m128)__builtin_ia32_minss_round(A, B, C)
-
-#define _mm_mask_min_round_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_minss_mask_round(A, B, W, U, C)
-
-#define _mm_maskz_min_round_ss(U, A, B, C) \
- (__m128)__builtin_ia32_minss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
-
-#endif
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_blend_pd (__mmask8 __U, __m512d __A, __m512d __W)
@@ -11797,735 +13590,6 @@ _mm512_mask_blend_epi32 (__mmask16 __U, __m512i __A, __m512i __W)
(__mmask16) __U);
}
-#ifdef __OPTIMIZE__
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
- (__v2df) __A,
- -(__v2df) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
- (__v4sf) __A,
- -(__v4sf) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W,
- -(__v2df) __A,
- -(__v2df) __B,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W,
- -(__v4sf) __A,
- -(__v4sf) __B,
- __R);
-}
-#else
-#define _mm_fmadd_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_round(A, B, C, R)
-
-#define _mm_fmadd_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_round(A, B, C, R)
-
-#define _mm_fmsub_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_round(A, B, -(C), R)
-
-#define _mm_fmsub_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_round(A, B, -(C), R)
-
-#define _mm_fnmadd_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_round(A, -(B), C, R)
-
-#define _mm_fnmadd_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_round(A, -(B), C, R)
-
-#define _mm_fnmsub_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_round(A, -(B), -(C), R)
-
-#define _mm_fnmsub_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_round(A, -(B), -(C), R)
-#endif
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- (__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- (__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmsub_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
-{
- return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmsub_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
-{
- return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- (__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- (__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmadd_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmadd_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- -(__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- -(__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmsub_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U)
-{
- return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmsub_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U)
-{
- return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- -(__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- -(__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmsub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- (__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmsub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- (__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmsub_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- (__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmsub_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- (__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmadd_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmadd_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmadd_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmadd_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmsub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W,
- -(__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmsub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- -(__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmsub_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B,
- const int __R)
-{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W,
- -(__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmsub_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B,
- const int __R)
-{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W,
- -(__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U, __R);
-}
-#else
-#define _mm_mask_fmadd_round_sd(A, U, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_mask (A, B, C, U, R)
-
-#define _mm_mask_fmadd_round_ss(A, U, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_mask (A, B, C, U, R)
-
-#define _mm_mask3_fmadd_round_sd(A, B, C, U, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_mask3 (A, B, C, U, R)
-
-#define _mm_mask3_fmadd_round_ss(A, B, C, U, R) \
- (__m128) __builtin_ia32_vfmaddss3_mask3 (A, B, C, U, R)
-
-#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, B, C, U, R)
-
-#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_maskz (A, B, C, U, R)
-
-#define _mm_mask_fmsub_round_sd(A, U, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_mask (A, B, -(C), U, R)
-
-#define _mm_mask_fmsub_round_ss(A, U, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_mask (A, B, -(C), U, R)
-
-#define _mm_mask3_fmsub_round_sd(A, B, C, U, R) \
- (__m128d) __builtin_ia32_vfmsubsd3_mask3 (A, B, C, U, R)
-
-#define _mm_mask3_fmsub_round_ss(A, B, C, U, R) \
- (__m128) __builtin_ia32_vfmsubss3_mask3 (A, B, C, U, R)
-
-#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, B, -(C), U, R)
-
-#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_maskz (A, B, -(C), U, R)
-
-#define _mm_mask_fnmadd_round_sd(A, U, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_mask (A, -(B), C, U, R)
-
-#define _mm_mask_fnmadd_round_ss(A, U, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_mask (A, -(B), C, U, R)
-
-#define _mm_mask3_fnmadd_round_sd(A, B, C, U, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_mask3 (A, -(B), C, U, R)
-
-#define _mm_mask3_fnmadd_round_ss(A, B, C, U, R) \
- (__m128) __builtin_ia32_vfmaddss3_mask3 (A, -(B), C, U, R)
-
-#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, -(B), C, U, R)
-
-#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_maskz (A, -(B), C, U, R)
-
-#define _mm_mask_fnmsub_round_sd(A, U, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_mask (A, -(B), -(C), U, R)
-
-#define _mm_mask_fnmsub_round_ss(A, U, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_mask (A, -(B), -(C), U, R)
-
-#define _mm_mask3_fnmsub_round_sd(A, B, C, U, R) \
- (__m128d) __builtin_ia32_vfmsubsd3_mask3 (A, -(B), C, U, R)
-
-#define _mm_mask3_fnmsub_round_ss(A, B, C, U, R) \
- (__m128) __builtin_ia32_vfmsubss3_mask3 (A, -(B), C, U, R)
-
-#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
- (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, -(B), -(C), U, R)
-
-#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
- (__m128) __builtin_ia32_vfmaddss3_maskz (A, -(B), -(C), U, R)
-#endif
-
-#ifdef __OPTIMIZE__
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comi_round_ss (__m128 __A, __m128 __B, const int __P, const int __R)
-{
- return __builtin_ia32_vcomiss ((__v4sf) __A, (__v4sf) __B, __P, __R);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comi_round_sd (__m128d __A, __m128d __B, const int __P, const int __R)
-{
- return __builtin_ia32_vcomisd ((__v2df) __A, (__v2df) __B, __P, __R);
-}
-#else
-#define _mm_comi_round_ss(A, B, C, D)\
-__builtin_ia32_vcomiss(A, B, C, D)
-#define _mm_comi_round_sd(A, B, C, D)\
-__builtin_ia32_vcomisd(A, B, C, D)
-#endif
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_sqrt_pd (__m512d __A)
@@ -12650,52 +13714,6 @@ _mm512_maskz_add_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_add_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_add_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_add_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_add_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_sub_pd (__m512d __A, __m512d __B)
@@ -12756,52 +13774,6 @@ _mm512_maskz_sub_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_sub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_sub_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_sub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_sub_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mul_pd (__m512d __A, __m512d __B)
@@ -12862,54 +13834,6 @@ _mm512_maskz_mul_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_mul_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B)
-{
- return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_mul_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_mul_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B)
-{
- return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_mul_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_div_pd (__m512d __M, __m512d __V)
@@ -12970,54 +13894,6 @@ _mm512_maskz_div_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_div_sd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128d __B)
-{
- return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_div_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_div_ss (__m128 __W, __mmask8 __U, __m128 __A,
- __m128 __B)
-{
- return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_div_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_max_pd (__m512d __A, __m512d __B)
@@ -13088,52 +13964,6 @@ _mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_max_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_max_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_max_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_max_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_min_pd (__m512d __A, __m512d __B)
@@ -13204,52 +14034,6 @@ _mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_min_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_min_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_min_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_min_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_scalef_pd (__m512d __A, __m512d __B)
@@ -13320,30 +14104,6 @@ _mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_scalef_sd (__m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_scalef_ss (__m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_fmadd_pd (__m512d __A, __m512d __B, __m512d __C)
@@ -14142,32 +14902,6 @@ _mm512_cvtss_f32 (__m512 __A)
return __A[0];
}
-#ifdef __x86_64__
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
-{
- return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
-{
- return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtu32_ss (__m128 __A, unsigned __B)
-{
- return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepi32_ps (__m512i __A)
@@ -14309,76 +15043,6 @@ _mm512_maskz_fixupimm_ps (__mmask16 __U, __m512 __A, __m512 __B,
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fixupimm_sd (__m128d __A, __m128d __B, __m128i __C, const int __imm)
-{
- return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2di) __C, __imm,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fixupimm_sd (__m128d __A, __mmask8 __U, __m128d __B,
- __m128i __C, const int __imm)
-{
- return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2di) __C, __imm,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fixupimm_sd (__mmask8 __U, __m128d __A, __m128d __B,
- __m128i __C, const int __imm)
-{
- return (__m128d) __builtin_ia32_fixupimmsd_maskz ((__v2df) __A,
- (__v2df) __B,
- (__v2di) __C,
- __imm,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fixupimm_ss (__m128 __A, __m128 __B, __m128i __C, const int __imm)
-{
- return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4si) __C, __imm,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fixupimm_ss (__m128 __A, __mmask8 __U, __m128 __B,
- __m128i __C, const int __imm)
-{
- return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4si) __C, __imm,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fixupimm_ss (__mmask8 __U, __m128 __A, __m128 __B,
- __m128i __C, const int __imm)
-{
- return (__m128) __builtin_ia32_fixupimmss_maskz ((__v4sf) __A,
- (__v4sf) __B,
- (__v4si) __C, __imm,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
#else
#define _mm512_fixupimm_pd(X, Y, Z, C) \
((__m512d)__builtin_ia32_fixupimmpd512_mask ((__v8df)(__m512d)(X), \
@@ -14410,65 +15074,8 @@ _mm_maskz_fixupimm_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
-#define _mm_fixupimm_sd(X, Y, Z, C) \
- ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
- (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_fixupimm_sd(X, U, Y, Z, C) \
- ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_fixupimm_sd(U, X, Y, Z, C) \
- ((__m128d)__builtin_ia32_fixupimmsd_maskz ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_fixupimm_ss(X, Y, Z, C) \
- ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
- (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_fixupimm_ss(X, U, Y, Z, C) \
- ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_fixupimm_ss(U, X, Y, Z, C) \
- ((__m128)__builtin_ia32_fixupimmss_maskz ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \
- (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
#endif
-#ifdef __x86_64__
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_u64 (__m128 __A)
-{
- return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_u64 (__m128 __A)
-{
- return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_i64 (__m128 __A)
-{
- return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-#endif /* __x86_64__ */
-
extern __inline int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtsi512_si32 (__m512i __A)
@@ -14477,138 +15084,6 @@ _mm512_cvtsi512_si32 (__m512i __A)
return __B[0];
}
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_u32 (__m128 __A)
-{
- return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_u32 (__m128 __A)
-{
- return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_i32 (__m128 __A)
-{
- return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_i32 (__m128d __A)
-{
- return (int) __builtin_ia32_cvtsd2si ((__v2df) __A);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_i32 (__m128 __A)
-{
- return (int) __builtin_ia32_cvtss2si ((__v4sf) __A);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvti32_sd (__m128d __A, int __B)
-{
- return (__m128d) __builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvti32_ss (__m128 __A, int __B)
-{
- return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
-}
-
-#ifdef __x86_64__
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_u64 (__m128d __A)
-{
- return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_u64 (__m128d __A)
-{
- return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_i64 (__m128d __A)
-{
- return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_i64 (__m128d __A)
-{
- return (long long) __builtin_ia32_cvtsd2si64 ((__v2df) __A);
-}
-
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_i64 (__m128 __A)
-{
- return (long long) __builtin_ia32_cvtss2si64 ((__v4sf) __A);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvti64_sd (__m128d __A, long long __B)
-{
- return (__m128d) __builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvti64_ss (__m128 __A, long long __B)
-{
- return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
-}
-#endif /* __x86_64__ */
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_u32 (__m128d __A)
-{
- return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_u32 (__m128d __A)
-{
- return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_i32 (__m128d __A)
-{
- return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtps_pd (__m256 __A)
@@ -14770,70 +15245,6 @@ _mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getexp_ss (__m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss128_round ((__v4sf) __A,
- (__v4sf) __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getexp_sd (__m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd128_round ((__v2df) __A,
- (__v2df) __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_getmant_pd (__m512d __A, _MM_MANTISSA_NORM_ENUM __B,
@@ -14906,82 +15317,6 @@ _mm512_maskz_getmant_ps (__mmask16 __U, __m512 __A,
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getmant_sd (__m128d __A, __m128d __B, _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D)
-{
- return (__m128d) __builtin_ia32_getmantsd_round ((__v2df) __A,
- (__v2df) __B,
- (__D << 2) | __C,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getmant_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
- _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
-{
- return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__D << 2) | __C,
- (__v2df) __W,
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getmant_sd (__mmask8 __U, __m128d __A, __m128d __B,
- _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
-{
- return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A,
- (__v2df) __B,
- (__D << 2) | __C,
- (__v2df)
- _mm_setzero_pd(),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_getmant_ss (__m128 __A, __m128 __B, _MM_MANTISSA_NORM_ENUM __C,
- _MM_MANTISSA_SIGN_ENUM __D)
-{
- return (__m128) __builtin_ia32_getmantss_round ((__v4sf) __A,
- (__v4sf) __B,
- (__D << 2) | __C,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_getmant_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
- _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
-{
- return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__D << 2) | __C,
- (__v4sf) __W,
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_getmant_ss (__mmask8 __U, __m128 __A, __m128 __B,
- _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D)
-{
- return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A,
- (__v4sf) __B,
- (__D << 2) | __C,
- (__v4sf)
- _mm_setzero_ps(),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
#else
#define _mm512_getmant_pd(X, B, C) \
((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \
@@ -15023,74 +15358,6 @@ _mm_maskz_getmant_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)(U),\
_MM_FROUND_CUR_DIRECTION))
-#define _mm_getmant_sd(X, Y, C, D) \
- ((__m128d)__builtin_ia32_getmantsd_round ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (int)(((D)<<2) | (C)), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_sd(W, U, X, Y, C, D) \
- ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U),\
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_sd(U, X, Y, C, D) \
- ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U),\
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_getmant_ss(X, Y, C, D) \
- ((__m128)__builtin_ia32_getmantss_round ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), \
- (int)(((D)<<2) | (C)), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_ss(W, U, X, Y, C, D) \
- ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U),\
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_ss(U, X, Y, C, D) \
- ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U),\
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_getexp_ss(A, B) \
- ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getexp_ss(W, U, A, B) \
- (__m128)__builtin_ia32_getexpss_mask_round(A, B, W, U,\
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_getexp_ss(U, A, B) \
- (__m128)__builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U,\
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_getexp_sd(A, B) \
- ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), (__v2df)(__m128d)(B),\
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getexp_sd(W, U, A, B) \
- (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, W, U,\
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_getexp_sd(U, A, B) \
- (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U,\
- _MM_FROUND_CUR_DIRECTION)
-
#define _mm512_getexp_ps(A) \
((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION))
@@ -15185,87 +15452,6 @@ _mm512_maskz_roundscale_pd (__mmask8 __A, __m512d __B, const int __imm)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_roundscale_ss (__m128 __A, __m128 __B, const int __imm)
-{
- return (__m128)
- __builtin_ia32_rndscaless_mask_round ((__v4sf) __A,
- (__v4sf) __B, __imm,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_roundscale_ss (__m128 __A, __mmask8 __B, __m128 __C, __m128 __D,
- const int __imm)
-{
- return (__m128)
- __builtin_ia32_rndscaless_mask_round ((__v4sf) __C,
- (__v4sf) __D, __imm,
- (__v4sf) __A,
- (__mmask8) __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_roundscale_ss (__mmask8 __A, __m128 __B, __m128 __C,
- const int __imm)
-{
- return (__m128)
- __builtin_ia32_rndscaless_mask_round ((__v4sf) __B,
- (__v4sf) __C, __imm,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_roundscale_sd (__m128d __A, __m128d __B, const int __imm)
-{
- return (__m128d)
- __builtin_ia32_rndscalesd_mask_round ((__v2df) __A,
- (__v2df) __B, __imm,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_roundscale_sd (__m128d __A, __mmask8 __B, __m128d __C, __m128d __D,
- const int __imm)
-{
- return (__m128d)
- __builtin_ia32_rndscalesd_mask_round ((__v2df) __C,
- (__v2df) __D, __imm,
- (__v2df) __A,
- (__mmask8) __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_roundscale_sd (__mmask8 __A, __m128d __B, __m128d __C,
- const int __imm)
-{
- return (__m128d)
- __builtin_ia32_rndscalesd_mask_round ((__v2df) __B,
- (__v2df) __C, __imm,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
#else
#define _mm512_roundscale_ps(A, B) \
((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(A), (int)(B),\
@@ -15293,54 +15479,6 @@ _mm_maskz_roundscale_sd (__mmask8 __A, __m128d __B, __m128d __C,
(int)(C), \
(__v8df)_mm512_setzero_pd(),\
(__mmask8)(A), _MM_FROUND_CUR_DIRECTION))
-#define _mm_roundscale_ss(A, B, I) \
- ((__m128) \
- __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
- (__v4sf) (__m128) (B), \
- (int) (I), \
- (__v4sf) _mm_setzero_ps (), \
- (__mmask8) (-1), \
- _MM_FROUND_CUR_DIRECTION))
-#define _mm_mask_roundscale_ss(A, U, B, C, I) \
- ((__m128) \
- __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (B), \
- (__v4sf) (__m128) (C), \
- (int) (I), \
- (__v4sf) (__m128) (A), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
-#define _mm_maskz_roundscale_ss(U, A, B, I) \
- ((__m128) \
- __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \
- (__v4sf) (__m128) (B), \
- (int) (I), \
- (__v4sf) _mm_setzero_ps (), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
-#define _mm_roundscale_sd(A, B, I) \
- ((__m128d) \
- __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
- (__v2df) (__m128d) (B), \
- (int) (I), \
- (__v2df) _mm_setzero_pd (), \
- (__mmask8) (-1), \
- _MM_FROUND_CUR_DIRECTION))
-#define _mm_mask_roundscale_sd(A, U, B, C, I) \
- ((__m128d) \
- __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (B), \
- (__v2df) (__m128d) (C), \
- (int) (I), \
- (__v2df) (__m128d) (A), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
-#define _mm_maskz_roundscale_sd(U, A, B, I) \
- ((__m128d) \
- __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \
- (__v2df) (__m128d) (B), \
- (int) (I), \
- (__v2df) _mm_setzero_pd (), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
#endif
#ifdef __OPTIMIZE__
@@ -15384,46 +15522,6 @@ _mm512_mask_cmp_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y, const int __P)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmp_sd_mask (__m128d __X, __m128d __Y, const int __P)
-{
- return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
- (__v2df) __Y, __P,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cmp_sd_mask (__mmask8 __M, __m128d __X, __m128d __Y, const int __P)
-{
- return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X,
- (__v2df) __Y, __P,
- (__mmask8) __M,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmp_ss_mask (__m128 __X, __m128 __Y, const int __P)
-{
- return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
- (__v4sf) __Y, __P,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cmp_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, const int __P)
-{
- return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X,
- (__v4sf) __Y, __P,
- (__mmask8) __M,
- _MM_FROUND_CUR_DIRECTION);
-}
-
#else
#define _mm512_cmp_pd_mask(X, Y, P) \
((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
@@ -15445,25 +15543,6 @@ _mm_mask_cmp_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, const int __P)
(__v16sf)(__m512)(Y), (int)(P),\
(__mmask16)(M),_MM_FROUND_CUR_DIRECTION))
-#define _mm_cmp_sd_mask(X, Y, P) \
- ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P),\
- (__mmask8)-1,_MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
- ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P),\
- M,_MM_FROUND_CUR_DIRECTION))
-
-#define _mm_cmp_ss_mask(X, Y, P) \
- ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1,_MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
- ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- M,_MM_FROUND_CUR_DIRECTION))
#endif
extern __inline __mmask8
@@ -16493,9 +16572,9 @@ _mm512_mask_reduce_max_pd (__mmask8 __U, __m512d __A)
#undef __MM512_REDUCE_OP
-#ifdef __DISABLE_AVX512F__
-#undef __DISABLE_AVX512F__
+#ifdef __DISABLE_AVX512F_512__
+#undef __DISABLE_AVX512F_512__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX512F__ */
+#endif /* __DISABLE_AVX512F_512__ */
#endif /* _AVX512FINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512fp16intrin.h b/gcc/config/i386/avx512fp16intrin.h
index dd083e5..92c0c24e9 100644
--- a/gcc/config/i386/avx512fp16intrin.h
+++ b/gcc/config/i386/avx512fp16intrin.h
@@ -25,8 +25,8 @@
#error "Never use <avx512fp16intrin.h> directly; include <immintrin.h> instead."
#endif
-#ifndef __AVX512FP16INTRIN_H_INCLUDED
-#define __AVX512FP16INTRIN_H_INCLUDED
+#ifndef _AVX512FP16INTRIN_H_INCLUDED
+#define _AVX512FP16INTRIN_H_INCLUDED
#ifndef __AVX512FP16__
#pragma GCC push_options
@@ -37,21 +37,17 @@
/* Internal data types for implementing the intrinsics. */
typedef _Float16 __v8hf __attribute__ ((__vector_size__ (16)));
typedef _Float16 __v16hf __attribute__ ((__vector_size__ (32)));
-typedef _Float16 __v32hf __attribute__ ((__vector_size__ (64)));
/* The Intel API is flexible enough that we must allow aliasing with other
vector types, and their scalar components. */
typedef _Float16 __m128h __attribute__ ((__vector_size__ (16), __may_alias__));
typedef _Float16 __m256h __attribute__ ((__vector_size__ (32), __may_alias__));
-typedef _Float16 __m512h __attribute__ ((__vector_size__ (64), __may_alias__));
/* Unaligned version of the same type. */
typedef _Float16 __m128h_u __attribute__ ((__vector_size__ (16), \
__may_alias__, __aligned__ (1)));
typedef _Float16 __m256h_u __attribute__ ((__vector_size__ (32), \
__may_alias__, __aligned__ (1)));
-typedef _Float16 __m512h_u __attribute__ ((__vector_size__ (64), \
- __may_alias__, __aligned__ (1)));
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -78,33 +74,8 @@ _mm256_set_ph (_Float16 __A15, _Float16 __A14, _Float16 __A13,
__A12, __A13, __A14, __A15 };
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_set_ph (_Float16 __A31, _Float16 __A30, _Float16 __A29,
- _Float16 __A28, _Float16 __A27, _Float16 __A26,
- _Float16 __A25, _Float16 __A24, _Float16 __A23,
- _Float16 __A22, _Float16 __A21, _Float16 __A20,
- _Float16 __A19, _Float16 __A18, _Float16 __A17,
- _Float16 __A16, _Float16 __A15, _Float16 __A14,
- _Float16 __A13, _Float16 __A12, _Float16 __A11,
- _Float16 __A10, _Float16 __A9, _Float16 __A8,
- _Float16 __A7, _Float16 __A6, _Float16 __A5,
- _Float16 __A4, _Float16 __A3, _Float16 __A2,
- _Float16 __A1, _Float16 __A0)
-{
- return __extension__ (__m512h)(__v32hf){ __A0, __A1, __A2, __A3,
- __A4, __A5, __A6, __A7,
- __A8, __A9, __A10, __A11,
- __A12, __A13, __A14, __A15,
- __A16, __A17, __A18, __A19,
- __A20, __A21, __A22, __A23,
- __A24, __A25, __A26, __A27,
- __A28, __A29, __A30, __A31 };
-}
-
-/* Create vectors of elements in the reversed order from _mm_set_ph,
- _mm256_set_ph and _mm512_set_ph functions. */
-
+/* Create vectors of elements in the reversed order from _mm_set_ph
+ and _mm256_set_ph functions. */
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
@@ -128,30 +99,7 @@ _mm256_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
__A0);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
- _Float16 __A3, _Float16 __A4, _Float16 __A5,
- _Float16 __A6, _Float16 __A7, _Float16 __A8,
- _Float16 __A9, _Float16 __A10, _Float16 __A11,
- _Float16 __A12, _Float16 __A13, _Float16 __A14,
- _Float16 __A15, _Float16 __A16, _Float16 __A17,
- _Float16 __A18, _Float16 __A19, _Float16 __A20,
- _Float16 __A21, _Float16 __A22, _Float16 __A23,
- _Float16 __A24, _Float16 __A25, _Float16 __A26,
- _Float16 __A27, _Float16 __A28, _Float16 __A29,
- _Float16 __A30, _Float16 __A31)
-
-{
- return _mm512_set_ph (__A31, __A30, __A29, __A28, __A27, __A26, __A25,
- __A24, __A23, __A22, __A21, __A20, __A19, __A18,
- __A17, __A16, __A15, __A14, __A13, __A12, __A11,
- __A10, __A9, __A8, __A7, __A6, __A5, __A4, __A3,
- __A2, __A1, __A0);
-}
-
/* Broadcast _Float16 to vector. */
-
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ph (_Float16 __A)
@@ -167,18 +115,7 @@ _mm256_set1_ph (_Float16 __A)
__A, __A, __A, __A, __A, __A, __A, __A);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_set1_ph (_Float16 __A)
-{
- return _mm512_set_ph (__A, __A, __A, __A, __A, __A, __A, __A,
- __A, __A, __A, __A, __A, __A, __A, __A,
- __A, __A, __A, __A, __A, __A, __A, __A,
- __A, __A, __A, __A, __A, __A, __A, __A);
-}
-
/* Create a vector with all zeros. */
-
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ph (void)
@@ -193,13 +130,6 @@ _mm256_setzero_ph (void)
return _mm256_set1_ph (0.0f16);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_setzero_ph (void)
-{
- return _mm512_set1_ph (0.0f16);
-}
-
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ph (void)
@@ -222,24 +152,6 @@ _mm256_undefined_ph (void)
return __Y;
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_undefined_ph (void)
-{
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Winit-self"
- __m512h __Y = __Y;
-#pragma GCC diagnostic pop
- return __Y;
-}
-
-extern __inline _Float16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_h (__m128h __A)
-{
- return __A[0];
-}
-
extern __inline _Float16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvtsh_h (__m256h __A)
@@ -247,146 +159,6 @@ _mm256_cvtsh_h (__m256h __A)
return __A[0];
}
-extern __inline _Float16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtsh_h (__m512h __A)
-{
- return __A[0];
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph_ps (__m512h __a)
-{
- return (__m512) __a;
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph_pd (__m512h __a)
-{
- return (__m512d) __a;
-}
-
-extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph_si512 (__m512h __a)
-{
- return (__m512i) __a;
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph512_ph128 (__m512h __A)
-{
- union
- {
- __m128h __a[4];
- __m512h __v;
- } __u = { .__v = __A };
- return __u.__a[0];
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph512_ph256 (__m512h __A)
-{
- union
- {
- __m256h __a[2];
- __m512h __v;
- } __u = { .__v = __A };
- return __u.__a[0];
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph128_ph512 (__m128h __A)
-{
- union
- {
- __m128h __a[4];
- __m512h __v;
- } __u;
- __u.__a[0] = __A;
- return __u.__v;
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castph256_ph512 (__m256h __A)
-{
- union
- {
- __m256h __a[2];
- __m512h __v;
- } __u;
- __u.__a[0] = __A;
- return __u.__v;
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_zextph128_ph512 (__m128h __A)
-{
- return (__m512h) _mm512_insertf32x4 (_mm512_setzero_ps (),
- (__m128) __A, 0);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_zextph256_ph512 (__m256h __A)
-{
- return (__m512h) _mm512_insertf64x4 (_mm512_setzero_pd (),
- (__m256d) __A, 0);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castps_ph (__m512 __a)
-{
- return (__m512h) __a;
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castpd_ph (__m512d __a)
-{
- return (__m512h) __a;
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_castsi512_ph (__m512i __a)
-{
- return (__m512h) __a;
-}
-
-/* Create a vector with element 0 as F and the rest zero. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_sh (_Float16 __F)
-{
- return _mm_set_ph (0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16,
- __F);
-}
-
-/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_sh (void const *__P)
-{
- return _mm_set_ph (0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16,
- *(_Float16 const *) __P);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_load_ph (void const *__P)
-{
- return *(const __m512h *) __P;
-}
-
extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_load_ph (void const *__P)
@@ -401,13 +173,6 @@ _mm_load_ph (void const *__P)
return *(const __m128h *) __P;
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_loadu_ph (void const *__P)
-{
- return *(const __m512h_u *) __P;
-}
-
extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_loadu_ph (void const *__P)
@@ -422,21 +187,6 @@ _mm_loadu_ph (void const *__P)
return *(const __m128h_u *) __P;
}
-/* Stores the lower _Float16 value. */
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_sh (void *__P, __m128h __A)
-{
- *(_Float16 *) __P = ((__v8hf)__A)[0];
-}
-
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_store_ph (void *__P, __m512h __A)
-{
- *(__m512h *) __P = __A;
-}
-
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_store_ph (void *__P, __m256h __A)
@@ -453,13 +203,6 @@ _mm_store_ph (void *__P, __m128h __A)
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_storeu_ph (void *__P, __m512h __A)
-{
- *(__m512h_u *) __P = __A;
-}
-
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_storeu_ph (void *__P, __m256h __A)
{
*(__m256h_u *) __P = __A;
@@ -472,290 +215,30 @@ _mm_storeu_ph (void *__P, __m128h __A)
*(__m128h_u *) __P = __A;
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_abs_ph (__m512h __A)
-{
- return (__m512h) _mm512_and_epi32 ( _mm512_set1_epi32 (0x7FFF7FFF),
- (__m512i) __A);
-}
-
-/* Intrinsics v[add,sub,mul,div]ph. */
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_add_ph (__m512h __A, __m512h __B)
-{
- return (__m512h) ((__v32hf) __A + (__v32hf) __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_add_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_addph512_mask (__C, __D, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_add_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_addph512_mask (__B, __C,
- _mm512_setzero_ph (), __A);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_sub_ph (__m512h __A, __m512h __B)
-{
- return (__m512h) ((__v32hf) __A - (__v32hf) __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_sub_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_subph512_mask (__C, __D, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_sub_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_subph512_mask (__B, __C,
- _mm512_setzero_ph (), __A);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mul_ph (__m512h __A, __m512h __B)
-{
- return (__m512h) ((__v32hf) __A * (__v32hf) __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_mul_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_mulph512_mask (__C, __D, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_mul_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_mulph512_mask (__B, __C,
- _mm512_setzero_ph (), __A);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_div_ph (__m512h __A, __m512h __B)
-{
- return (__m512h) ((__v32hf) __A / (__v32hf) __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_div_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_divph512_mask (__C, __D, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_div_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_divph512_mask (__B, __C,
- _mm512_setzero_ph (), __A);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_add_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_addph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_add_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_addph512_mask_round (__C, __D, __A, __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_add_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_addph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_sub_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_subph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_sub_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_subph512_mask_round (__C, __D, __A, __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_sub_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_subph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mul_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_mulph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_mul_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_mulph512_mask_round (__C, __D, __A, __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_mul_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_mulph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_div_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_divph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_div_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_divph512_mask_round (__C, __D, __A, __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_div_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_divph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-#else
-#define _mm512_add_round_ph(A, B, C) \
- ((__m512h)__builtin_ia32_addph512_mask_round((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_add_round_ph(A, B, C, D, E) \
- ((__m512h)__builtin_ia32_addph512_mask_round((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_add_round_ph(A, B, C, D) \
- ((__m512h)__builtin_ia32_addph512_mask_round((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#define _mm512_sub_round_ph(A, B, C) \
- ((__m512h)__builtin_ia32_subph512_mask_round((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_sub_round_ph(A, B, C, D, E) \
- ((__m512h)__builtin_ia32_subph512_mask_round((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_sub_round_ph(A, B, C, D) \
- ((__m512h)__builtin_ia32_subph512_mask_round((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#define _mm512_mul_round_ph(A, B, C) \
- ((__m512h)__builtin_ia32_mulph512_mask_round((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_mul_round_ph(A, B, C, D, E) \
- ((__m512h)__builtin_ia32_mulph512_mask_round((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_mul_round_ph(A, B, C, D) \
- ((__m512h)__builtin_ia32_mulph512_mask_round((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#define _mm512_div_round_ph(A, B, C) \
- ((__m512h)__builtin_ia32_divph512_mask_round((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_div_round_ph(A, B, C, D, E) \
- ((__m512h)__builtin_ia32_divph512_mask_round((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_div_round_ph(A, B, C, D) \
- ((__m512h)__builtin_ia32_divph512_mask_round((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-#endif /* __OPTIMIZE__ */
-
-extern __inline __m512h
+/* Create a vector with element 0 as F and the rest zero. */
+extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_conj_pch (__m512h __A)
+_mm_set_sh (_Float16 __F)
{
- return (__m512h) _mm512_xor_epi32 ((__m512i) __A, _mm512_set1_epi32 (1<<31));
+ return _mm_set_ph (0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16,
+ __F);
}
-extern __inline __m512h
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_conj_pch (__m512h __W, __mmask16 __U, __m512h __A)
+_mm_load_sh (void const *__P)
{
- return (__m512h)
- __builtin_ia32_movaps512_mask ((__v16sf) _mm512_conj_pch (__A),
- (__v16sf) __W,
- (__mmask16) __U);
+ return _mm_set_ph (0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16, 0.0f16,
+ *(_Float16 const *) __P);
}
-extern __inline __m512h
+/* Stores the lower _Float16 value. */
+extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_conj_pch (__mmask16 __U, __m512h __A)
+_mm_store_sh (void *__P, __m128h __A)
{
- return (__m512h)
- __builtin_ia32_movaps512_mask ((__v16sf) _mm512_conj_pch (__A),
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) __U);
+ *(_Float16 *) __P = ((__v8hf)__A)[0];
}
/* Intrinsics of v[add,sub,mul,div]sh. */
@@ -1012,138 +495,6 @@ _mm_maskz_div_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
(A), (D)))
#endif /* __OPTIMIZE__ */
-/* Intrinsic vmaxph vminph. */
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_max_ph (__m512h __A, __m512h __B)
-{
- return __builtin_ia32_maxph512_mask (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_max_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_maxph512_mask (__C, __D, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_max_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_maxph512_mask (__B, __C,
- _mm512_setzero_ph (), __A);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_min_ph (__m512h __A, __m512h __B)
-{
- return __builtin_ia32_minph512_mask (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_min_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_minph512_mask (__C, __D, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_min_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_minph512_mask (__B, __C,
- _mm512_setzero_ph (), __A);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_max_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_maxph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_max_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_maxph512_mask_round (__C, __D, __A, __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_max_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_maxph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_min_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_minph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_min_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_minph512_mask_round (__C, __D, __A, __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_min_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_minph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-#else
-#define _mm512_max_round_ph(A, B, C) \
- (__builtin_ia32_maxph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_max_round_ph(A, B, C, D, E) \
- (__builtin_ia32_maxph512_mask_round ((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_max_round_ph(A, B, C, D) \
- (__builtin_ia32_maxph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#define _mm512_min_round_ph(A, B, C) \
- (__builtin_ia32_minph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_min_round_ph(A, B, C, D, E) \
- (__builtin_ia32_minph512_mask_round ((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_min_round_ph(A, B, C, D) \
- (__builtin_ia32_minph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-#endif /* __OPTIMIZE__ */
-
/* Intrinsic vmaxsh vminsh. */
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -1275,60 +626,6 @@ _mm_maskz_min_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
#endif /* __OPTIMIZE__ */
-/* vcmpph */
-#ifdef __OPTIMIZE
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cmp_ph_mask (__m512h __A, __m512h __B, const int __C)
-{
- return (__mmask32) __builtin_ia32_cmpph512_mask (__A, __B, __C,
- (__mmask32) -1);
-}
-
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cmp_ph_mask (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return (__mmask32) __builtin_ia32_cmpph512_mask (__B, __C, __D,
- __A);
-}
-
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cmp_round_ph_mask (__m512h __A, __m512h __B, const int __C,
- const int __D)
-{
- return (__mmask32) __builtin_ia32_cmpph512_mask_round (__A, __B,
- __C, (__mmask32) -1,
- __D);
-}
-
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cmp_round_ph_mask (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D, const int __E)
-{
- return (__mmask32) __builtin_ia32_cmpph512_mask_round (__B, __C,
- __D, __A,
- __E);
-}
-
-#else
-#define _mm512_cmp_ph_mask(A, B, C) \
- (__builtin_ia32_cmpph512_mask ((A), (B), (C), (-1)))
-
-#define _mm512_mask_cmp_ph_mask(A, B, C, D) \
- (__builtin_ia32_cmpph512_mask ((B), (C), (D), (A)))
-
-#define _mm512_cmp_round_ph_mask(A, B, C, D) \
- (__builtin_ia32_cmpph512_mask_round ((A), (B), (C), (-1), (D)))
-
-#define _mm512_mask_cmp_round_ph_mask(A, B, C, D, E) \
- (__builtin_ia32_cmpph512_mask_round ((B), (C), (D), (A), (E)))
-
-#endif /* __OPTIMIZE__ */
-
/* Intrinsics vcmpsh. */
#ifdef __OPTIMIZE__
extern __inline __mmask8
@@ -1525,126 +822,6 @@ _mm_comi_round_sh (__m128h __A, __m128h __B, const int __P, const int __R)
#endif /* __OPTIMIZE__ */
-/* Intrinsics vsqrtph. */
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_sqrt_ph (__m512h __A)
-{
- return __builtin_ia32_sqrtph512_mask_round (__A,
- _mm512_setzero_ph(),
- (__mmask32) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_sqrt_ph (__m512h __A, __mmask32 __B, __m512h __C)
-{
- return __builtin_ia32_sqrtph512_mask_round (__C, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_sqrt_ph (__mmask32 __A, __m512h __B)
-{
- return __builtin_ia32_sqrtph512_mask_round (__B,
- _mm512_setzero_ph (),
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_sqrt_round_ph (__m512h __A, const int __B)
-{
- return __builtin_ia32_sqrtph512_mask_round (__A,
- _mm512_setzero_ph(),
- (__mmask32) -1, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_sqrt_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_sqrtph512_mask_round (__C, __A, __B, __D);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_sqrt_round_ph (__mmask32 __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_sqrtph512_mask_round (__B,
- _mm512_setzero_ph (),
- __A, __C);
-}
-
-#else
-#define _mm512_sqrt_round_ph(A, B) \
- (__builtin_ia32_sqrtph512_mask_round ((A), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (B)))
-
-#define _mm512_mask_sqrt_round_ph(A, B, C, D) \
- (__builtin_ia32_sqrtph512_mask_round ((C), (A), (B), (D)))
-
-#define _mm512_maskz_sqrt_round_ph(A, B, C) \
- (__builtin_ia32_sqrtph512_mask_round ((B), \
- _mm512_setzero_ph (), \
- (A), (C)))
-
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vrsqrtph. */
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_rsqrt_ph (__m512h __A)
-{
- return __builtin_ia32_rsqrtph512_mask (__A, _mm512_setzero_ph (),
- (__mmask32) -1);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_rsqrt_ph (__m512h __A, __mmask32 __B, __m512h __C)
-{
- return __builtin_ia32_rsqrtph512_mask (__C, __A, __B);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_rsqrt_ph (__mmask32 __A, __m512h __B)
-{
- return __builtin_ia32_rsqrtph512_mask (__B, _mm512_setzero_ph (),
- __A);
-}
-
-/* Intrinsics vrsqrtsh. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_sh (__m128h __A, __m128h __B)
-{
- return __builtin_ia32_rsqrtsh_mask (__B, __A, _mm_setzero_ph (),
- (__mmask8) -1);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_rsqrt_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
-{
- return __builtin_ia32_rsqrtsh_mask (__D, __C, __A, __B);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_rsqrt_sh (__mmask8 __A, __m128h __B, __m128h __C)
-{
- return __builtin_ia32_rsqrtsh_mask (__C, __B, _mm_setzero_ph (),
- __A);
-}
-
/* Intrinsics vsqrtsh. */
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -1718,28 +895,28 @@ _mm_maskz_sqrt_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
#endif /* __OPTIMIZE__ */
-/* Intrinsics vrcpph. */
-extern __inline __m512h
+/* Intrinsics vrsqrtsh. */
+extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_rcp_ph (__m512h __A)
+_mm_rsqrt_sh (__m128h __A, __m128h __B)
{
- return __builtin_ia32_rcpph512_mask (__A, _mm512_setzero_ph (),
- (__mmask32) -1);
+ return __builtin_ia32_rsqrtsh_mask (__B, __A, _mm_setzero_ph (),
+ (__mmask8) -1);
}
-extern __inline __m512h
+extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_rcp_ph (__m512h __A, __mmask32 __B, __m512h __C)
+_mm_mask_rsqrt_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
{
- return __builtin_ia32_rcpph512_mask (__C, __A, __B);
+ return __builtin_ia32_rsqrtsh_mask (__D, __C, __A, __B);
}
-extern __inline __m512h
+extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_rcp_ph (__mmask32 __A, __m512h __B)
+_mm_maskz_rsqrt_sh (__mmask8 __A, __m128h __B, __m128h __C)
{
- return __builtin_ia32_rcpph512_mask (__B, _mm512_setzero_ph (),
- __A);
+ return __builtin_ia32_rsqrtsh_mask (__C, __B, _mm_setzero_ph (),
+ __A);
}
/* Intrinsics vrcpsh. */
@@ -1766,80 +943,6 @@ _mm_maskz_rcp_sh (__mmask32 __A, __m128h __B, __m128h __C)
__A);
}
-/* Intrinsics vscalefph. */
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_scalef_ph (__m512h __A, __m512h __B)
-{
- return __builtin_ia32_scalefph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_scalef_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
-{
- return __builtin_ia32_scalefph512_mask_round (__C, __D, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_scalef_ph (__mmask32 __A, __m512h __B, __m512h __C)
-{
- return __builtin_ia32_scalefph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_scalef_round_ph (__m512h __A, __m512h __B, const int __C)
-{
- return __builtin_ia32_scalefph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_scalef_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- __m512h __D, const int __E)
-{
- return __builtin_ia32_scalefph512_mask_round (__C, __D, __A, __B,
- __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_scalef_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
- const int __D)
-{
- return __builtin_ia32_scalefph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-#else
-#define _mm512_scalef_round_ph(A, B, C) \
- (__builtin_ia32_scalefph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_scalef_round_ph(A, B, C, D, E) \
- (__builtin_ia32_scalefph512_mask_round ((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_scalef_round_ph(A, B, C, D) \
- (__builtin_ia32_scalefph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#endif /* __OPTIMIZE__ */
-
/* Intrinsics vscalefsh. */
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -1913,95 +1016,6 @@ _mm_maskz_scalef_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
#endif /* __OPTIMIZE__ */
-/* Intrinsics vreduceph. */
-#ifdef __OPTIMIZE__
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_reduce_ph (__m512h __A, int __B)
-{
- return __builtin_ia32_reduceph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_reduce_ph (__m512h __A, __mmask32 __B, __m512h __C, int __D)
-{
- return __builtin_ia32_reduceph512_mask_round (__C, __D, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_reduce_ph (__mmask32 __A, __m512h __B, int __C)
-{
- return __builtin_ia32_reduceph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_reduce_round_ph (__m512h __A, int __B, const int __C)
-{
- return __builtin_ia32_reduceph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_reduce_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
- int __D, const int __E)
-{
- return __builtin_ia32_reduceph512_mask_round (__C, __D, __A, __B,
- __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_reduce_round_ph (__mmask32 __A, __m512h __B, int __C,
- const int __D)
-{
- return __builtin_ia32_reduceph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-#else
-#define _mm512_reduce_ph(A, B) \
- (__builtin_ia32_reduceph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_reduce_ph(A, B, C, D) \
- (__builtin_ia32_reduceph512_mask_round ((C), (D), (A), (B), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_reduce_ph(A, B, C) \
- (__builtin_ia32_reduceph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_reduce_round_ph(A, B, C) \
- (__builtin_ia32_reduceph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_reduce_round_ph(A, B, C, D, E) \
- (__builtin_ia32_reduceph512_mask_round ((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_reduce_round_ph(A, B, C, D) \
- (__builtin_ia32_reduceph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#endif /* __OPTIMIZE__ */
-
/* Intrinsics vreducesh. */
#ifdef __OPTIMIZE__
extern __inline __m128h
@@ -2091,97 +1105,6 @@ _mm_maskz_reduce_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
#endif /* __OPTIMIZE__ */
-/* Intrinsics vrndscaleph. */
-#ifdef __OPTIMIZE__
-extern __inline __m512h
- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_roundscale_ph (__m512h __A, int __B)
-{
- return __builtin_ia32_rndscaleph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_roundscale_ph (__m512h __A, __mmask32 __B,
- __m512h __C, int __D)
-{
- return __builtin_ia32_rndscaleph512_mask_round (__C, __D, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_roundscale_ph (__mmask32 __A, __m512h __B, int __C)
-{
- return __builtin_ia32_rndscaleph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_roundscale_round_ph (__m512h __A, int __B, const int __C)
-{
- return __builtin_ia32_rndscaleph512_mask_round (__A, __B,
- _mm512_setzero_ph (),
- (__mmask32) -1,
- __C);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_roundscale_round_ph (__m512h __A, __mmask32 __B,
- __m512h __C, int __D, const int __E)
-{
- return __builtin_ia32_rndscaleph512_mask_round (__C, __D, __A,
- __B, __E);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_roundscale_round_ph (__mmask32 __A, __m512h __B, int __C,
- const int __D)
-{
- return __builtin_ia32_rndscaleph512_mask_round (__B, __C,
- _mm512_setzero_ph (),
- __A, __D);
-}
-
-#else
-#define _mm512_roundscale_ph(A, B) \
- (__builtin_ia32_rndscaleph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_ph(A, B, C, D) \
- (__builtin_ia32_rndscaleph512_mask_round ((C), (D), (A), (B), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_roundscale_ph(A, B, C) \
- (__builtin_ia32_rndscaleph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), \
- _MM_FROUND_CUR_DIRECTION))
-#define _mm512_roundscale_round_ph(A, B, C) \
- (__builtin_ia32_rndscaleph512_mask_round ((A), (B), \
- _mm512_setzero_ph (), \
- (__mmask32)-1, (C)))
-
-#define _mm512_mask_roundscale_round_ph(A, B, C, D, E) \
- (__builtin_ia32_rndscaleph512_mask_round ((C), (D), (A), (B), (E)))
-
-#define _mm512_maskz_roundscale_round_ph(A, B, C, D) \
- (__builtin_ia32_rndscaleph512_mask_round ((B), (C), \
- _mm512_setzero_ph (), \
- (A), (D)))
-
-#endif /* __OPTIMIZE__ */
-
/* Intrinsics vrndscalesh. */
#ifdef __OPTIMIZE__
extern __inline __m128h
@@ -2297,39 +1220,10 @@ _mm_mask_fpclass_sh_mask (__mmask8 __U, __m128h __A, const int __imm)
#define _mm_mask_fpclass_sh_mask(U, X, C) \
((__mmask8) __builtin_ia32_fpclasssh_mask ((__v8hf) (__m128h) (X), \
(int) (C), (__mmask8) (U)))
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vfpclassph. */
-#ifdef __OPTIMIZE__
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_fpclass_ph_mask (__mmask32 __U, __m512h __A,
- const int __imm)
-{
- return (__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) __A,
- __imm, __U);
-}
-
-extern __inline __mmask32
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_fpclass_ph_mask (__m512h __A, const int __imm)
-{
- return (__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) __A,
- __imm,
- (__mmask32) -1);
-}
-
-#else
-#define _mm512_mask_fpclass_ph_mask(u, x, c) \
- ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \
- (int) (c),(__mmask8)(u)))
-#define _mm512_fpclass_ph_mask(x, c) \
- ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \
- (int) (c),(__mmask8)-1))
-#endif /* __OPIMTIZE__ */
+#endif /* __OPTIMIZE__ */
-/* Intrinsics vgetexpph, vgetexpsh. */
+/* Intrinsics vgetexpsh. */
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_getexp_sh (__m128h __A, __m128h __B)
@@ -2362,35 +1256,6 @@ _mm_maskz_getexp_sh (__mmask8 __U, __m128h __A, __m128h __B)
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_getexp_ph (__m512h __A)
-{
- return (__m512h)
- __builtin_ia32_getexpph512_mask ((__v32hf) __A,
- (__v32hf) _mm512_setzero_ph (),
- (__mmask32) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_getexp_ph (__m512h __W, __mmask32 __U, __m512h __A)
-{
- return (__m512h)
- __builtin_ia32_getexpph512_mask ((__v32hf) __A, (__v32hf) __W,
- (__mmask32) __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_getexp_ph (__mmask32 __U, __m512h __A)
-{
- return (__m512h)
- __builtin_ia32_getexpph512_mask ((__v32hf) __A,
- (__v32hf) _mm512_setzero_ph (),
- (__mmask32) __U, _MM_FROUND_CUR_DIRECTION);
-}
-
#ifdef __OPTIMIZE__
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -2426,36 +1291,6 @@ _mm_maskz_getexp_round_sh (__mmask8 __U, __m128h __A, __m128h __B,
(__mmask8) __U, __R);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_getexp_round_ph (__m512h __A, const int __R)
-{
- return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A,
- (__v32hf)
- _mm512_setzero_ph (),
- (__mmask32) -1, __R);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_getexp_round_ph (__m512h __W, __mmask32 __U, __m512h __A,
- const int __R)
-{
- return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A,
- (__v32hf) __W,
- (__mmask32) __U, __R);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_getexp_round_ph (__mmask32 __U, __m512h __A, const int __R)
-{
- return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A,
- (__v32hf)
- _mm512_setzero_ph (),
- (__mmask32) __U, __R);
-}
-
#else
#define _mm_getexp_round_sh(A, B, R) \
((__m128h)__builtin_ia32_getexpsh_mask_round((__v8hf)(__m128h)(A), \
@@ -2471,21 +1306,9 @@ _mm512_maskz_getexp_round_ph (__mmask32 __U, __m512h __A, const int __R)
(__v8hf)_mm_setzero_ph(), \
U, C)
-#define _mm512_getexp_round_ph(A, R) \
- ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
- (__v32hf)_mm512_setzero_ph(), (__mmask32)-1, R))
-
-#define _mm512_mask_getexp_round_ph(W, U, A, R) \
- ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
- (__v32hf)(__m512h)(W), (__mmask32)(U), R))
-
-#define _mm512_maskz_getexp_round_ph(U, A, R) \
- ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
- (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), R))
-
#endif /* __OPTIMIZE__ */
-/* Intrinsics vgetmantph, vgetmantsh. */
+/* Intrinsics vgetmantsh. */
#ifdef __OPTIMIZE__
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -2525,44 +1348,6 @@ _mm_maskz_getmant_sh (__mmask8 __U, __m128h __A, __m128h __B,
__U, _MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_getmant_ph (__m512h __A, _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C)
-{
- return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
- (__C << 2) | __B,
- _mm512_setzero_ph (),
- (__mmask32) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_getmant_ph (__m512h __W, __mmask32 __U, __m512h __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C)
-{
- return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
- (__C << 2) | __B,
- (__v32hf) __W, __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_getmant_ph (__mmask32 __U, __m512h __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C)
-{
- return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
- (__C << 2) | __B,
- (__v32hf)
- _mm512_setzero_ph (),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
extern __inline __m128h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_getmant_round_sh (__m128h __A, __m128h __B,
@@ -2604,67 +1389,7 @@ _mm_maskz_getmant_round_sh (__mmask8 __U, __m128h __A, __m128h __B,
__U, __R);
}
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_getmant_round_ph (__m512h __A, _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
- (__C << 2) | __B,
- _mm512_setzero_ph (),
- (__mmask32) -1, __R);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_getmant_round_ph (__m512h __W, __mmask32 __U, __m512h __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
- (__C << 2) | __B,
- (__v32hf) __W, __U,
- __R);
-}
-
-extern __inline __m512h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_getmant_round_ph (__mmask32 __U, __m512h __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
- (__C << 2) | __B,
- (__v32hf)
- _mm512_setzero_ph (),
- __U, __R);
-}
-
#else
-#define _mm512_getmant_ph(X, B, C) \
- ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
- (int)(((C)<<2) | (B)), \
- (__v32hf)(__m512h) \
- _mm512_setzero_ph(), \
- (__mmask32)-1, \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_getmant_ph(W, U, X, B, C) \
- ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
- (int)(((C)<<2) | (B)), \
- (__v32hf)(__m512h)(W), \
- (__mmask32)(U), \
- _MM_FROUND_CUR_DIRECTION))
-
-
-#define _mm512_maskz_getmant_ph(U, X, B, C) \
- ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
- (int)(((C)<<2) | (B)), \
- (__v32hf)(__m512h) \
- _mm512_setzero_ph(), \
- (__mmask32)(U), \
- _MM_FROUND_CUR_DIRECTION))
-
#define _mm_getmant_sh(X, Y, C, D) \
((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \
(__v8hf)(__m128h)(Y), \
@@ -2691,30 +1416,6 @@ _mm512_maskz_getmant_round_ph (__mmask32 __U, __m512h __A,
(__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
-#define _mm512_getmant_round_ph(X, B, C, R) \
- ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
- (int)(((C)<<2) | (B)), \
- (__v32hf)(__m512h) \
- _mm512_setzero_ph(), \
- (__mmask32)-1, \
- (R)))
-
-#define _mm512_mask_getmant_round_ph(W, U, X, B, C, R) \
- ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
- (int)(((C)<<2) | (B)), \
- (__v32hf)(__m512h)(W), \
- (__mmask32)(U), \
- (R)))
-
-
-#define _mm512_maskz_getmant_round_ph(U, X, B, C, R) \
- ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
- (int)(((C)<<2) | (B)), \
- (__v32hf)(__m512h) \
- _mm512_setzero_ph(), \
- (__mmask32)(U), \
- (R)))
-
#define _mm_getmant_round_sh(X, Y, C, D, R) \
((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \
(__v8hf)(__m128h)(Y), \
@@ -2802,6 +1503,2674 @@ _mm_maskz_move_sh (__mmask8 __A, __m128h __B, __m128h __C)
return __builtin_ia32_vmovsh_mask (__B, __C, _mm_setzero_ph (), __A);
}
+/* Intrinsics vcvtsh2si, vcvtsh2us. */
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_i32 (__m128h __A)
+{
+ return (int) __builtin_ia32_vcvtsh2si32_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_u32 (__m128h __A)
+{
+ return (int) __builtin_ia32_vcvtsh2usi32_round (__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsh_i32 (__m128h __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvtsh2si32_round (__A, __R);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsh_u32 (__m128h __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvtsh2usi32_round (__A, __R);
+}
+
+#else
+#define _mm_cvt_roundsh_i32(A, B) \
+ ((int)__builtin_ia32_vcvtsh2si32_round ((A), (B)))
+#define _mm_cvt_roundsh_u32(A, B) \
+ ((int)__builtin_ia32_vcvtsh2usi32_round ((A), (B)))
+
+#endif /* __OPTIMIZE__ */
+
+#ifdef __x86_64__
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_i64 (__m128h __A)
+{
+ return (long long)
+ __builtin_ia32_vcvtsh2si64_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_u64 (__m128h __A)
+{
+ return (long long)
+ __builtin_ia32_vcvtsh2usi64_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsh_i64 (__m128h __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvtsh2si64_round (__A, __R);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsh_u64 (__m128h __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvtsh2usi64_round (__A, __R);
+}
+
+#else
+#define _mm_cvt_roundsh_i64(A, B) \
+ ((long long)__builtin_ia32_vcvtsh2si64_round ((A), (B)))
+#define _mm_cvt_roundsh_u64(A, B) \
+ ((long long)__builtin_ia32_vcvtsh2usi64_round ((A), (B)))
+
+#endif /* __OPTIMIZE__ */
+#endif /* __x86_64__ */
+
+/* Intrinsics vcvtsi2sh, vcvtusi2sh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti32_sh (__m128h __A, int __B)
+{
+ return __builtin_ia32_vcvtsi2sh32_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtu32_sh (__m128h __A, unsigned int __B)
+{
+ return __builtin_ia32_vcvtusi2sh32_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundi32_sh (__m128h __A, int __B, const int __R)
+{
+ return __builtin_ia32_vcvtsi2sh32_round (__A, __B, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundu32_sh (__m128h __A, unsigned int __B, const int __R)
+{
+ return __builtin_ia32_vcvtusi2sh32_round (__A, __B, __R);
+}
+
+#else
+#define _mm_cvt_roundi32_sh(A, B, C) \
+ (__builtin_ia32_vcvtsi2sh32_round ((A), (B), (C)))
+#define _mm_cvt_roundu32_sh(A, B, C) \
+ (__builtin_ia32_vcvtusi2sh32_round ((A), (B), (C)))
+
+#endif /* __OPTIMIZE__ */
+
+#ifdef __x86_64__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti64_sh (__m128h __A, long long __B)
+{
+ return __builtin_ia32_vcvtsi2sh64_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtu64_sh (__m128h __A, unsigned long long __B)
+{
+ return __builtin_ia32_vcvtusi2sh64_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundi64_sh (__m128h __A, long long __B, const int __R)
+{
+ return __builtin_ia32_vcvtsi2sh64_round (__A, __B, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundu64_sh (__m128h __A, unsigned long long __B, const int __R)
+{
+ return __builtin_ia32_vcvtusi2sh64_round (__A, __B, __R);
+}
+
+#else
+#define _mm_cvt_roundi64_sh(A, B, C) \
+ (__builtin_ia32_vcvtsi2sh64_round ((A), (B), (C)))
+#define _mm_cvt_roundu64_sh(A, B, C) \
+ (__builtin_ia32_vcvtusi2sh64_round ((A), (B), (C)))
+
+#endif /* __OPTIMIZE__ */
+#endif /* __x86_64__ */
+
+/* Intrinsics vcvttsh2si, vcvttsh2us. */
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsh_i32 (__m128h __A)
+{
+ return (int)
+ __builtin_ia32_vcvttsh2si32_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsh_u32 (__m128h __A)
+{
+ return (int)
+ __builtin_ia32_vcvttsh2usi32_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsh_i32 (__m128h __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvttsh2si32_round (__A, __R);
+}
+
+extern __inline unsigned
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsh_u32 (__m128h __A, const int __R)
+{
+ return (int) __builtin_ia32_vcvttsh2usi32_round (__A, __R);
+}
+
+#else
+#define _mm_cvtt_roundsh_i32(A, B) \
+ ((int)__builtin_ia32_vcvttsh2si32_round ((A), (B)))
+#define _mm_cvtt_roundsh_u32(A, B) \
+ ((int)__builtin_ia32_vcvttsh2usi32_round ((A), (B)))
+
+#endif /* __OPTIMIZE__ */
+
+#ifdef __x86_64__
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsh_i64 (__m128h __A)
+{
+ return (long long)
+ __builtin_ia32_vcvttsh2si64_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsh_u64 (__m128h __A)
+{
+ return (long long)
+ __builtin_ia32_vcvttsh2usi64_round (__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsh_i64 (__m128h __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvttsh2si64_round (__A, __R);
+}
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_roundsh_u64 (__m128h __A, const int __R)
+{
+ return (long long) __builtin_ia32_vcvttsh2usi64_round (__A, __R);
+}
+
+#else
+#define _mm_cvtt_roundsh_i64(A, B) \
+ ((long long)__builtin_ia32_vcvttsh2si64_round ((A), (B)))
+#define _mm_cvtt_roundsh_u64(A, B) \
+ ((long long)__builtin_ia32_vcvttsh2usi64_round ((A), (B)))
+
+#endif /* __OPTIMIZE__ */
+#endif /* __x86_64__ */
+
+/* Intrinsics vcvtsh2ss, vcvtsh2sd. */
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_ss (__m128 __A, __m128h __B)
+{
+ return __builtin_ia32_vcvtsh2ss_mask_round (__B, __A,
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsh_ss (__m128 __A, __mmask8 __B, __m128 __C,
+ __m128h __D)
+{
+ return __builtin_ia32_vcvtsh2ss_mask_round (__D, __C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsh_ss (__mmask8 __A, __m128 __B,
+ __m128h __C)
+{
+ return __builtin_ia32_vcvtsh2ss_mask_round (__C, __B,
+ _mm_setzero_ps (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_sd (__m128d __A, __m128h __B)
+{
+ return __builtin_ia32_vcvtsh2sd_mask_round (__B, __A,
+ _mm_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsh_sd (__m128d __A, __mmask8 __B, __m128d __C,
+ __m128h __D)
+{
+ return __builtin_ia32_vcvtsh2sd_mask_round (__D, __C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsh_sd (__mmask8 __A, __m128d __B, __m128h __C)
+{
+ return __builtin_ia32_vcvtsh2sd_mask_round (__C, __B,
+ _mm_setzero_pd (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsh_ss (__m128 __A, __m128h __B, const int __R)
+{
+ return __builtin_ia32_vcvtsh2ss_mask_round (__B, __A,
+ _mm_setzero_ps (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundsh_ss (__m128 __A, __mmask8 __B, __m128 __C,
+ __m128h __D, const int __R)
+{
+ return __builtin_ia32_vcvtsh2ss_mask_round (__D, __C, __A, __B, __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundsh_ss (__mmask8 __A, __m128 __B,
+ __m128h __C, const int __R)
+{
+ return __builtin_ia32_vcvtsh2ss_mask_round (__C, __B,
+ _mm_setzero_ps (),
+ __A, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsh_sd (__m128d __A, __m128h __B, const int __R)
+{
+ return __builtin_ia32_vcvtsh2sd_mask_round (__B, __A,
+ _mm_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundsh_sd (__m128d __A, __mmask8 __B, __m128d __C,
+ __m128h __D, const int __R)
+{
+ return __builtin_ia32_vcvtsh2sd_mask_round (__D, __C, __A, __B, __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundsh_sd (__mmask8 __A, __m128d __B, __m128h __C, const int __R)
+{
+ return __builtin_ia32_vcvtsh2sd_mask_round (__C, __B,
+ _mm_setzero_pd (),
+ __A, __R);
+}
+
+#else
+#define _mm_cvt_roundsh_ss(A, B, R) \
+ (__builtin_ia32_vcvtsh2ss_mask_round ((B), (A), \
+ _mm_setzero_ps (), \
+ (__mmask8) -1, (R)))
+
+#define _mm_mask_cvt_roundsh_ss(A, B, C, D, R) \
+ (__builtin_ia32_vcvtsh2ss_mask_round ((D), (C), (A), (B), (R)))
+
+#define _mm_maskz_cvt_roundsh_ss(A, B, C, R) \
+ (__builtin_ia32_vcvtsh2ss_mask_round ((C), (B), \
+ _mm_setzero_ps (), \
+ (A), (R)))
+
+#define _mm_cvt_roundsh_sd(A, B, R) \
+ (__builtin_ia32_vcvtsh2sd_mask_round ((B), (A), \
+ _mm_setzero_pd (), \
+ (__mmask8) -1, (R)))
+
+#define _mm_mask_cvt_roundsh_sd(A, B, C, D, R) \
+ (__builtin_ia32_vcvtsh2sd_mask_round ((D), (C), (A), (B), (R)))
+
+#define _mm_maskz_cvt_roundsh_sd(A, B, C, R) \
+ (__builtin_ia32_vcvtsh2sd_mask_round ((C), (B), \
+ _mm_setzero_pd (), \
+ (A), (R)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcvtss2sh, vcvtsd2sh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_sh (__m128h __A, __m128 __B)
+{
+ return __builtin_ia32_vcvtss2sh_mask_round (__B, __A,
+ _mm_setzero_ph (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtss_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128 __D)
+{
+ return __builtin_ia32_vcvtss2sh_mask_round (__D, __C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtss_sh (__mmask8 __A, __m128h __B, __m128 __C)
+{
+ return __builtin_ia32_vcvtss2sh_mask_round (__C, __B,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_sh (__m128h __A, __m128d __B)
+{
+ return __builtin_ia32_vcvtsd2sh_mask_round (__B, __A,
+ _mm_setzero_ph (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsd_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128d __D)
+{
+ return __builtin_ia32_vcvtsd2sh_mask_round (__D, __C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsd_sh (__mmask8 __A, __m128h __B, __m128d __C)
+{
+ return __builtin_ia32_vcvtsd2sh_mask_round (__C, __B,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundss_sh (__m128h __A, __m128 __B, const int __R)
+{
+ return __builtin_ia32_vcvtss2sh_mask_round (__B, __A,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundss_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128 __D,
+ const int __R)
+{
+ return __builtin_ia32_vcvtss2sh_mask_round (__D, __C, __A, __B, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundss_sh (__mmask8 __A, __m128h __B, __m128 __C,
+ const int __R)
+{
+ return __builtin_ia32_vcvtss2sh_mask_round (__C, __B,
+ _mm_setzero_ph (),
+ __A, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_roundsd_sh (__m128h __A, __m128d __B, const int __R)
+{
+ return __builtin_ia32_vcvtsd2sh_mask_round (__B, __A,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundsd_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128d __D,
+ const int __R)
+{
+ return __builtin_ia32_vcvtsd2sh_mask_round (__D, __C, __A, __B, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundsd_sh (__mmask8 __A, __m128h __B, __m128d __C,
+ const int __R)
+{
+ return __builtin_ia32_vcvtsd2sh_mask_round (__C, __B,
+ _mm_setzero_ph (),
+ __A, __R);
+}
+
+#else
+/* Fallback macros for the vcvtss2sh/vcvtsd2sh round intrinsics; every
+   macro argument is parenthesized, matching the sibling macros in this
+   header, so compound-expression arguments expand safely.  */
+#define _mm_cvt_roundss_sh(A, B, R) \
+  (__builtin_ia32_vcvtss2sh_mask_round ((B), (A), \
+					_mm_setzero_ph (), \
+					(__mmask8) -1, (R)))
+
+#define _mm_mask_cvt_roundss_sh(A, B, C, D, R) \
+  (__builtin_ia32_vcvtss2sh_mask_round ((D), (C), (A), (B), (R)))
+
+#define _mm_maskz_cvt_roundss_sh(A, B, C, R) \
+  (__builtin_ia32_vcvtss2sh_mask_round ((C), (B), \
+					_mm_setzero_ph (), \
+					(A), (R)))
+
+#define _mm_cvt_roundsd_sh(A, B, R) \
+  (__builtin_ia32_vcvtsd2sh_mask_round ((B), (A), \
+					_mm_setzero_ph (), \
+					(__mmask8) -1, (R)))
+
+#define _mm_mask_cvt_roundsd_sh(A, B, C, D, R) \
+  (__builtin_ia32_vcvtsd2sh_mask_round ((D), (C), (A), (B), (R)))
+
+#define _mm_maskz_cvt_roundsd_sh(A, B, C, R) \
+  (__builtin_ia32_vcvtsd2sh_mask_round ((C), (B), \
+					_mm_setzero_ph (), \
+					(A), (R)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Return the lowest _Float16 element of __A.  */
+extern __inline _Float16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsh_h (__m128h __A)
+{
+  return __A[0];
+}
+
+/* Intrinsics vfmadd[132,213,231]sh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_sh (__m128h __W, __m128h __A, __m128h __B)
+{
+  /* Unmasked low-element FMA (mask of all ones), current rounding
+     direction.  Attribute line de-indented to match the rest of the
+     header.  */
+  return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+						  (__v8hf) __A,
+						  (__v8hf) __B,
+						  (__mmask8) -1,
+						  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
+ __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+#else
+#define _mm_fmadd_round_sh(A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (-1), (R)))
+#define _mm_mask_fmadd_round_sh(A, U, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (U), (R)))
+#define _mm_mask3_fmadd_round_sh(A, B, C, U, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask3 ((A), (B), (C), (U), (R)))
+#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), (C), (U), (R)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vfnmadd[132,213,231]sh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B)
+{
+  /* Unmasked low-element negated-multiply add (mask of all ones),
+     current rounding direction.  Attribute line de-indented to match
+     the rest of the header.  */
+  return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
+						   (__v8hf) __A,
+						   (__v8hf) __B,
+						   (__mmask8) -1,
+						   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
+ __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+#else
+#define _mm_fnmadd_round_sh(A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (-1), (R)))
+#define _mm_mask_fnmadd_round_sh(A, U, B, C, R) \
+ ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (U), (R)))
+#define _mm_mask3_fnmadd_round_sh(A, B, C, U, R) \
+ ((__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((A), (B), (C), (U), (R)))
+#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfnmaddsh3_maskz ((A), (B), (C), (U), (R)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vfmsub[132,213,231]sh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmsub_sh (__m128h __W, __m128h __A, __m128h __B)
+{
+  /* Unmasked low-element fused multiply-subtract, implemented as the
+     fmadd builtin with __B negated.  Attribute line de-indented to
+     match the rest of the header.  */
+  return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+						  (__v8hf) __A,
+						  -(__v8hf) __B,
+						  (__mmask8) -1,
+						  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
+{
+ return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
+ (__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ (__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
+ __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
+ (__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+#else
+#define _mm_fmsub_round_sh(A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (-1), (R)))
+#define _mm_mask_fmsub_round_sh(A, U, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (U), (R)))
+#define _mm_mask3_fmsub_round_sh(A, B, C, U, R) \
+ ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), (B), (C), (U), (R)))
+#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), -(C), (U), (R)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vfnmsub[132,213,231]sh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B)
+{
+  /* Unmasked low-element fnmsub, implemented as the fmadd builtin with
+     both __A and __B negated.  Attribute line de-indented to match the
+     rest of the header.  */
+  return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+						  -(__v8hf) __A,
+						  -(__v8hf) __B,
+						  (__mmask8) -1,
+						  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ -(__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
+{
+ return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
+ -(__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
+ -(__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ -(__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
+ -(__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
+ -(__v8hf) __A,
+ (__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
+ __m128h __B, const int __R)
+{
+ return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
+ -(__v8hf) __A,
+ -(__v8hf) __B,
+ (__mmask8) __U, __R);
+}
+
+#else
+#define _mm_fnmsub_round_sh(A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (-1), (R)))
+#define _mm_mask_fnmsub_round_sh(A, U, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (U), (R)))
+#define _mm_mask3_fnmsub_round_sh(A, B, C, U, R) \
+ ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), -(B), (C), (U), (R)))
+#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \
+ ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), -(B), -(C), (U), (R)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vf[,c]maddcsh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C, __D,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C, __D,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+ __mmask8 __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+ __mmask8 __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D);
+}
+#else
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) \
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (C), \
+ (__v8hf) (D), \
+ (B), (E)))
+
+
+#define _mm_mask3_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) \
+ __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) (A), \
+ (__v8hf) (B), \
+ (__v8hf) (C), \
+ (D), (E)))
+
+/* Add the (__m128h)/(__v8hf) casts so these two macros match the
+   adjacent mask/mask3 variants.  */
+#define _mm_maskz_fcmadd_round_sch(A, B, C, D, E) \
+  ((__m128h) \
+   __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) (B), \
+					  (__v8hf) (C), \
+					  (__v8hf) (D), \
+					  (A), (E)))
+
+#define _mm_fcmadd_round_sch(A, B, C, D) \
+  ((__m128h) \
+   __builtin_ia32_vfcmaddcsh_round ((__v8hf) (A), \
+				    (__v8hf) (B), \
+				    (__v8hf) (C), (D)))
+
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) \
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (C), \
+ (__v8hf) (D), \
+ (B), (E)))
+
+#define _mm_mask3_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) \
+ __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) (A), \
+ (__v8hf) (B), \
+ (__v8hf) (C), \
+ (D), (E)))
+
+/* Add the (__m128h)/(__v8hf) casts so these two macros match the
+   adjacent mask/mask3 variants.  */
+#define _mm_maskz_fmadd_round_sch(A, B, C, D, E) \
+  ((__m128h) \
+   __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) (B), \
+					 (__v8hf) (C), \
+					 (__v8hf) (D), \
+					 (A), (E)))
+
+#define _mm_fmadd_round_sch(A, B, C, D) \
+  ((__m128h) \
+   __builtin_ia32_vfmaddcsh_round ((__v8hf) (A), \
+				   (__v8hf) (B), \
+				   (__v8hf) (C), (D)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vf[,c]mulcsh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_sch (__m128h __A, __m128h __B)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_sch (__m128h __A, __m128h __B)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, __E);
+}
+
+#else
+#define _mm_fcmul_round_sch(__A, __B, __D) \
+ (__m128h) __builtin_ia32_vfcmulcsh_round ((__v8hf) __A, \
+ (__v8hf) __B, __D)
+
+#define _mm_mask_fcmul_round_sch(__A, __B, __C, __D, __E) \
+ (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C, \
+ (__v8hf) __D, \
+ (__v8hf) __A, \
+ __B, __E)
+
+#define _mm_maskz_fcmul_round_sch(__A, __B, __C, __E) \
+ (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B, \
+ (__v8hf) __C, \
+ _mm_setzero_ph (), \
+ __A, __E)
+
+#define _mm_fmul_round_sch(__A, __B, __D) \
+ (__m128h) __builtin_ia32_vfmulcsh_round ((__v8hf) __A, \
+ (__v8hf) __B, __D)
+
+#define _mm_mask_fmul_round_sch(__A, __B, __C, __D, __E) \
+ (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C, \
+ (__v8hf) __D, \
+ (__v8hf) __A, \
+ __B, __E)
+
+#define _mm_maskz_fmul_round_sch(__A, __B, __C, __E) \
+ (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B, \
+ (__v8hf) __C, \
+ _mm_setzero_ph (), \
+ __A, __E)
+
+#endif /* __OPTIMIZE__ */
+
+#define _mm_mul_sch(A, B) _mm_fmul_sch ((A), (B))
+#define _mm_mask_mul_sch(W, U, A, B) _mm_mask_fmul_sch ((W), (U), (A), (B))
+#define _mm_maskz_mul_sch(U, A, B) _mm_maskz_fmul_sch ((U), (A), (B))
+#define _mm_mul_round_sch(A, B, R) _mm_fmul_round_sch ((A), (B), (R))
+#define _mm_mask_mul_round_sch(W, U, A, B, R) \
+ _mm_mask_fmul_round_sch ((W), (U), (A), (B), (R))
+#define _mm_maskz_mul_round_sch(U, A, B, R) \
+ _mm_maskz_fmul_round_sch ((U), (A), (B), (R))
+
+#define _mm_cmul_sch(A, B) _mm_fcmul_sch ((A), (B))
+#define _mm_mask_cmul_sch(W, U, A, B) _mm_mask_fcmul_sch ((W), (U), (A), (B))
+#define _mm_maskz_cmul_sch(U, A, B) _mm_maskz_fcmul_sch ((U), (A), (B))
+#define _mm_cmul_round_sch(A, B, R) _mm_fcmul_round_sch ((A), (B), (R))
+#define _mm_mask_cmul_round_sch(W, U, A, B, R) \
+ _mm_mask_fcmul_round_sch ((W), (U), (A), (B), (R))
+#define _mm_maskz_cmul_round_sch(U, A, B, R) \
+ _mm_maskz_fcmul_round_sch ((U), (A), (B), (R))
+
+#ifdef __DISABLE_AVX512FP16__
+#undef __DISABLE_AVX512FP16__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512FP16__ */
+
+/* The 512-bit half of this header needs both AVX512FP16 and EVEX512;
+   force them on (and remember to pop) when either is missing.  */
+#if !defined (__AVX512FP16__) || !defined (__EVEX512__)
+#pragma GCC push_options
+#pragma GCC target("avx512fp16,evex512")
+#define __DISABLE_AVX512FP16_512__
+#endif /* !__AVX512FP16__ || !__EVEX512__ */
+
+/* 512-bit _Float16 vector types; __m512h_u is the unaligned variant.  */
+typedef _Float16 __v32hf __attribute__ ((__vector_size__ (64)));
+typedef _Float16 __m512h __attribute__ ((__vector_size__ (64), __may_alias__));
+typedef _Float16 __m512h_u __attribute__ ((__vector_size__ (64), \
+					 __may_alias__, __aligned__ (1)));
+
+/* Create a vector of 32 _Float16 values; arguments are given
+   highest-index first, so __A0 ends up in element 0.  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_set_ph (_Float16 __A31, _Float16 __A30, _Float16 __A29,
+	       _Float16 __A28, _Float16 __A27, _Float16 __A26,
+	       _Float16 __A25, _Float16 __A24, _Float16 __A23,
+	       _Float16 __A22, _Float16 __A21, _Float16 __A20,
+	       _Float16 __A19, _Float16 __A18, _Float16 __A17,
+	       _Float16 __A16, _Float16 __A15, _Float16 __A14,
+	       _Float16 __A13, _Float16 __A12, _Float16 __A11,
+	       _Float16 __A10, _Float16 __A9, _Float16 __A8,
+	       _Float16 __A7, _Float16 __A6, _Float16 __A5,
+	       _Float16 __A4, _Float16 __A3, _Float16 __A2,
+	       _Float16 __A1, _Float16 __A0)
+{
+  return __extension__ (__m512h)(__v32hf){ __A0, __A1, __A2, __A3,
+					   __A4, __A5, __A6, __A7,
+					   __A8, __A9, __A10, __A11,
+					   __A12, __A13, __A14, __A15,
+					   __A16, __A17, __A18, __A19,
+					   __A20, __A21, __A22, __A23,
+					   __A24, __A25, __A26, __A27,
+					   __A28, __A29, __A30, __A31 };
+}
+
+/* Create vectors of elements in the reversed order from
+   _mm512_set_ph functions: __A0 is element 0, __A31 element 31.  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
+		_Float16 __A3, _Float16 __A4, _Float16 __A5,
+		_Float16 __A6, _Float16 __A7, _Float16 __A8,
+		_Float16 __A9, _Float16 __A10, _Float16 __A11,
+		_Float16 __A12, _Float16 __A13, _Float16 __A14,
+		_Float16 __A15, _Float16 __A16, _Float16 __A17,
+		_Float16 __A18, _Float16 __A19, _Float16 __A20,
+		_Float16 __A21, _Float16 __A22, _Float16 __A23,
+		_Float16 __A24, _Float16 __A25, _Float16 __A26,
+		_Float16 __A27, _Float16 __A28, _Float16 __A29,
+		_Float16 __A30, _Float16 __A31)
+{
+  /* Arguments already arrive in element order, so build the vector
+     literal directly rather than forwarding the reversed argument
+     list through _mm512_set_ph.  */
+  return __extension__ (__m512h)(__v32hf){ __A0, __A1, __A2, __A3,
+					   __A4, __A5, __A6, __A7,
+					   __A8, __A9, __A10, __A11,
+					   __A12, __A13, __A14, __A15,
+					   __A16, __A17, __A18, __A19,
+					   __A20, __A21, __A22, __A23,
+					   __A24, __A25, __A26, __A27,
+					   __A28, __A29, __A30, __A31 };
+}
+
+/* Broadcast _Float16 to vector. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_set1_ph (_Float16 __A)
+{
+ return _mm512_set_ph (__A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+/* Create a vector with all zeros. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_setzero_ph (void)
+{
+ return _mm512_set1_ph (0.0f16);
+}
+
+/* Return a vector with indeterminate contents.  The deliberate
+   self-initialization lets the compiler emit no code for it; the
+   pragmas suppress the resulting -Winit-self warning.  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_undefined_ph (void)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winit-self"
+  __m512h __Y = __Y;
+#pragma GCC diagnostic pop
+  return __Y;
+}
+
+extern __inline _Float16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtsh_h (__m512h __A)
+{
+ return __A[0];
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph_ps (__m512h __a)
+{
+ return (__m512) __a;
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph_pd (__m512h __a)
+{
+ return (__m512d) __a;
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph_si512 (__m512h __a)
+{
+ return (__m512i) __a;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph512_ph128 (__m512h __A)
+{
+ union
+ {
+ __m128h __a[4];
+ __m512h __v;
+ } __u = { .__v = __A };
+ return __u.__a[0];
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph512_ph256 (__m512h __A)
+{
+ union
+ {
+ __m256h __a[2];
+ __m512h __v;
+ } __u = { .__v = __A };
+ return __u.__a[0];
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph128_ph512 (__m128h __A)
+{
+ union
+ {
+ __m128h __a[4];
+ __m512h __v;
+ } __u;
+ __u.__a[0] = __A;
+ return __u.__v;
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castph256_ph512 (__m256h __A)
+{
+ union
+ {
+ __m256h __a[2];
+ __m512h __v;
+ } __u;
+ __u.__a[0] = __A;
+ return __u.__v;
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_zextph128_ph512 (__m128h __A)
+{
+ return (__m512h) _mm512_insertf32x4 (_mm512_setzero_ps (),
+ (__m128) __A, 0);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_zextph256_ph512 (__m256h __A)
+{
+ return (__m512h) _mm512_insertf64x4 (_mm512_setzero_pd (),
+ (__m256d) __A, 0);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castps_ph (__m512 __a)
+{
+ return (__m512h) __a;
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castpd_ph (__m512d __a)
+{
+ return (__m512h) __a;
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_castsi512_ph (__m512i __a)
+{
+ return (__m512h) __a;
+}
+
+/* Load a full 512-bit vector of _Float16 from *__P (aligned access;
+   see _mm512_loadu_ph for the unaligned variant).  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_load_ph (void const *__P)
+{
+  return *(const __m512h *) __P;
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_loadu_ph (void const *__P)
+{
+ return *(const __m512h_u *) __P;
+}
+
+/* Store a full 512-bit vector of _Float16 to *__P (aligned access;
+   see _mm512_storeu_ph for the unaligned variant).  */
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_store_ph (void *__P, __m512h __A)
+{
+  *(__m512h *) __P = __A;
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_storeu_ph (void *__P, __m512h __A)
+{
+ *(__m512h_u *) __P = __A;
+}
+
+/* Absolute value: clear the sign bit of each _Float16 element.  The
+   0x7FFF7FFF constant masks off the top bit of both 16-bit halves of
+   every 32-bit lane.  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_abs_ph (__m512h __A)
+{
+  return (__m512h) _mm512_and_epi32 ( _mm512_set1_epi32 (0x7FFF7FFF),
+				      (__m512i) __A);
+}
+
+/* Intrinsics v[add,sub,mul,div]ph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_add_ph (__m512h __A, __m512h __B)
+{
+ return (__m512h) ((__v32hf) __A + (__v32hf) __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_add_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_addph512_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_add_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_addph512_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sub_ph (__m512h __A, __m512h __B)
+{
+ return (__m512h) ((__v32hf) __A - (__v32hf) __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sub_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_subph512_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sub_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_subph512_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mul_ph (__m512h __A, __m512h __B)
+{
+ return (__m512h) ((__v32hf) __A * (__v32hf) __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mul_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_mulph512_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mul_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_mulph512_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_div_ph (__m512h __A, __m512h __B)
+{
+ return (__m512h) ((__v32hf) __A / (__v32hf) __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_div_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_divph512_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_div_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_divph512_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_add_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_addph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_add_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_addph512_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_add_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_addph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sub_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_subph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sub_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_subph512_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sub_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_subph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mul_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_mulph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mul_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_mulph512_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mul_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_mulph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_div_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_divph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_div_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_divph512_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_div_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_divph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+#else
+#define _mm512_add_round_ph(A, B, C) \
+ ((__m512h)__builtin_ia32_addph512_mask_round((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_add_round_ph(A, B, C, D, E) \
+ ((__m512h)__builtin_ia32_addph512_mask_round((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_add_round_ph(A, B, C, D) \
+ ((__m512h)__builtin_ia32_addph512_mask_round((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#define _mm512_sub_round_ph(A, B, C) \
+ ((__m512h)__builtin_ia32_subph512_mask_round((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_sub_round_ph(A, B, C, D, E) \
+ ((__m512h)__builtin_ia32_subph512_mask_round((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_sub_round_ph(A, B, C, D) \
+ ((__m512h)__builtin_ia32_subph512_mask_round((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#define _mm512_mul_round_ph(A, B, C) \
+ ((__m512h)__builtin_ia32_mulph512_mask_round((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_mul_round_ph(A, B, C, D, E) \
+ ((__m512h)__builtin_ia32_mulph512_mask_round((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_mul_round_ph(A, B, C, D) \
+ ((__m512h)__builtin_ia32_mulph512_mask_round((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#define _mm512_div_round_ph(A, B, C) \
+ ((__m512h)__builtin_ia32_divph512_mask_round((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_div_round_ph(A, B, C, D, E) \
+ ((__m512h)__builtin_ia32_divph512_mask_round((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_div_round_ph(A, B, C, D) \
+ ((__m512h)__builtin_ia32_divph512_mask_round((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+#endif /* __OPTIMIZE__ */
+
+/* Complex conjugate of packed complex-half pairs: flip the sign bit
+   of the high _Float16 (the imaginary part) of every 32-bit lane.
+   Use (int) 0x80000000 — the same bit pattern as 1<<31 — to avoid the
+   ISO C undefined behavior of left-shifting into the sign bit.  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_conj_pch (__m512h __A)
+{
+  return (__m512h) _mm512_xor_epi32 ((__m512i) __A,
+				     _mm512_set1_epi32 ((int) 0x80000000));
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_conj_pch (__m512h __W, __mmask16 __U, __m512h __A)
+{
+ return (__m512h)
+ __builtin_ia32_movaps512_mask ((__v16sf) _mm512_conj_pch (__A),
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_conj_pch (__mmask16 __U, __m512h __A)
+{
+ return (__m512h)
+ __builtin_ia32_movaps512_mask ((__v16sf) _mm512_conj_pch (__A),
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+/* Intrinsic vmaxph vminph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_ph (__m512h __A, __m512h __B)
+{
+ return __builtin_ia32_maxph512_mask (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_maxph512_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_maxph512_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_ph (__m512h __A, __m512h __B)
+{
+ return __builtin_ia32_minph512_mask (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_minph512_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_minph512_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_maxph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_maxph512_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_maxph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_minph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_minph512_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_minph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm512_max_round_ph(A, B, C) \
+ (__builtin_ia32_maxph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_max_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_maxph512_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_max_round_ph(A, B, C, D) \
+ (__builtin_ia32_maxph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#define _mm512_min_round_ph(A, B, C) \
+ (__builtin_ia32_minph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_min_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_minph512_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_min_round_ph(A, B, C, D) \
+ (__builtin_ia32_minph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcmpph: compare packed _Float16 elements with predicate
+   __C/__D, producing a 32-bit element mask.  Note the guard below must
+   test __OPTIMIZE__ (with trailing underscores); the previous
+   "__OPTIMIZE" spelling is never defined, which made these inline
+   versions dead code and forced the macro fallbacks unconditionally.  */
+#ifdef __OPTIMIZE__
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_ph_mask (__m512h __A, __m512h __B, const int __C)
+{
+  return (__mmask32) __builtin_ia32_cmpph512_mask (__A, __B, __C,
+						   (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_ph_mask (__mmask32 __A, __m512h __B, __m512h __C,
+			 const int __D)
+{
+  return (__mmask32) __builtin_ia32_cmpph512_mask (__B, __C, __D,
+						   __A);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_round_ph_mask (__m512h __A, __m512h __B, const int __C,
+			  const int __D)
+{
+  return (__mmask32) __builtin_ia32_cmpph512_mask_round (__A, __B,
+							 __C, (__mmask32) -1,
+							 __D);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_round_ph_mask (__mmask32 __A, __m512h __B, __m512h __C,
+			       const int __D, const int __E)
+{
+  return (__mmask32) __builtin_ia32_cmpph512_mask_round (__B, __C,
+							 __D, __A,
+							 __E);
+}
+
+#else
+/* Cast the all-ones mask to __mmask32 explicitly, matching the style
+   of the other 512-bit fallback macros in this file.  */
+#define _mm512_cmp_ph_mask(A, B, C)			\
+  (__builtin_ia32_cmpph512_mask ((A), (B), (C), ((__mmask32) -1)))
+
+#define _mm512_mask_cmp_ph_mask(A, B, C, D)		\
+  (__builtin_ia32_cmpph512_mask ((B), (C), (D), (A)))
+
+#define _mm512_cmp_round_ph_mask(A, B, C, D)		\
+  (__builtin_ia32_cmpph512_mask_round ((A), (B), (C), ((__mmask32) -1), (D)))
+
+#define _mm512_mask_cmp_round_ph_mask(A, B, C, D, E)	\
+  (__builtin_ia32_cmpph512_mask_round ((B), (C), (D), (A), (E)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vsqrtph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sqrt_ph (__m512h __A)
+{
+ return __builtin_ia32_sqrtph512_mask_round (__A,
+ _mm512_setzero_ph(),
+ (__mmask32) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sqrt_ph (__m512h __A, __mmask32 __B, __m512h __C)
+{
+ return __builtin_ia32_sqrtph512_mask_round (__C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sqrt_ph (__mmask32 __A, __m512h __B)
+{
+ return __builtin_ia32_sqrtph512_mask_round (__B,
+ _mm512_setzero_ph (),
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sqrt_round_ph (__m512h __A, const int __B)
+{
+ return __builtin_ia32_sqrtph512_mask_round (__A,
+ _mm512_setzero_ph(),
+ (__mmask32) -1, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sqrt_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_sqrtph512_mask_round (__C, __A, __B, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sqrt_round_ph (__mmask32 __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_sqrtph512_mask_round (__B,
+ _mm512_setzero_ph (),
+ __A, __C);
+}
+
+#else
+#define _mm512_sqrt_round_ph(A, B) \
+ (__builtin_ia32_sqrtph512_mask_round ((A), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (B)))
+
+#define _mm512_mask_sqrt_round_ph(A, B, C, D) \
+ (__builtin_ia32_sqrtph512_mask_round ((C), (A), (B), (D)))
+
+#define _mm512_maskz_sqrt_round_ph(A, B, C) \
+ (__builtin_ia32_sqrtph512_mask_round ((B), \
+ _mm512_setzero_ph (), \
+ (A), (C)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vrsqrtph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_rsqrt_ph (__m512h __A)
+{
+ return __builtin_ia32_rsqrtph512_mask (__A, _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_rsqrt_ph (__m512h __A, __mmask32 __B, __m512h __C)
+{
+ return __builtin_ia32_rsqrtph512_mask (__C, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_rsqrt_ph (__mmask32 __A, __m512h __B)
+{
+ return __builtin_ia32_rsqrtph512_mask (__B, _mm512_setzero_ph (),
+ __A);
+}
+
+/* Intrinsics vrcpph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_rcp_ph (__m512h __A)
+{
+ return __builtin_ia32_rcpph512_mask (__A, _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_rcp_ph (__m512h __A, __mmask32 __B, __m512h __C)
+{
+ return __builtin_ia32_rcpph512_mask (__C, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_rcp_ph (__mmask32 __A, __m512h __B)
+{
+ return __builtin_ia32_rcpph512_mask (__B, _mm512_setzero_ph (),
+ __A);
+}
+
+/* Intrinsics vscalefph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_scalef_ph (__m512h __A, __m512h __B)
+{
+ return __builtin_ia32_scalefph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_scalef_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_scalefph512_mask_round (__C, __D, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_scalef_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_scalefph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_scalef_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_scalefph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_scalef_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_scalefph512_mask_round (__C, __D, __A, __B,
+ __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_scalef_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_scalefph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm512_scalef_round_ph(A, B, C) \
+ (__builtin_ia32_scalefph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_scalef_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_scalefph512_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_scalef_round_ph(A, B, C, D) \
+ (__builtin_ia32_scalefph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vreduceph. */
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_reduce_ph (__m512h __A, int __B)
+{
+ return __builtin_ia32_reduceph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_reduce_ph (__m512h __A, __mmask32 __B, __m512h __C, int __D)
+{
+ return __builtin_ia32_reduceph512_mask_round (__C, __D, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_reduce_ph (__mmask32 __A, __m512h __B, int __C)
+{
+ return __builtin_ia32_reduceph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_reduce_round_ph (__m512h __A, int __B, const int __C)
+{
+ return __builtin_ia32_reduceph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_reduce_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ int __D, const int __E)
+{
+ return __builtin_ia32_reduceph512_mask_round (__C, __D, __A, __B,
+ __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_reduce_round_ph (__mmask32 __A, __m512h __B, int __C,
+ const int __D)
+{
+ return __builtin_ia32_reduceph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm512_reduce_ph(A, B) \
+ (__builtin_ia32_reduceph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_reduce_ph(A, B, C, D) \
+ (__builtin_ia32_reduceph512_mask_round ((C), (D), (A), (B), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_reduce_ph(A, B, C) \
+ (__builtin_ia32_reduceph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_reduce_round_ph(A, B, C) \
+ (__builtin_ia32_reduceph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_reduce_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_reduceph512_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_reduce_round_ph(A, B, C, D) \
+ (__builtin_ia32_reduceph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vrndscaleph. */
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_roundscale_ph (__m512h __A, int __B)
+{
+ return __builtin_ia32_rndscaleph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_roundscale_ph (__m512h __A, __mmask32 __B,
+ __m512h __C, int __D)
+{
+ return __builtin_ia32_rndscaleph512_mask_round (__C, __D, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_roundscale_ph (__mmask32 __A, __m512h __B, int __C)
+{
+ return __builtin_ia32_rndscaleph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_roundscale_round_ph (__m512h __A, int __B, const int __C)
+{
+ return __builtin_ia32_rndscaleph512_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1,
+ __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_roundscale_round_ph (__m512h __A, __mmask32 __B,
+ __m512h __C, int __D, const int __E)
+{
+ return __builtin_ia32_rndscaleph512_mask_round (__C, __D, __A,
+ __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_roundscale_round_ph (__mmask32 __A, __m512h __B, int __C,
+ const int __D)
+{
+ return __builtin_ia32_rndscaleph512_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm512_roundscale_ph(A, B) \
+ (__builtin_ia32_rndscaleph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_roundscale_ph(A, B, C, D) \
+ (__builtin_ia32_rndscaleph512_mask_round ((C), (D), (A), (B), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_roundscale_ph(A, B, C) \
+ (__builtin_ia32_rndscaleph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), \
+ _MM_FROUND_CUR_DIRECTION))
+#define _mm512_roundscale_round_ph(A, B, C) \
+ (__builtin_ia32_rndscaleph512_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_roundscale_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_rndscaleph512_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_roundscale_round_ph(A, B, C, D) \
+ (__builtin_ia32_rndscaleph512_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vfpclassph. */
+#ifdef __OPTIMIZE__
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_fpclass_ph_mask (__mmask32 __U, __m512h __A,
+ const int __imm)
+{
+ return (__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) __A,
+ __imm, __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_fpclass_ph_mask (__m512h __A, const int __imm)
+{
+ return (__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) __A,
+ __imm,
+ (__mmask32) -1);
+}
+
+#else
+#define _mm512_mask_fpclass_ph_mask(u, x, c) \
+ ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \
+ (int) (c),(__mmask8)(u)))
+
+#define _mm512_fpclass_ph_mask(x, c) \
+ ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \
+ (int) (c),(__mmask8)-1))
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vgetexpph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_getexp_ph (__m512h __A)
+{
+ return (__m512h)
+ __builtin_ia32_getexpph512_mask ((__v32hf) __A,
+ (__v32hf) _mm512_setzero_ph (),
+ (__mmask32) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_getexp_ph (__m512h __W, __mmask32 __U, __m512h __A)
+{
+ return (__m512h)
+ __builtin_ia32_getexpph512_mask ((__v32hf) __A, (__v32hf) __W,
+ (__mmask32) __U, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_getexp_ph (__mmask32 __U, __m512h __A)
+{
+ return (__m512h)
+ __builtin_ia32_getexpph512_mask ((__v32hf) __A,
+ (__v32hf) _mm512_setzero_ph (),
+ (__mmask32) __U, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_getexp_round_ph (__m512h __A, const int __R)
+{
+ return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A,
+ (__v32hf)
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __R);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_getexp_round_ph (__m512h __W, __mmask32 __U, __m512h __A,
+ const int __R)
+{
+ return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A,
+ (__v32hf) __W,
+ (__mmask32) __U, __R);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_getexp_round_ph (__mmask32 __U, __m512h __A, const int __R)
+{
+ return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A,
+ (__v32hf)
+ _mm512_setzero_ph (),
+ (__mmask32) __U, __R);
+}
+
+#else
+#define _mm512_getexp_round_ph(A, R) \
+ ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)_mm512_setzero_ph(), (__mmask32)-1, R))
+
+#define _mm512_mask_getexp_round_ph(W, U, A, R) \
+ ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(W), (__mmask32)(U), R))
+
+#define _mm512_maskz_getexp_round_ph(U, A, R) \
+ ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), R))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vgetmantph. */
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_getmant_ph (__m512h __A, _MM_MANTISSA_NORM_ENUM __B,
+ _MM_MANTISSA_SIGN_ENUM __C)
+{
+ return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
+ (__C << 2) | __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_getmant_ph (__m512h __W, __mmask32 __U, __m512h __A,
+ _MM_MANTISSA_NORM_ENUM __B,
+ _MM_MANTISSA_SIGN_ENUM __C)
+{
+ return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
+ (__C << 2) | __B,
+ (__v32hf) __W, __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_getmant_ph (__mmask32 __U, __m512h __A,
+ _MM_MANTISSA_NORM_ENUM __B,
+ _MM_MANTISSA_SIGN_ENUM __C)
+{
+ return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
+ (__C << 2) | __B,
+ (__v32hf)
+ _mm512_setzero_ph (),
+ __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_getmant_round_ph (__m512h __A, _MM_MANTISSA_NORM_ENUM __B,
+ _MM_MANTISSA_SIGN_ENUM __C, const int __R)
+{
+ return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
+ (__C << 2) | __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __R);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_getmant_round_ph (__m512h __W, __mmask32 __U, __m512h __A,
+ _MM_MANTISSA_NORM_ENUM __B,
+ _MM_MANTISSA_SIGN_ENUM __C, const int __R)
+{
+ return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
+ (__C << 2) | __B,
+ (__v32hf) __W, __U,
+ __R);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_getmant_round_ph (__mmask32 __U, __m512h __A,
+ _MM_MANTISSA_NORM_ENUM __B,
+ _MM_MANTISSA_SIGN_ENUM __C, const int __R)
+{
+ return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A,
+ (__C << 2) | __B,
+ (__v32hf)
+ _mm512_setzero_ph (),
+ __U, __R);
+}
+
+#else
+#define _mm512_getmant_ph(X, B, C) \
+ ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
+ (int)(((C)<<2) | (B)), \
+ (__v32hf)(__m512h) \
+ _mm512_setzero_ph(), \
+ (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_getmant_ph(W, U, X, B, C) \
+ ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
+ (int)(((C)<<2) | (B)), \
+ (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+
+#define _mm512_maskz_getmant_ph(U, X, B, C) \
+ ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
+ (int)(((C)<<2) | (B)), \
+ (__v32hf)(__m512h) \
+ _mm512_setzero_ph(), \
+ (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_getmant_round_ph(X, B, C, R) \
+ ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
+ (int)(((C)<<2) | (B)), \
+ (__v32hf)(__m512h) \
+ _mm512_setzero_ph(), \
+ (__mmask32)-1, \
+ (R)))
+
+#define _mm512_mask_getmant_round_ph(W, U, X, B, C, R) \
+ ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
+ (int)(((C)<<2) | (B)), \
+ (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), \
+ (R)))
+
+
+#define _mm512_maskz_getmant_round_ph(U, X, B, C, R) \
+ ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \
+ (int)(((C)<<2) | (B)), \
+ (__v32hf)(__m512h) \
+ _mm512_setzero_ph(), \
+ (__mmask32)(U), \
+ (R)))
+
+#endif /* __OPTIMIZE__ */
+
/* Intrinsics vcvtph2dq. */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -4364,244 +5733,6 @@ _mm512_maskz_cvt_roundepu16_ph (__mmask32 __A, __m512i __B, int __C)
#endif /* __OPTIMIZE__ */
-/* Intrinsics vcvtsh2si, vcvtsh2us. */
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_i32 (__m128h __A)
-{
- return (int) __builtin_ia32_vcvtsh2si32_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_u32 (__m128h __A)
-{
- return (int) __builtin_ia32_vcvtsh2usi32_round (__A,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsh_i32 (__m128h __A, const int __R)
-{
- return (int) __builtin_ia32_vcvtsh2si32_round (__A, __R);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsh_u32 (__m128h __A, const int __R)
-{
- return (int) __builtin_ia32_vcvtsh2usi32_round (__A, __R);
-}
-
-#else
-#define _mm_cvt_roundsh_i32(A, B) \
- ((int)__builtin_ia32_vcvtsh2si32_round ((A), (B)))
-#define _mm_cvt_roundsh_u32(A, B) \
- ((int)__builtin_ia32_vcvtsh2usi32_round ((A), (B)))
-
-#endif /* __OPTIMIZE__ */
-
-#ifdef __x86_64__
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_i64 (__m128h __A)
-{
- return (long long)
- __builtin_ia32_vcvtsh2si64_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_u64 (__m128h __A)
-{
- return (long long)
- __builtin_ia32_vcvtsh2usi64_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsh_i64 (__m128h __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvtsh2si64_round (__A, __R);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsh_u64 (__m128h __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvtsh2usi64_round (__A, __R);
-}
-
-#else
-#define _mm_cvt_roundsh_i64(A, B) \
- ((long long)__builtin_ia32_vcvtsh2si64_round ((A), (B)))
-#define _mm_cvt_roundsh_u64(A, B) \
- ((long long)__builtin_ia32_vcvtsh2usi64_round ((A), (B)))
-
-#endif /* __OPTIMIZE__ */
-#endif /* __x86_64__ */
-
-/* Intrinsics vcvttsh2si, vcvttsh2us. */
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsh_i32 (__m128h __A)
-{
- return (int)
- __builtin_ia32_vcvttsh2si32_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsh_u32 (__m128h __A)
-{
- return (int)
- __builtin_ia32_vcvttsh2usi32_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline int
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsh_i32 (__m128h __A, const int __R)
-{
- return (int) __builtin_ia32_vcvttsh2si32_round (__A, __R);
-}
-
-extern __inline unsigned
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsh_u32 (__m128h __A, const int __R)
-{
- return (int) __builtin_ia32_vcvttsh2usi32_round (__A, __R);
-}
-
-#else
-#define _mm_cvtt_roundsh_i32(A, B) \
- ((int)__builtin_ia32_vcvttsh2si32_round ((A), (B)))
-#define _mm_cvtt_roundsh_u32(A, B) \
- ((int)__builtin_ia32_vcvttsh2usi32_round ((A), (B)))
-
-#endif /* __OPTIMIZE__ */
-
-#ifdef __x86_64__
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsh_i64 (__m128h __A)
-{
- return (long long)
- __builtin_ia32_vcvttsh2si64_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsh_u64 (__m128h __A)
-{
- return (long long)
- __builtin_ia32_vcvttsh2usi64_round (__A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsh_i64 (__m128h __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvttsh2si64_round (__A, __R);
-}
-
-extern __inline unsigned long long
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_roundsh_u64 (__m128h __A, const int __R)
-{
- return (long long) __builtin_ia32_vcvttsh2usi64_round (__A, __R);
-}
-
-#else
-#define _mm_cvtt_roundsh_i64(A, B) \
- ((long long)__builtin_ia32_vcvttsh2si64_round ((A), (B)))
-#define _mm_cvtt_roundsh_u64(A, B) \
- ((long long)__builtin_ia32_vcvttsh2usi64_round ((A), (B)))
-
-#endif /* __OPTIMIZE__ */
-#endif /* __x86_64__ */
-
-/* Intrinsics vcvtsi2sh, vcvtusi2sh. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvti32_sh (__m128h __A, int __B)
-{
- return __builtin_ia32_vcvtsi2sh32_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtu32_sh (__m128h __A, unsigned int __B)
-{
- return __builtin_ia32_vcvtusi2sh32_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundi32_sh (__m128h __A, int __B, const int __R)
-{
- return __builtin_ia32_vcvtsi2sh32_round (__A, __B, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundu32_sh (__m128h __A, unsigned int __B, const int __R)
-{
- return __builtin_ia32_vcvtusi2sh32_round (__A, __B, __R);
-}
-
-#else
-#define _mm_cvt_roundi32_sh(A, B, C) \
- (__builtin_ia32_vcvtsi2sh32_round ((A), (B), (C)))
-#define _mm_cvt_roundu32_sh(A, B, C) \
- (__builtin_ia32_vcvtusi2sh32_round ((A), (B), (C)))
-
-#endif /* __OPTIMIZE__ */
-
-#ifdef __x86_64__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvti64_sh (__m128h __A, long long __B)
-{
- return __builtin_ia32_vcvtsi2sh64_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtu64_sh (__m128h __A, unsigned long long __B)
-{
- return __builtin_ia32_vcvtusi2sh64_round (__A, __B, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundi64_sh (__m128h __A, long long __B, const int __R)
-{
- return __builtin_ia32_vcvtsi2sh64_round (__A, __B, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundu64_sh (__m128h __A, unsigned long long __B, const int __R)
-{
- return __builtin_ia32_vcvtusi2sh64_round (__A, __B, __R);
-}
-
-#else
-#define _mm_cvt_roundi64_sh(A, B, C) \
- (__builtin_ia32_vcvtsi2sh64_round ((A), (B), (C)))
-#define _mm_cvt_roundu64_sh(A, B, C) \
- (__builtin_ia32_vcvtusi2sh64_round ((A), (B), (C)))
-
-#endif /* __OPTIMIZE__ */
-#endif /* __x86_64__ */
-
/* Intrinsics vcvtph2pd. */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -4900,286 +6031,6 @@ _mm512_maskz_cvt_roundpd_ph (__mmask8 __A, __m512d __B, int __C)
#endif /* __OPTIMIZE__ */
-/* Intrinsics vcvtsh2ss, vcvtsh2sd. */
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_ss (__m128 __A, __m128h __B)
-{
- return __builtin_ia32_vcvtsh2ss_mask_round (__B, __A,
- _mm_setzero_ps (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsh_ss (__m128 __A, __mmask8 __B, __m128 __C,
- __m128h __D)
-{
- return __builtin_ia32_vcvtsh2ss_mask_round (__D, __C, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsh_ss (__mmask8 __A, __m128 __B,
- __m128h __C)
-{
- return __builtin_ia32_vcvtsh2ss_mask_round (__C, __B,
- _mm_setzero_ps (),
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsh_sd (__m128d __A, __m128h __B)
-{
- return __builtin_ia32_vcvtsh2sd_mask_round (__B, __A,
- _mm_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsh_sd (__m128d __A, __mmask8 __B, __m128d __C,
- __m128h __D)
-{
- return __builtin_ia32_vcvtsh2sd_mask_round (__D, __C, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsh_sd (__mmask8 __A, __m128d __B, __m128h __C)
-{
- return __builtin_ia32_vcvtsh2sd_mask_round (__C, __B,
- _mm_setzero_pd (),
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsh_ss (__m128 __A, __m128h __B, const int __R)
-{
- return __builtin_ia32_vcvtsh2ss_mask_round (__B, __A,
- _mm_setzero_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvt_roundsh_ss (__m128 __A, __mmask8 __B, __m128 __C,
- __m128h __D, const int __R)
-{
- return __builtin_ia32_vcvtsh2ss_mask_round (__D, __C, __A, __B, __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvt_roundsh_ss (__mmask8 __A, __m128 __B,
- __m128h __C, const int __R)
-{
- return __builtin_ia32_vcvtsh2ss_mask_round (__C, __B,
- _mm_setzero_ps (),
- __A, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsh_sd (__m128d __A, __m128h __B, const int __R)
-{
- return __builtin_ia32_vcvtsh2sd_mask_round (__B, __A,
- _mm_setzero_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvt_roundsh_sd (__m128d __A, __mmask8 __B, __m128d __C,
- __m128h __D, const int __R)
-{
- return __builtin_ia32_vcvtsh2sd_mask_round (__D, __C, __A, __B, __R);
-}
-
-extern __inline __m128d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvt_roundsh_sd (__mmask8 __A, __m128d __B, __m128h __C, const int __R)
-{
- return __builtin_ia32_vcvtsh2sd_mask_round (__C, __B,
- _mm_setzero_pd (),
- __A, __R);
-}
-
-#else
-#define _mm_cvt_roundsh_ss(A, B, R) \
- (__builtin_ia32_vcvtsh2ss_mask_round ((B), (A), \
- _mm_setzero_ps (), \
- (__mmask8) -1, (R)))
-
-#define _mm_mask_cvt_roundsh_ss(A, B, C, D, R) \
- (__builtin_ia32_vcvtsh2ss_mask_round ((D), (C), (A), (B), (R)))
-
-#define _mm_maskz_cvt_roundsh_ss(A, B, C, R) \
- (__builtin_ia32_vcvtsh2ss_mask_round ((C), (B), \
- _mm_setzero_ps (), \
- (A), (R)))
-
-#define _mm_cvt_roundsh_sd(A, B, R) \
- (__builtin_ia32_vcvtsh2sd_mask_round ((B), (A), \
- _mm_setzero_pd (), \
- (__mmask8) -1, (R)))
-
-#define _mm_mask_cvt_roundsh_sd(A, B, C, D, R) \
- (__builtin_ia32_vcvtsh2sd_mask_round ((D), (C), (A), (B), (R)))
-
-#define _mm_maskz_cvt_roundsh_sd(A, B, C, R) \
- (__builtin_ia32_vcvtsh2sd_mask_round ((C), (B), \
- _mm_setzero_pd (), \
- (A), (R)))
-
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vcvtss2sh, vcvtsd2sh. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_sh (__m128h __A, __m128 __B)
-{
- return __builtin_ia32_vcvtss2sh_mask_round (__B, __A,
- _mm_setzero_ph (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtss_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128 __D)
-{
- return __builtin_ia32_vcvtss2sh_mask_round (__D, __C, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtss_sh (__mmask8 __A, __m128h __B, __m128 __C)
-{
- return __builtin_ia32_vcvtss2sh_mask_round (__C, __B,
- _mm_setzero_ph (),
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_sh (__m128h __A, __m128d __B)
-{
- return __builtin_ia32_vcvtsd2sh_mask_round (__B, __A,
- _mm_setzero_ph (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsd_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128d __D)
-{
- return __builtin_ia32_vcvtsd2sh_mask_round (__D, __C, __A, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsd_sh (__mmask8 __A, __m128h __B, __m128d __C)
-{
- return __builtin_ia32_vcvtsd2sh_mask_round (__C, __B,
- _mm_setzero_ph (),
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundss_sh (__m128h __A, __m128 __B, const int __R)
-{
- return __builtin_ia32_vcvtss2sh_mask_round (__B, __A,
- _mm_setzero_ph (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvt_roundss_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128 __D,
- const int __R)
-{
- return __builtin_ia32_vcvtss2sh_mask_round (__D, __C, __A, __B, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvt_roundss_sh (__mmask8 __A, __m128h __B, __m128 __C,
- const int __R)
-{
- return __builtin_ia32_vcvtss2sh_mask_round (__C, __B,
- _mm_setzero_ph (),
- __A, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_roundsd_sh (__m128h __A, __m128d __B, const int __R)
-{
- return __builtin_ia32_vcvtsd2sh_mask_round (__B, __A,
- _mm_setzero_ph (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvt_roundsd_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128d __D,
- const int __R)
-{
- return __builtin_ia32_vcvtsd2sh_mask_round (__D, __C, __A, __B, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvt_roundsd_sh (__mmask8 __A, __m128h __B, __m128d __C,
- const int __R)
-{
- return __builtin_ia32_vcvtsd2sh_mask_round (__C, __B,
- _mm_setzero_ph (),
- __A, __R);
-}
-
-#else
-#define _mm_cvt_roundss_sh(A, B, R) \
- (__builtin_ia32_vcvtss2sh_mask_round ((B), (A), \
- _mm_setzero_ph (), \
- (__mmask8) -1, R))
-
-#define _mm_mask_cvt_roundss_sh(A, B, C, D, R) \
- (__builtin_ia32_vcvtss2sh_mask_round ((D), (C), (A), (B), (R)))
-
-#define _mm_maskz_cvt_roundss_sh(A, B, C, R) \
- (__builtin_ia32_vcvtss2sh_mask_round ((C), (B), \
- _mm_setzero_ph (), \
- A, R))
-
-#define _mm_cvt_roundsd_sh(A, B, R) \
- (__builtin_ia32_vcvtsd2sh_mask_round ((B), (A), \
- _mm_setzero_ph (), \
- (__mmask8) -1, R))
-
-#define _mm_mask_cvt_roundsd_sh(A, B, C, D, R) \
- (__builtin_ia32_vcvtsd2sh_mask_round ((D), (C), (A), (B), (R)))
-
-#define _mm_maskz_cvt_roundsd_sh(A, B, C, R) \
- (__builtin_ia32_vcvtsd2sh_mask_round ((C), (B), \
- _mm_setzero_ph (), \
- (A), (R)))
-
-#endif /* __OPTIMIZE__ */
-
/* Intrinsics vfmaddsub[132,213,231]ph. */
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -5840,418 +6691,6 @@ _mm512_maskz_fnmsub_round_ph (__mmask32 __U, __m512h __A, __m512h __B,
#endif /* __OPTIMIZE__ */
-/* Intrinsics vfmadd[132,213,231]sh. */
-extern __inline __m128h
- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmadd_sh (__m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
- __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-#else
-#define _mm_fmadd_round_sh(A, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (-1), (R)))
-#define _mm_mask_fmadd_round_sh(A, U, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (U), (R)))
-#define _mm_mask3_fmadd_round_sh(A, B, C, U, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask3 ((A), (B), (C), (U), (R)))
-#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), (C), (U), (R)))
-
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vfnmadd[132,213,231]sh. */
-extern __inline __m128h
- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
- __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-#else
-#define _mm_fnmadd_round_sh(A, B, C, R) \
- ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (-1), (R)))
-#define _mm_mask_fnmadd_round_sh(A, U, B, C, R) \
- ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (U), (R)))
-#define _mm_mask3_fnmadd_round_sh(A, B, C, U, R) \
- ((__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((A), (B), (C), (U), (R)))
-#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \
- ((__m128h) __builtin_ia32_vfnmaddsh3_maskz ((A), (B), (C), (U), (R)))
-
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vfmsub[132,213,231]sh. */
-extern __inline __m128h
- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmsub_sh (__m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
-{
- return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
- (__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- (__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
- (__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
- __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
- (__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-#else
-#define _mm_fmsub_round_sh(A, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (-1), (R)))
-#define _mm_mask_fmsub_round_sh(A, U, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (U), (R)))
-#define _mm_mask3_fmsub_round_sh(A, B, C, U, R) \
- ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), (B), (C), (U), (R)))
-#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), -(C), (U), (R)))
-
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vfnmsub[132,213,231]sh. */
-extern __inline __m128h
- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- -(__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- -(__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U)
-{
- return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
- -(__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
- -(__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- -(__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fnmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W,
- -(__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W,
- -(__v8hf) __A,
- (__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fnmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A,
- __m128h __B, const int __R)
-{
- return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W,
- -(__v8hf) __A,
- -(__v8hf) __B,
- (__mmask8) __U, __R);
-}
-
-#else
-#define _mm_fnmsub_round_sh(A, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (-1), (R)))
-#define _mm_mask_fnmsub_round_sh(A, U, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (U), (R)))
-#define _mm_mask3_fnmsub_round_sh(A, B, C, U, R) \
- ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), -(B), (C), (U), (R)))
-#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \
- ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), -(B), -(C), (U), (R)))
-
-#endif /* __OPTIMIZE__ */
-
/* Intrinsics vf[,c]maddcph. */
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -6636,400 +7075,6 @@ _mm512_maskz_fmul_round_pch (__mmask16 __A, __m512h __B,
#endif /* __OPTIMIZE__ */
-/* Intrinsics vf[,c]maddcsh. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fcmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
- (__v8hf) __C,
- (__v8hf) __D, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C, __D,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fcmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
- (__v8hf) __C,
- (__v8hf) __D,
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
- (__v8hf) __C,
- (__v8hf) __D, __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C, __D,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
- (__v8hf) __C,
- (__v8hf) __D,
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmadd_sch (__m128h __A, __m128h __B, __m128h __C)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fcmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
- __m128h __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
- (__v8hf) __C,
- (__v8hf) __D,
- __B, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
- __mmask8 __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C,
- __D, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fcmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
- __m128h __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
- (__v8hf) __C,
- (__v8hf) __D,
- __A, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
-{
- return (__m128h)
- __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C,
- __D);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
- __m128h __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
- (__v8hf) __C,
- (__v8hf) __D,
- __B, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask3_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
- __mmask8 __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C,
- __D, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
- __m128h __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
- (__v8hf) __C,
- (__v8hf) __D,
- __A, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
-{
- return (__m128h)
- __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- (__v8hf) __C,
- __D);
-}
-#else
-#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \
- ((__m128h) \
- __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A), \
- (__v8hf) (C), \
- (__v8hf) (D), \
- (B), (E)))
-
-
-#define _mm_mask3_fcmadd_round_sch(A, B, C, D, E) \
- ((__m128h) \
- __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) (A), \
- (__v8hf) (B), \
- (__v8hf) (C), \
- (D), (E)))
-
-#define _mm_maskz_fcmadd_round_sch(A, B, C, D, E) \
- __builtin_ia32_vfcmaddcsh_maskz_round ((B), (C), (D), (A), (E))
-
-#define _mm_fcmadd_round_sch(A, B, C, D) \
- __builtin_ia32_vfcmaddcsh_round ((A), (B), (C), (D))
-
-#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \
- ((__m128h) \
- __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A), \
- (__v8hf) (C), \
- (__v8hf) (D), \
- (B), (E)))
-
-#define _mm_mask3_fmadd_round_sch(A, B, C, D, E) \
- ((__m128h) \
- __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) (A), \
- (__v8hf) (B), \
- (__v8hf) (C), \
- (D), (E)))
-
-#define _mm_maskz_fmadd_round_sch(A, B, C, D, E) \
- __builtin_ia32_vfmaddcsh_maskz_round ((B), (C), (D), (A), (E))
-
-#define _mm_fmadd_round_sch(A, B, C, D) \
- __builtin_ia32_vfmaddcsh_round ((A), (B), (C), (D))
-
-#endif /* __OPTIMIZE__ */
-
-/* Intrinsics vf[,c]mulcsh. */
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fcmul_sch (__m128h __A, __m128h __B)
-{
- return (__m128h)
- __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fcmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
-{
- return (__m128h)
- __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
- (__v8hf) __D,
- (__v8hf) __A,
- __B, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fcmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
-{
- return (__m128h)
- __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
- (__v8hf) __C,
- _mm_setzero_ph (),
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmul_sch (__m128h __A, __m128h __B)
-{
- return (__m128h)
- __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
-{
- return (__m128h)
- __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
- (__v8hf) __D,
- (__v8hf) __A,
- __B, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
-{
- return (__m128h)
- __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
- (__v8hf) __C,
- _mm_setzero_ph (),
- __A, _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fcmul_round_sch (__m128h __A, __m128h __B, const int __D)
-{
- return (__m128h)
- __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
- (__v8hf) __B,
- __D);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fcmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
- __m128h __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
- (__v8hf) __D,
- (__v8hf) __A,
- __B, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fcmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
- const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
- (__v8hf) __C,
- _mm_setzero_ph (),
- __A, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_fmul_round_sch (__m128h __A, __m128h __B, const int __D)
-{
- return (__m128h)
- __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
- (__v8hf) __B, __D);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_fmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
- __m128h __D, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
- (__v8hf) __D,
- (__v8hf) __A,
- __B, __E);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_fmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, const int __E)
-{
- return (__m128h)
- __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
- (__v8hf) __C,
- _mm_setzero_ph (),
- __A, __E);
-}
-
-#else
-#define _mm_fcmul_round_sch(__A, __B, __D) \
- (__m128h) __builtin_ia32_vfcmulcsh_round ((__v8hf) __A, \
- (__v8hf) __B, __D)
-
-#define _mm_mask_fcmul_round_sch(__A, __B, __C, __D, __E) \
- (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C, \
- (__v8hf) __D, \
- (__v8hf) __A, \
- __B, __E)
-
-#define _mm_maskz_fcmul_round_sch(__A, __B, __C, __E) \
- (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B, \
- (__v8hf) __C, \
- _mm_setzero_ph (), \
- __A, __E)
-
-#define _mm_fmul_round_sch(__A, __B, __D) \
- (__m128h) __builtin_ia32_vfmulcsh_round ((__v8hf) __A, \
- (__v8hf) __B, __D)
-
-#define _mm_mask_fmul_round_sch(__A, __B, __C, __D, __E) \
- (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C, \
- (__v8hf) __D, \
- (__v8hf) __A, \
- __B, __E)
-
-#define _mm_maskz_fmul_round_sch(__A, __B, __C, __E) \
- (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B, \
- (__v8hf) __C, \
- _mm_setzero_ph (), \
- __A, __E)
-
-#endif /* __OPTIMIZE__ */
-
#define _MM512_REDUCE_OP(op) \
__m256h __T1 = (__m256h) _mm512_extractf64x4_pd ((__m512d) __A, 0); \
__m256h __T2 = (__m256h) _mm512_extractf64x4_pd ((__m512d) __A, 1); \
@@ -7193,27 +7238,9 @@ _mm512_set1_pch (_Float16 _Complex __A)
#define _mm512_maskz_cmul_round_pch(U, A, B, R) \
_mm512_maskz_fcmul_round_pch ((U), (A), (B), (R))
-#define _mm_mul_sch(A, B) _mm_fmul_sch ((A), (B))
-#define _mm_mask_mul_sch(W, U, A, B) _mm_mask_fmul_sch ((W), (U), (A), (B))
-#define _mm_maskz_mul_sch(U, A, B) _mm_maskz_fmul_sch ((U), (A), (B))
-#define _mm_mul_round_sch(A, B, R) _mm_fmul_round_sch ((A), (B), (R))
-#define _mm_mask_mul_round_sch(W, U, A, B, R) \
- _mm_mask_fmul_round_sch ((W), (U), (A), (B), (R))
-#define _mm_maskz_mul_round_sch(U, A, B, R) \
- _mm_maskz_fmul_round_sch ((U), (A), (B), (R))
-
-#define _mm_cmul_sch(A, B) _mm_fcmul_sch ((A), (B))
-#define _mm_mask_cmul_sch(W, U, A, B) _mm_mask_fcmul_sch ((W), (U), (A), (B))
-#define _mm_maskz_cmul_sch(U, A, B) _mm_maskz_fcmul_sch ((U), (A), (B))
-#define _mm_cmul_round_sch(A, B, R) _mm_fcmul_round_sch ((A), (B), (R))
-#define _mm_mask_cmul_round_sch(W, U, A, B, R) \
- _mm_mask_fcmul_round_sch ((W), (U), (A), (B), (R))
-#define _mm_maskz_cmul_round_sch(U, A, B, R) \
- _mm_maskz_fcmul_round_sch ((U), (A), (B), (R))
-
-#ifdef __DISABLE_AVX512FP16__
-#undef __DISABLE_AVX512FP16__
+#ifdef __DISABLE_AVX512FP16_512__
+#undef __DISABLE_AVX512FP16_512__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX512FP16__ */
+#endif /* __DISABLE_AVX512FP16_512__ */
-#endif /* __AVX512FP16INTRIN_H_INCLUDED */
+#endif /* _AVX512FP16INTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512ifmaintrin.h b/gcc/config/i386/avx512ifmaintrin.h
index fc97f1d..e08078b 100644
--- a/gcc/config/i386/avx512ifmaintrin.h
+++ b/gcc/config/i386/avx512ifmaintrin.h
@@ -28,9 +28,9 @@
#ifndef _AVX512IFMAINTRIN_H_INCLUDED
#define _AVX512IFMAINTRIN_H_INCLUDED
-#ifndef __AVX512IFMA__
+#if !defined (__AVX512IFMA__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512ifma")
+#pragma GCC target("avx512ifma,evex512")
#define __DISABLE_AVX512IFMA__
#endif /* __AVX512IFMA__ */
diff --git a/gcc/config/i386/avx512pfintrin.h b/gcc/config/i386/avx512pfintrin.h
index a547610..58af26f 100644
--- a/gcc/config/i386/avx512pfintrin.h
+++ b/gcc/config/i386/avx512pfintrin.h
@@ -30,7 +30,7 @@
#ifndef __AVX512PF__
#pragma GCC push_options
-#pragma GCC target("avx512pf")
+#pragma GCC target("avx512pf,evex512")
#define __DISABLE_AVX512PF__
#endif /* __AVX512PF__ */
diff --git a/gcc/config/i386/avx512vbmi2intrin.h b/gcc/config/i386/avx512vbmi2intrin.h
index ca00f8a..b7ff07b 100644
--- a/gcc/config/i386/avx512vbmi2intrin.h
+++ b/gcc/config/i386/avx512vbmi2intrin.h
@@ -28,9 +28,9 @@
#ifndef __AVX512VBMI2INTRIN_H_INCLUDED
#define __AVX512VBMI2INTRIN_H_INCLUDED
-#if !defined(__AVX512VBMI2__)
+#if !defined(__AVX512VBMI2__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512vbmi2")
+#pragma GCC target("avx512vbmi2,evex512")
#define __DISABLE_AVX512VBMI2__
#endif /* __AVX512VBMI2__ */
diff --git a/gcc/config/i386/avx512vbmiintrin.h b/gcc/config/i386/avx512vbmiintrin.h
index 5025860..1a7ab4e 100644
--- a/gcc/config/i386/avx512vbmiintrin.h
+++ b/gcc/config/i386/avx512vbmiintrin.h
@@ -28,9 +28,9 @@
#ifndef _AVX512VBMIINTRIN_H_INCLUDED
#define _AVX512VBMIINTRIN_H_INCLUDED
-#ifndef __AVX512VBMI__
+#if !defined (__AVX512VBMI__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512vbmi")
+#pragma GCC target("avx512vbmi,evex512")
#define __DISABLE_AVX512VBMI__
#endif /* __AVX512VBMI__ */
diff --git a/gcc/config/i386/avx512vnniintrin.h b/gcc/config/i386/avx512vnniintrin.h
index e36e2e5..1090703 100644
--- a/gcc/config/i386/avx512vnniintrin.h
+++ b/gcc/config/i386/avx512vnniintrin.h
@@ -28,9 +28,9 @@
#ifndef __AVX512VNNIINTRIN_H_INCLUDED
#define __AVX512VNNIINTRIN_H_INCLUDED
-#if !defined(__AVX512VNNI__)
+#if !defined(__AVX512VNNI__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512vnni")
+#pragma GCC target("avx512vnni,evex512")
#define __DISABLE_AVX512VNNI__
#endif /* __AVX512VNNI__ */
diff --git a/gcc/config/i386/avx512vp2intersectintrin.h b/gcc/config/i386/avx512vp2intersectintrin.h
index 65e2fb1..bf68245 100644
--- a/gcc/config/i386/avx512vp2intersectintrin.h
+++ b/gcc/config/i386/avx512vp2intersectintrin.h
@@ -28,9 +28,9 @@
#ifndef _AVX512VP2INTERSECTINTRIN_H_INCLUDED
#define _AVX512VP2INTERSECTINTRIN_H_INCLUDED
-#if !defined(__AVX512VP2INTERSECT__)
+#if !defined(__AVX512VP2INTERSECT__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512vp2intersect")
+#pragma GCC target("avx512vp2intersect,evex512")
#define __DISABLE_AVX512VP2INTERSECT__
#endif /* __AVX512VP2INTERSECT__ */
diff --git a/gcc/config/i386/avx512vpopcntdqintrin.h b/gcc/config/i386/avx512vpopcntdqintrin.h
index 47897fb..9470a40 100644
--- a/gcc/config/i386/avx512vpopcntdqintrin.h
+++ b/gcc/config/i386/avx512vpopcntdqintrin.h
@@ -28,9 +28,9 @@
#ifndef _AVX512VPOPCNTDQINTRIN_H_INCLUDED
#define _AVX512VPOPCNTDQINTRIN_H_INCLUDED
-#ifndef __AVX512VPOPCNTDQ__
+#if !defined (__AVX512VPOPCNTDQ__) || !defined (__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("avx512vpopcntdq")
+#pragma GCC target("avx512vpopcntdq,evex512")
#define __DISABLE_AVX512VPOPCNTDQ__
#endif /* __AVX512VPOPCNTDQ__ */
diff --git a/gcc/config/i386/constraints.md b/gcc/config/i386/constraints.md
index fd490f3..dc91bd9 100644
--- a/gcc/config/i386/constraints.md
+++ b/gcc/config/i386/constraints.md
@@ -19,7 +19,7 @@
;;; Unused letters:
;;; H
-;;; h j z
+;;; j z
;; Integer register constraints.
;; It is not necessary to define 'r' here.
@@ -371,3 +371,66 @@
(define_address_constraint "Ts"
"Address operand without segment register"
(match_operand 0 "address_no_seg_operand"))
+
+;; Constraint that force to use EGPR, can only adopt to register class.
+(define_register_constraint "jR" "GENERAL_REGS")
+
+(define_register_constraint "jr"
+ "TARGET_APX_EGPR ? GENERAL_GPR16 : GENERAL_REGS")
+
+(define_memory_constraint "jm"
+ "@internal memory operand without GPR32."
+ (and (match_operand 0 "memory_operand")
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_constraint "j<"
+ "@internal auto-dec memory operand without GPR32."
+ (and (and (match_code "mem")
+ (ior (match_test "GET_CODE (XEXP (op, 0)) == PRE_DEC")
+ (match_test "GET_CODE (XEXP (op, 0)) == POST_DEC")))
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_constraint "j>"
+ "@internal auto-dec memory operand without GPR32."
+ (and (and (match_code "mem")
+ (ior (match_test "GET_CODE (XEXP (op, 0)) == PRE_INC")
+ (match_test "GET_CODE (XEXP (op, 0)) == POST_INC")))
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_memory_constraint "jo"
+ "@internal offsetable memory operand without GPR32."
+ (and (and (match_code "mem")
+ (match_test "offsettable_nonstrict_memref_p (op)"))
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_constraint "jV"
+ "@internal non-offsetable memory operand without GPR32."
+ (and (and (match_code "mem")
+ (match_test "memory_address_addr_space_p (GET_MODE (op),
+ XEXP (op, 0),
+ MEM_ADDR_SPACE (op))")
+ (not (match_test "offsettable_nonstrict_memref_p (op)")))
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_address_constraint "jp"
+ "@internal general address operand without GPR32"
+ (and (match_test "address_operand (op, VOIDmode)")
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_special_memory_constraint "ja"
+ "@internal vector memory operand without GPR32."
+ (and (match_operand 0 "vector_memory_operand")
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
+
+(define_address_constraint "jb"
+ "VSIB address operand without EGPR"
+ (and (match_operand 0 "vsib_address_operand")
+ (not (and (match_test "TARGET_APX_EGPR")
+ (match_test "x86_extended_rex2reg_mentioned_p (op)")))))
diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
index 73c1548..75ef271 100644
--- a/gcc/config/i386/cpuid.h
+++ b/gcc/config/i386/cpuid.h
@@ -149,6 +149,8 @@
#define bit_AVXNECONVERT (1 << 5)
#define bit_AVXVNNIINT16 (1 << 10)
#define bit_PREFETCHI (1 << 14)
+#define bit_USER_MSR (1 << 15)
+#define bit_APX_F (1 << 21)
/* Extended State Enumeration Sub-leaf (%eax == 0xd, %ecx == 1) */
#define bit_XSAVEOPT (1 << 0)
diff --git a/gcc/config/i386/driver-i386.cc b/gcc/config/i386/driver-i386.cc
index 08d0aed..a9e54ef 100644
--- a/gcc/config/i386/driver-i386.cc
+++ b/gcc/config/i386/driver-i386.cc
@@ -589,23 +589,14 @@ const char *host_detect_local_cpu (int argc, const char **argv)
if (arch)
{
/* This is unknown family 0x6 CPU. */
- if (has_feature (FEATURE_AVX))
+ if (has_feature (FEATURE_AVX512F))
{
- /* Assume Arrow Lake S. */
- if (has_feature (FEATURE_SM3))
- cpu = "arrowlake-s";
- /* Assume Grand Ridge. */
- else if (has_feature (FEATURE_RAOINT))
- cpu = "grandridge";
/* Assume Granite Rapids D. */
- else if (has_feature (FEATURE_AMX_COMPLEX))
+ if (has_feature (FEATURE_AMX_COMPLEX))
cpu = "graniterapids-d";
/* Assume Granite Rapids. */
else if (has_feature (FEATURE_AMX_FP16))
cpu = "graniterapids";
- /* Assume Sierra Forest. */
- else if (has_feature (FEATURE_AVXVNNIINT8))
- cpu = "sierraforest";
/* Assume Tiger Lake */
else if (has_feature (FEATURE_AVX512VP2INTERSECT))
cpu = "tigerlake";
@@ -618,36 +609,54 @@ const char *host_detect_local_cpu (int argc, const char **argv)
/* Assume Ice Lake Server. */
else if (has_feature (FEATURE_WBNOINVD))
cpu = "icelake-server";
- /* Assume Ice Lake. */
- else if (has_feature (FEATURE_AVX512BITALG))
- cpu = "icelake-client";
- /* Assume Cannon Lake. */
- else if (has_feature (FEATURE_AVX512VBMI))
- cpu = "cannonlake";
- /* Assume Knights Mill. */
- else if (has_feature (FEATURE_AVX5124VNNIW))
- cpu = "knm";
- /* Assume Knights Landing. */
- else if (has_feature (FEATURE_AVX512ER))
- cpu = "knl";
- /* Assume Skylake with AVX-512. */
- else if (has_feature (FEATURE_AVX512F))
- cpu = "skylake-avx512";
- /* Assume Alder Lake */
- else if (has_feature (FEATURE_SERIALIZE))
+ /* Assume Ice Lake. */
+ else if (has_feature (FEATURE_AVX512BITALG))
+ cpu = "icelake-client";
+ /* Assume Cannon Lake. */
+ else if (has_feature (FEATURE_AVX512VBMI))
+ cpu = "cannonlake";
+ /* Assume Knights Mill. */
+ else if (has_feature (FEATURE_AVX5124VNNIW))
+ cpu = "knm";
+ /* Assume Knights Landing. */
+ else if (has_feature (FEATURE_AVX512ER))
+ cpu = "knl";
+ /* Assume Skylake with AVX-512. */
+ else
+ cpu = "skylake-avx512";
+ }
+ else if (has_feature (FEATURE_AVX))
+ {
+ /* Assume Panther Lake. */
+ if (has_feature (FEATURE_PREFETCHI))
+ cpu = "pantherlake";
+ /* Assume Clearwater Forest. */
+ else if (has_feature (FEATURE_USER_MSR))
+ cpu = "clearwaterforest";
+ /* Assume Arrow Lake S. */
+ else if (has_feature (FEATURE_SM3))
+ cpu = "arrowlake-s";
+ /* Assume Grand Ridge. */
+ else if (has_feature (FEATURE_RAOINT))
+ cpu = "grandridge";
+ /* Assume Sierra Forest. */
+ else if (has_feature (FEATURE_AVXVNNIINT8))
+ cpu = "sierraforest";
+ /* Assume Alder Lake. */
+ else if (has_feature (FEATURE_SERIALIZE))
cpu = "alderlake";
- /* Assume Skylake. */
- else if (has_feature (FEATURE_CLFLUSHOPT))
- cpu = "skylake";
- /* Assume Broadwell. */
- else if (has_feature (FEATURE_ADX))
- cpu = "broadwell";
- else if (has_feature (FEATURE_AVX2))
- /* Assume Haswell. */
- cpu = "haswell";
- else
- /* Assume Sandy Bridge. */
- cpu = "sandybridge";
+ /* Assume Skylake. */
+ else if (has_feature (FEATURE_CLFLUSHOPT))
+ cpu = "skylake";
+ /* Assume Broadwell. */
+ else if (has_feature (FEATURE_ADX))
+ cpu = "broadwell";
+ /* Assume Haswell. */
+ else if (has_feature (FEATURE_AVX2))
+ cpu = "haswell";
+ /* Assume Sandy Bridge. */
+ else
+ cpu = "sandybridge";
}
else if (has_feature (FEATURE_SSE4_2))
{
diff --git a/gcc/config/i386/gfniintrin.h b/gcc/config/i386/gfniintrin.h
index ef3dc22..907e7a0 100644
--- a/gcc/config/i386/gfniintrin.h
+++ b/gcc/config/i386/gfniintrin.h
@@ -297,9 +297,53 @@ _mm256_maskz_gf2p8affine_epi64_epi8 (__mmask32 __A, __m256i __B,
#pragma GCC pop_options
#endif /* __GFNIAVX512VLBW__ */
-#if !defined(__GFNI__) || !defined(__AVX512F__) || !defined(__AVX512BW__)
+#if !defined(__GFNI__) || !defined(__EVEX512__) || !defined(__AVX512F__)
#pragma GCC push_options
-#pragma GCC target("gfni,avx512f,avx512bw")
+#pragma GCC target("gfni,avx512f,evex512")
+#define __DISABLE_GFNIAVX512F__
+#endif /* __GFNIAVX512F__ */
+
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_gf2p8mul_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi ((__v64qi) __A,
+ (__v64qi) __B);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_gf2p8affineinv_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
+{
+ return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ((__v64qi) __A,
+ (__v64qi) __B, __C);
+}
+
+extern __inline __m512i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
+{
+ return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi) __A,
+ (__v64qi) __B, __C);
+}
+#else
+#define _mm512_gf2p8affineinv_epi64_epi8(A, B, C) \
+ ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ( \
+ (__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C)))
+#define _mm512_gf2p8affine_epi64_epi8(A, B, C) \
+ ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(C)))
+#endif
+
+#ifdef __DISABLE_GFNIAVX512F__
+#undef __DISABLE_GFNIAVX512F__
+#pragma GCC pop_options
+#endif /* __GFNIAVX512F__ */
+
+#if !defined(__GFNI__) || !defined(__EVEX512__) || !defined(__AVX512BW__)
+#pragma GCC push_options
+#pragma GCC target("gfni,avx512bw,evex512")
#define __DISABLE_GFNIAVX512FBW__
#endif /* __GFNIAVX512FBW__ */
@@ -319,13 +363,6 @@ _mm512_maskz_gf2p8mul_epi8 (__mmask64 __A, __m512i __B, __m512i __C)
return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __B,
(__v64qi) __C, (__v64qi) _mm512_setzero_si512 (), __A);
}
-extern __inline __m512i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_gf2p8mul_epi8 (__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi ((__v64qi) __A,
- (__v64qi) __B);
-}
#ifdef __OPTIMIZE__
extern __inline __m512i
@@ -352,14 +389,6 @@ _mm512_maskz_gf2p8affineinv_epi64_epi8 (__mmask64 __A, __m512i __B,
extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_gf2p8affineinv_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
-{
- return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ((__v64qi) __A,
- (__v64qi) __B, __C);
-}
-
-extern __inline __m512i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_gf2p8affine_epi64_epi8 (__m512i __A, __mmask64 __B, __m512i __C,
__m512i __D, const int __E)
{
@@ -375,13 +404,6 @@ _mm512_maskz_gf2p8affine_epi64_epi8 (__mmask64 __A, __m512i __B, __m512i __C,
return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __B,
(__v64qi) __C, __D, (__v64qi) _mm512_setzero_si512 (), __A);
}
-extern __inline __m512i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
-{
- return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi) __A,
- (__v64qi) __B, __C);
-}
#else
#define _mm512_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \
((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \
@@ -391,9 +413,6 @@ _mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \
(__v64qi)(__m512i)(B), (__v64qi)(__m512i)(C), (int)(D), \
(__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A)))
-#define _mm512_gf2p8affineinv_epi64_epi8(A, B, C) \
- ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ( \
- (__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C)))
#define _mm512_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \
((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(C),\
(__v64qi)(__m512i)(D), (int)(E), (__v64qi)(__m512i)(A), (__mmask64)(B)))
@@ -401,9 +420,6 @@ _mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(B),\
(__v64qi)(__m512i)(C), (int)(D), \
(__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A)))
-#define _mm512_gf2p8affine_epi64_epi8(A, B, C) \
- ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(C)))
#endif
#ifdef __DISABLE_GFNIAVX512FBW__
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index e946312..183029f 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1422,3 +1422,6 @@ DEF_FUNCTION_TYPE (V4SI, V4SI, V4SI, V4SI, INT)
# SHA512 builtins
DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
+
+# USER_MSR builtins
+DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 8738b3b..b90d5cc 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -200,53 +200,53 @@ BDESC (OPTION_MASK_ISA_AVX2, 0, CODE_FOR_avx2_maskstored256, "__builtin_ia32_mas
BDESC (OPTION_MASK_ISA_AVX2, 0, CODE_FOR_avx2_maskstoreq256, "__builtin_ia32_maskstoreq256", IX86_BUILTIN_MASKSTOREQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI_V4DI)
/* AVX512F */
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressstorev16sf_mask, "__builtin_ia32_compressstoresf512_mask", IX86_BUILTIN_COMPRESSPSSTORE512, UNKNOWN, (int) VOID_FTYPE_PV16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressstorev16si_mask, "__builtin_ia32_compressstoresi512_mask", IX86_BUILTIN_PCOMPRESSDSTORE512, UNKNOWN, (int) VOID_FTYPE_PV16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressstorev8df_mask, "__builtin_ia32_compressstoredf512_mask", IX86_BUILTIN_COMPRESSPDSTORE512, UNKNOWN, (int) VOID_FTYPE_PV8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressstorev8di_mask, "__builtin_ia32_compressstoredi512_mask", IX86_BUILTIN_PCOMPRESSQSTORE512, UNKNOWN, (int) VOID_FTYPE_PV8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv16sf_mask, "__builtin_ia32_expandloadsf512_mask", IX86_BUILTIN_EXPANDPSLOAD512, UNKNOWN, (int) V16SF_FTYPE_PCV16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv16sf_maskz, "__builtin_ia32_expandloadsf512_maskz", IX86_BUILTIN_EXPANDPSLOAD512Z, UNKNOWN, (int) V16SF_FTYPE_PCV16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv16si_mask, "__builtin_ia32_expandloadsi512_mask", IX86_BUILTIN_PEXPANDDLOAD512, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv16si_maskz, "__builtin_ia32_expandloadsi512_maskz", IX86_BUILTIN_PEXPANDDLOAD512Z, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv8df_mask, "__builtin_ia32_expandloaddf512_mask", IX86_BUILTIN_EXPANDPDLOAD512, UNKNOWN, (int) V8DF_FTYPE_PCV8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv8df_maskz, "__builtin_ia32_expandloaddf512_maskz", IX86_BUILTIN_EXPANDPDLOAD512Z, UNKNOWN, (int) V8DF_FTYPE_PCV8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv8di_mask, "__builtin_ia32_expandloaddi512_mask", IX86_BUILTIN_PEXPANDQLOAD512, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv8di_maskz, "__builtin_ia32_expandloaddi512_maskz", IX86_BUILTIN_PEXPANDQLOAD512Z, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv16si_mask, "__builtin_ia32_loaddqusi512_mask", IX86_BUILTIN_LOADDQUSI512, UNKNOWN, (int) V16SI_FTYPE_PCINT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv8di_mask, "__builtin_ia32_loaddqudi512_mask", IX86_BUILTIN_LOADDQUDI512, UNKNOWN, (int) V8DI_FTYPE_PCINT64_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv8df_mask, "__builtin_ia32_loadupd512_mask", IX86_BUILTIN_LOADUPD512, UNKNOWN, (int) V8DF_FTYPE_PCDOUBLE_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv16sf_mask, "__builtin_ia32_loadups512_mask", IX86_BUILTIN_LOADUPS512, UNKNOWN, (int) V16SF_FTYPE_PCFLOAT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv16sf_mask, "__builtin_ia32_loadaps512_mask", IX86_BUILTIN_LOADAPS512, UNKNOWN, (int) V16SF_FTYPE_PCV16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv16si_mask, "__builtin_ia32_movdqa32load512_mask", IX86_BUILTIN_MOVDQA32LOAD512, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv8df_mask, "__builtin_ia32_loadapd512_mask", IX86_BUILTIN_LOADAPD512, UNKNOWN, (int) V8DF_FTYPE_PCV8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv8di_mask, "__builtin_ia32_movdqa64load512_mask", IX86_BUILTIN_MOVDQA64LOAD512, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movntv16sf, "__builtin_ia32_movntps512", IX86_BUILTIN_MOVNTPS512, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V16SF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movntv8df, "__builtin_ia32_movntpd512", IX86_BUILTIN_MOVNTPD512, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V8DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movntv8di, "__builtin_ia32_movntdq512", IX86_BUILTIN_MOVNTDQ512, UNKNOWN, (int) VOID_FTYPE_PV8DI_V8DI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movntdqa, "__builtin_ia32_movntdqa512", IX86_BUILTIN_MOVNTDQA512, UNKNOWN, (int) V8DI_FTYPE_PV8DI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev16si_mask, "__builtin_ia32_storedqusi512_mask", IX86_BUILTIN_STOREDQUSI512, UNKNOWN, (int) VOID_FTYPE_PINT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev8di_mask, "__builtin_ia32_storedqudi512_mask", IX86_BUILTIN_STOREDQUDI512, UNKNOWN, (int) VOID_FTYPE_PINT64_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev8df_mask, "__builtin_ia32_storeupd512_mask", IX86_BUILTIN_STOREUPD512, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev8div8si2_mask_store, "__builtin_ia32_pmovusqd512mem_mask", IX86_BUILTIN_PMOVUSQD512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev8div8si2_mask_store, "__builtin_ia32_pmovsqd512mem_mask", IX86_BUILTIN_PMOVSQD512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev8div8si2_mask_store, "__builtin_ia32_pmovqd512mem_mask", IX86_BUILTIN_PMOVQD512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev8div8hi2_mask_store, "__builtin_ia32_pmovusqw512mem_mask", IX86_BUILTIN_PMOVUSQW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev8div8hi2_mask_store, "__builtin_ia32_pmovsqw512mem_mask", IX86_BUILTIN_PMOVSQW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev8div8hi2_mask_store, "__builtin_ia32_pmovqw512mem_mask", IX86_BUILTIN_PMOVQW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev16siv16hi2_mask_store, "__builtin_ia32_pmovusdw512mem_mask", IX86_BUILTIN_PMOVUSDW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev16siv16hi2_mask_store, "__builtin_ia32_pmovsdw512mem_mask", IX86_BUILTIN_PMOVSDW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev16siv16hi2_mask_store, "__builtin_ia32_pmovdw512mem_mask", IX86_BUILTIN_PMOVDW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev8div16qi2_mask_store_2, "__builtin_ia32_pmovqb512mem_mask", IX86_BUILTIN_PMOVQB512_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev8div16qi2_mask_store_2, "__builtin_ia32_pmovusqb512mem_mask", IX86_BUILTIN_PMOVUSQB512_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev8div16qi2_mask_store_2, "__builtin_ia32_pmovsqb512mem_mask", IX86_BUILTIN_PMOVSQB512_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev16siv16qi2_mask_store, "__builtin_ia32_pmovusdb512mem_mask", IX86_BUILTIN_PMOVUSDB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev16siv16qi2_mask_store, "__builtin_ia32_pmovsdb512mem_mask", IX86_BUILTIN_PMOVSDB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev16siv16qi2_mask_store, "__builtin_ia32_pmovdb512mem_mask", IX86_BUILTIN_PMOVDB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev16sf_mask, "__builtin_ia32_storeups512_mask", IX86_BUILTIN_STOREUPS512, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev16sf_mask, "__builtin_ia32_storeaps512_mask", IX86_BUILTIN_STOREAPS512, UNKNOWN, (int) VOID_FTYPE_PV16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev16si_mask, "__builtin_ia32_movdqa32store512_mask", IX86_BUILTIN_MOVDQA32STORE512, UNKNOWN, (int) VOID_FTYPE_PV16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev8df_mask, "__builtin_ia32_storeapd512_mask", IX86_BUILTIN_STOREAPD512, UNKNOWN, (int) VOID_FTYPE_PV8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storev8di_mask, "__builtin_ia32_movdqa64store512_mask", IX86_BUILTIN_MOVDQA64STORE512, UNKNOWN, (int) VOID_FTYPE_PV8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressstorev16sf_mask, "__builtin_ia32_compressstoresf512_mask", IX86_BUILTIN_COMPRESSPSSTORE512, UNKNOWN, (int) VOID_FTYPE_PV16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressstorev16si_mask, "__builtin_ia32_compressstoresi512_mask", IX86_BUILTIN_PCOMPRESSDSTORE512, UNKNOWN, (int) VOID_FTYPE_PV16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressstorev8df_mask, "__builtin_ia32_compressstoredf512_mask", IX86_BUILTIN_COMPRESSPDSTORE512, UNKNOWN, (int) VOID_FTYPE_PV8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressstorev8di_mask, "__builtin_ia32_compressstoredi512_mask", IX86_BUILTIN_PCOMPRESSQSTORE512, UNKNOWN, (int) VOID_FTYPE_PV8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv16sf_mask, "__builtin_ia32_expandloadsf512_mask", IX86_BUILTIN_EXPANDPSLOAD512, UNKNOWN, (int) V16SF_FTYPE_PCV16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv16sf_maskz, "__builtin_ia32_expandloadsf512_maskz", IX86_BUILTIN_EXPANDPSLOAD512Z, UNKNOWN, (int) V16SF_FTYPE_PCV16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv16si_mask, "__builtin_ia32_expandloadsi512_mask", IX86_BUILTIN_PEXPANDDLOAD512, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv16si_maskz, "__builtin_ia32_expandloadsi512_maskz", IX86_BUILTIN_PEXPANDDLOAD512Z, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv8df_mask, "__builtin_ia32_expandloaddf512_mask", IX86_BUILTIN_EXPANDPDLOAD512, UNKNOWN, (int) V8DF_FTYPE_PCV8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv8df_maskz, "__builtin_ia32_expandloaddf512_maskz", IX86_BUILTIN_EXPANDPDLOAD512Z, UNKNOWN, (int) V8DF_FTYPE_PCV8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv8di_mask, "__builtin_ia32_expandloaddi512_mask", IX86_BUILTIN_PEXPANDQLOAD512, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv8di_maskz, "__builtin_ia32_expandloaddi512_maskz", IX86_BUILTIN_PEXPANDQLOAD512Z, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv16si_mask, "__builtin_ia32_loaddqusi512_mask", IX86_BUILTIN_LOADDQUSI512, UNKNOWN, (int) V16SI_FTYPE_PCINT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv8di_mask, "__builtin_ia32_loaddqudi512_mask", IX86_BUILTIN_LOADDQUDI512, UNKNOWN, (int) V8DI_FTYPE_PCINT64_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv8df_mask, "__builtin_ia32_loadupd512_mask", IX86_BUILTIN_LOADUPD512, UNKNOWN, (int) V8DF_FTYPE_PCDOUBLE_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv16sf_mask, "__builtin_ia32_loadups512_mask", IX86_BUILTIN_LOADUPS512, UNKNOWN, (int) V16SF_FTYPE_PCFLOAT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv16sf_mask, "__builtin_ia32_loadaps512_mask", IX86_BUILTIN_LOADAPS512, UNKNOWN, (int) V16SF_FTYPE_PCV16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv16si_mask, "__builtin_ia32_movdqa32load512_mask", IX86_BUILTIN_MOVDQA32LOAD512, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv8df_mask, "__builtin_ia32_loadapd512_mask", IX86_BUILTIN_LOADAPD512, UNKNOWN, (int) V8DF_FTYPE_PCV8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv8di_mask, "__builtin_ia32_movdqa64load512_mask", IX86_BUILTIN_MOVDQA64LOAD512, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movntv16sf, "__builtin_ia32_movntps512", IX86_BUILTIN_MOVNTPS512, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V16SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movntv8df, "__builtin_ia32_movntpd512", IX86_BUILTIN_MOVNTPD512, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V8DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movntv8di, "__builtin_ia32_movntdq512", IX86_BUILTIN_MOVNTDQ512, UNKNOWN, (int) VOID_FTYPE_PV8DI_V8DI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movntdqa, "__builtin_ia32_movntdqa512", IX86_BUILTIN_MOVNTDQA512, UNKNOWN, (int) V8DI_FTYPE_PV8DI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev16si_mask, "__builtin_ia32_storedqusi512_mask", IX86_BUILTIN_STOREDQUSI512, UNKNOWN, (int) VOID_FTYPE_PINT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev8di_mask, "__builtin_ia32_storedqudi512_mask", IX86_BUILTIN_STOREDQUDI512, UNKNOWN, (int) VOID_FTYPE_PINT64_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev8df_mask, "__builtin_ia32_storeupd512_mask", IX86_BUILTIN_STOREUPD512, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev8div8si2_mask_store, "__builtin_ia32_pmovusqd512mem_mask", IX86_BUILTIN_PMOVUSQD512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev8div8si2_mask_store, "__builtin_ia32_pmovsqd512mem_mask", IX86_BUILTIN_PMOVSQD512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev8div8si2_mask_store, "__builtin_ia32_pmovqd512mem_mask", IX86_BUILTIN_PMOVQD512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev8div8hi2_mask_store, "__builtin_ia32_pmovusqw512mem_mask", IX86_BUILTIN_PMOVUSQW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev8div8hi2_mask_store, "__builtin_ia32_pmovsqw512mem_mask", IX86_BUILTIN_PMOVSQW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev8div8hi2_mask_store, "__builtin_ia32_pmovqw512mem_mask", IX86_BUILTIN_PMOVQW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev16siv16hi2_mask_store, "__builtin_ia32_pmovusdw512mem_mask", IX86_BUILTIN_PMOVUSDW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev16siv16hi2_mask_store, "__builtin_ia32_pmovsdw512mem_mask", IX86_BUILTIN_PMOVSDW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev16siv16hi2_mask_store, "__builtin_ia32_pmovdw512mem_mask", IX86_BUILTIN_PMOVDW512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev8div16qi2_mask_store_2, "__builtin_ia32_pmovqb512mem_mask", IX86_BUILTIN_PMOVQB512_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev8div16qi2_mask_store_2, "__builtin_ia32_pmovusqb512mem_mask", IX86_BUILTIN_PMOVUSQB512_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev8div16qi2_mask_store_2, "__builtin_ia32_pmovsqb512mem_mask", IX86_BUILTIN_PMOVSQB512_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev16siv16qi2_mask_store, "__builtin_ia32_pmovusdb512mem_mask", IX86_BUILTIN_PMOVUSDB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev16siv16qi2_mask_store, "__builtin_ia32_pmovsdb512mem_mask", IX86_BUILTIN_PMOVSDB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev16siv16qi2_mask_store, "__builtin_ia32_pmovdb512mem_mask", IX86_BUILTIN_PMOVDB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev16sf_mask, "__builtin_ia32_storeups512_mask", IX86_BUILTIN_STOREUPS512, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev16sf_mask, "__builtin_ia32_storeaps512_mask", IX86_BUILTIN_STOREAPS512, UNKNOWN, (int) VOID_FTYPE_PV16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev16si_mask, "__builtin_ia32_movdqa32store512_mask", IX86_BUILTIN_MOVDQA32STORE512, UNKNOWN, (int) VOID_FTYPE_PV16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev8df_mask, "__builtin_ia32_storeapd512_mask", IX86_BUILTIN_STOREAPD512, UNKNOWN, (int) VOID_FTYPE_PV8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_storev8di_mask, "__builtin_ia32_movdqa64store512_mask", IX86_BUILTIN_MOVDQA64STORE512, UNKNOWN, (int) VOID_FTYPE_PV8DI_V8DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loaddf_mask, "__builtin_ia32_loadsd_mask", IX86_BUILTIN_LOADSD_MASK, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE_V2DF_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadsf_mask, "__builtin_ia32_loadss_mask", IX86_BUILTIN_LOADSS_MASK, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT_V4SF_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_storedf_mask, "__builtin_ia32_storesd_mask", IX86_BUILTIN_STORESD_MASK, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF_UQI)
@@ -293,14 +293,14 @@ BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_CMPCCXADD, CODE_FOR_cmpccxadd_si,
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_CMPCCXADD, CODE_FOR_cmpccxadd_di, "__builtin_ia32_cmpccxadd64", IX86_BUILTIN_CMPCCXADD64, UNKNOWN, (int) LONGLONG_FTYPE_PLONGLONG_LONGLONG_LONGLONG_INT)
/* AVX512BW */
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_loadv32hi_mask, "__builtin_ia32_loaddquhi512_mask", IX86_BUILTIN_LOADDQUHI512_MASK, UNKNOWN, (int) V32HI_FTYPE_PCSHORT_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_loadv64qi_mask, "__builtin_ia32_loaddquqi512_mask", IX86_BUILTIN_LOADDQUQI512_MASK, UNKNOWN, (int) V64QI_FTYPE_PCCHAR_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_storev32hi_mask, "__builtin_ia32_storedquhi512_mask", IX86_BUILTIN_STOREDQUHI512_MASK, UNKNOWN, (int) VOID_FTYPE_PSHORT_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_storev64qi_mask, "__builtin_ia32_storedquqi512_mask", IX86_BUILTIN_STOREDQUQI512_MASK, UNKNOWN, (int) VOID_FTYPE_PCHAR_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_loadv32hi_mask, "__builtin_ia32_loaddquhi512_mask", IX86_BUILTIN_LOADDQUHI512_MASK, UNKNOWN, (int) V32HI_FTYPE_PCSHORT_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_loadv64qi_mask, "__builtin_ia32_loaddquqi512_mask", IX86_BUILTIN_LOADDQUQI512_MASK, UNKNOWN, (int) V64QI_FTYPE_PCCHAR_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_storev32hi_mask, "__builtin_ia32_storedquhi512_mask", IX86_BUILTIN_STOREDQUHI512_MASK, UNKNOWN, (int) VOID_FTYPE_PSHORT_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_storev64qi_mask, "__builtin_ia32_storedquqi512_mask", IX86_BUILTIN_STOREDQUQI512_MASK, UNKNOWN, (int) VOID_FTYPE_PCHAR_V64QI_UDI)
/* AVX512VP2INTERSECT */
-BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT, CODE_FOR_nothing, "__builtin_ia32_2intersectd512", IX86_BUILTIN_2INTERSECTD512, UNKNOWN, (int) VOID_FTYPE_PUHI_PUHI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT, CODE_FOR_nothing, "__builtin_ia32_2intersectq512", IX86_BUILTIN_2INTERSECTQ512, UNKNOWN, (int) VOID_FTYPE_PUQI_PUQI_V8DI_V8DI)
+BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT | OPTION_MASK_ISA2_EVEX512, CODE_FOR_nothing, "__builtin_ia32_2intersectd512", IX86_BUILTIN_2INTERSECTD512, UNKNOWN, (int) VOID_FTYPE_PUHI_PUHI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT | OPTION_MASK_ISA2_EVEX512, CODE_FOR_nothing, "__builtin_ia32_2intersectq512", IX86_BUILTIN_2INTERSECTQ512, UNKNOWN, (int) VOID_FTYPE_PUQI_PUQI_V8DI_V8DI)
BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT, CODE_FOR_nothing, "__builtin_ia32_2intersectd256", IX86_BUILTIN_2INTERSECTD256, UNKNOWN, (int) VOID_FTYPE_PUQI_PUQI_V8SI_V8SI)
BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT, CODE_FOR_nothing, "__builtin_ia32_2intersectq256", IX86_BUILTIN_2INTERSECTQ256, UNKNOWN, (int) VOID_FTYPE_PUQI_PUQI_V4DI_V4DI)
BDESC (0, OPTION_MASK_ISA2_AVX512VP2INTERSECT, CODE_FOR_nothing, "__builtin_ia32_2intersectd128", IX86_BUILTIN_2INTERSECTD128, UNKNOWN, (int) VOID_FTYPE_PUQI_PUQI_V4SI_V4SI)
@@ -407,9 +407,9 @@ BDESC (OPTION_MASK_ISA_AVX512BW | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl
BDESC (OPTION_MASK_ISA_AVX512BW | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_ss_truncatev16hiv16qi2_mask_store, "__builtin_ia32_pmovswb256mem_mask", IX86_BUILTIN_PMOVSWB256_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_us_truncatev8hiv8qi2_mask_store_2, "__builtin_ia32_pmovuswb128mem_mask", IX86_BUILTIN_PMOVUSWB128_MEM, UNKNOWN, (int) VOID_FTYPE_PUDI_V8HI_UQI)
BDESC (OPTION_MASK_ISA_AVX512BW | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_us_truncatev16hiv16qi2_mask_store, "__builtin_ia32_pmovuswb256mem_mask", IX86_BUILTIN_PMOVUSWB256_MEM, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16HI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_us_truncatev32hiv32qi2_mask_store, "__builtin_ia32_pmovuswb512mem_mask", IX86_BUILTIN_PMOVUSWB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ss_truncatev32hiv32qi2_mask_store, "__builtin_ia32_pmovswb512mem_mask", IX86_BUILTIN_PMOVSWB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_truncatev32hiv32qi2_mask_store, "__builtin_ia32_pmovwb512mem_mask", IX86_BUILTIN_PMOVWB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_us_truncatev32hiv32qi2_mask_store, "__builtin_ia32_pmovuswb512mem_mask", IX86_BUILTIN_PMOVUSWB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ss_truncatev32hiv32qi2_mask_store, "__builtin_ia32_pmovswb512mem_mask", IX86_BUILTIN_PMOVSWB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_truncatev32hiv32qi2_mask_store, "__builtin_ia32_pmovwb512mem_mask", IX86_BUILTIN_PMOVWB512_MEM, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32HI_USI)
/* AVX512FP16 */
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_loadhf_mask, "__builtin_ia32_loadsh_mask", IX86_BUILTIN_LOADSH_MASK, UNKNOWN, (int) V8HF_FTYPE_PCFLOAT16_V8HF_UQI)
@@ -430,17 +430,17 @@ BDESC (OPTION_MASK_ISA_PKU, 0, CODE_FOR_rdpkru, "__builtin_ia32_rdpkru", IX86_B
BDESC (OPTION_MASK_ISA_PKU, 0, CODE_FOR_wrpkru, "__builtin_ia32_wrpkru", IX86_BUILTIN_WRPKRU, UNKNOWN, (int) VOID_FTYPE_UNSIGNED)
/* VBMI2 */
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_compressstorev64qi_mask, "__builtin_ia32_compressstoreuqi512_mask", IX86_BUILTIN_PCOMPRESSBSTORE512, UNKNOWN, (int) VOID_FTYPE_PV64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_compressstorev32hi_mask, "__builtin_ia32_compressstoreuhi512_mask", IX86_BUILTIN_PCOMPRESSWSTORE512, UNKNOWN, (int) VOID_FTYPE_PV32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_compressstorev64qi_mask, "__builtin_ia32_compressstoreuqi512_mask", IX86_BUILTIN_PCOMPRESSBSTORE512, UNKNOWN, (int) VOID_FTYPE_PV64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_compressstorev32hi_mask, "__builtin_ia32_compressstoreuhi512_mask", IX86_BUILTIN_PCOMPRESSWSTORE512, UNKNOWN, (int) VOID_FTYPE_PV32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressstorev32qi_mask, "__builtin_ia32_compressstoreuqi256_mask", IX86_BUILTIN_PCOMPRESSBSTORE256, UNKNOWN, (int) VOID_FTYPE_PV32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressstorev16qi_mask, "__builtin_ia32_compressstoreuqi128_mask", IX86_BUILTIN_PCOMPRESSBSTORE128, UNKNOWN, (int) VOID_FTYPE_PV16QI_V16QI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressstorev16hi_mask, "__builtin_ia32_compressstoreuhi256_mask", IX86_BUILTIN_PCOMPRESSWSTORE256, UNKNOWN, (int) VOID_FTYPE_PV16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressstorev8hi_mask, "__builtin_ia32_compressstoreuhi128_mask", IX86_BUILTIN_PCOMPRESSWSTORE128, UNKNOWN, (int) VOID_FTYPE_PV8HI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv64qi_mask, "__builtin_ia32_expandloadqi512_mask", IX86_BUILTIN_PEXPANDBLOAD512, UNKNOWN, (int) V64QI_FTYPE_PCV64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv64qi_maskz, "__builtin_ia32_expandloadqi512_maskz", IX86_BUILTIN_PEXPANDBLOAD512Z, UNKNOWN, (int) V64QI_FTYPE_PCV64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv32hi_mask, "__builtin_ia32_expandloadhi512_mask", IX86_BUILTIN_PEXPANDWLOAD512, UNKNOWN, (int) V32HI_FTYPE_PCV32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv32hi_maskz, "__builtin_ia32_expandloadhi512_maskz", IX86_BUILTIN_PEXPANDWLOAD512Z, UNKNOWN, (int) V32HI_FTYPE_PCV32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv64qi_mask, "__builtin_ia32_expandloadqi512_mask", IX86_BUILTIN_PEXPANDBLOAD512, UNKNOWN, (int) V64QI_FTYPE_PCV64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv64qi_maskz, "__builtin_ia32_expandloadqi512_maskz", IX86_BUILTIN_PEXPANDBLOAD512Z, UNKNOWN, (int) V64QI_FTYPE_PCV64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv32hi_mask, "__builtin_ia32_expandloadhi512_mask", IX86_BUILTIN_PEXPANDWLOAD512, UNKNOWN, (int) V32HI_FTYPE_PCV32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv32hi_maskz, "__builtin_ia32_expandloadhi512_maskz", IX86_BUILTIN_PEXPANDWLOAD512Z, UNKNOWN, (int) V32HI_FTYPE_PCV32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv32qi_mask, "__builtin_ia32_expandloadqi256_mask", IX86_BUILTIN_PEXPANDBLOAD256, UNKNOWN, (int) V32QI_FTYPE_PCV32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv32qi_maskz, "__builtin_ia32_expandloadqi256_maskz", IX86_BUILTIN_PEXPANDBLOAD256Z, UNKNOWN, (int) V32QI_FTYPE_PCV32QI_V32QI_USI)
@@ -1360,291 +1360,291 @@ BDESC (OPTION_MASK_ISA_BMI2, 0, CODE_FOR_bmi2_pext_si3, "__builtin_ia32_pext_si"
BDESC (OPTION_MASK_ISA_BMI2 | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_bmi2_pext_di3, "__builtin_ia32_pext_di", IX86_BUILTIN_PEXT64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64)
/* AVX512F */
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_si512_256si, "__builtin_ia32_si512_256si", IX86_BUILTIN_SI512_SI256, UNKNOWN, (int) V16SI_FTYPE_V8SI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ps512_256ps, "__builtin_ia32_ps512_256ps", IX86_BUILTIN_PS512_PS256, UNKNOWN, (int) V16SF_FTYPE_V8SF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_pd512_256pd, "__builtin_ia32_pd512_256pd", IX86_BUILTIN_PD512_PD256, UNKNOWN, (int) V8DF_FTYPE_V4DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_si512_si, "__builtin_ia32_si512_si", IX86_BUILTIN_SI512_SI, UNKNOWN, (int) V16SI_FTYPE_V4SI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ps512_ps, "__builtin_ia32_ps512_ps", IX86_BUILTIN_PS512_PS, UNKNOWN, (int) V16SF_FTYPE_V4SF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_pd512_pd, "__builtin_ia32_pd512_pd", IX86_BUILTIN_PD512_PD, UNKNOWN, (int) V8DF_FTYPE_V2DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_alignv16si_mask, "__builtin_ia32_alignd512_mask", IX86_BUILTIN_ALIGND512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_alignv8di_mask, "__builtin_ia32_alignq512_mask", IX86_BUILTIN_ALIGNQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_blendmv16si, "__builtin_ia32_blendmd_512_mask", IX86_BUILTIN_BLENDMD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_blendmv8df, "__builtin_ia32_blendmpd_512_mask", IX86_BUILTIN_BLENDMPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_blendmv16sf, "__builtin_ia32_blendmps_512_mask", IX86_BUILTIN_BLENDMPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_blendmv8di, "__builtin_ia32_blendmq_512_mask", IX86_BUILTIN_BLENDMQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_broadcastv16sf_mask, "__builtin_ia32_broadcastf32x4_512", IX86_BUILTIN_BROADCASTF32X4_512, UNKNOWN, (int) V16SF_FTYPE_V4SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_broadcastv8df_mask, "__builtin_ia32_broadcastf64x4_512", IX86_BUILTIN_BROADCASTF64X4_512, UNKNOWN, (int) V8DF_FTYPE_V4DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_broadcastv16si_mask, "__builtin_ia32_broadcasti32x4_512", IX86_BUILTIN_BROADCASTI32X4_512, UNKNOWN, (int) V16SI_FTYPE_V4SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_broadcastv8di_mask, "__builtin_ia32_broadcasti64x4_512", IX86_BUILTIN_BROADCASTI64X4_512, UNKNOWN, (int) V8DI_FTYPE_V4DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_dupv8df_mask, "__builtin_ia32_broadcastsd512", IX86_BUILTIN_BROADCASTSD512, UNKNOWN, (int) V8DF_FTYPE_V2DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_dupv16sf_mask, "__builtin_ia32_broadcastss512", IX86_BUILTIN_BROADCASTSS512, UNKNOWN, (int) V16SF_FTYPE_V4SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cmpv16si3_mask, "__builtin_ia32_cmpd512_mask", IX86_BUILTIN_CMPD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_INT_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cmpv8di3_mask, "__builtin_ia32_cmpq512_mask", IX86_BUILTIN_CMPQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_INT_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressv8df_mask, "__builtin_ia32_compressdf512_mask", IX86_BUILTIN_COMPRESSPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressv16sf_mask, "__builtin_ia32_compresssf512_mask", IX86_BUILTIN_COMPRESSPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_floatv8siv8df2_mask, "__builtin_ia32_cvtdq2pd512_mask", IX86_BUILTIN_CVTDQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SI_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vcvtps2ph512_mask_sae, "__builtin_ia32_vcvtps2ph512_mask", IX86_BUILTIN_CVTPS2PH512, UNKNOWN, (int) V16HI_FTYPE_V16SF_INT_V16HI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_floatunsv8siv8df2_mask, "__builtin_ia32_cvtudq2pd512_mask", IX86_BUILTIN_CVTUDQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SI_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_si512_256si, "__builtin_ia32_si512_256si", IX86_BUILTIN_SI512_SI256, UNKNOWN, (int) V16SI_FTYPE_V8SI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ps512_256ps, "__builtin_ia32_ps512_256ps", IX86_BUILTIN_PS512_PS256, UNKNOWN, (int) V16SF_FTYPE_V8SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_pd512_256pd, "__builtin_ia32_pd512_256pd", IX86_BUILTIN_PD512_PD256, UNKNOWN, (int) V8DF_FTYPE_V4DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_si512_si, "__builtin_ia32_si512_si", IX86_BUILTIN_SI512_SI, UNKNOWN, (int) V16SI_FTYPE_V4SI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ps512_ps, "__builtin_ia32_ps512_ps", IX86_BUILTIN_PS512_PS, UNKNOWN, (int) V16SF_FTYPE_V4SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_pd512_pd, "__builtin_ia32_pd512_pd", IX86_BUILTIN_PD512_PD, UNKNOWN, (int) V8DF_FTYPE_V2DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_alignv16si_mask, "__builtin_ia32_alignd512_mask", IX86_BUILTIN_ALIGND512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_alignv8di_mask, "__builtin_ia32_alignq512_mask", IX86_BUILTIN_ALIGNQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_blendmv16si, "__builtin_ia32_blendmd_512_mask", IX86_BUILTIN_BLENDMD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_blendmv8df, "__builtin_ia32_blendmpd_512_mask", IX86_BUILTIN_BLENDMPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_blendmv16sf, "__builtin_ia32_blendmps_512_mask", IX86_BUILTIN_BLENDMPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_blendmv8di, "__builtin_ia32_blendmq_512_mask", IX86_BUILTIN_BLENDMQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_broadcastv16sf_mask, "__builtin_ia32_broadcastf32x4_512", IX86_BUILTIN_BROADCASTF32X4_512, UNKNOWN, (int) V16SF_FTYPE_V4SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_broadcastv8df_mask, "__builtin_ia32_broadcastf64x4_512", IX86_BUILTIN_BROADCASTF64X4_512, UNKNOWN, (int) V8DF_FTYPE_V4DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_broadcastv16si_mask, "__builtin_ia32_broadcasti32x4_512", IX86_BUILTIN_BROADCASTI32X4_512, UNKNOWN, (int) V16SI_FTYPE_V4SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_broadcastv8di_mask, "__builtin_ia32_broadcasti64x4_512", IX86_BUILTIN_BROADCASTI64X4_512, UNKNOWN, (int) V8DI_FTYPE_V4DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_dupv8df_mask, "__builtin_ia32_broadcastsd512", IX86_BUILTIN_BROADCASTSD512, UNKNOWN, (int) V8DF_FTYPE_V2DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_dupv16sf_mask, "__builtin_ia32_broadcastss512", IX86_BUILTIN_BROADCASTSS512, UNKNOWN, (int) V16SF_FTYPE_V4SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cmpv16si3_mask, "__builtin_ia32_cmpd512_mask", IX86_BUILTIN_CMPD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_INT_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cmpv8di3_mask, "__builtin_ia32_cmpq512_mask", IX86_BUILTIN_CMPQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_INT_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressv8df_mask, "__builtin_ia32_compressdf512_mask", IX86_BUILTIN_COMPRESSPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressv16sf_mask, "__builtin_ia32_compresssf512_mask", IX86_BUILTIN_COMPRESSPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatv8siv8df2_mask, "__builtin_ia32_cvtdq2pd512_mask", IX86_BUILTIN_CVTDQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SI_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vcvtps2ph512_mask_sae, "__builtin_ia32_vcvtps2ph512_mask", IX86_BUILTIN_CVTPS2PH512, UNKNOWN, (int) V16HI_FTYPE_V16SF_INT_V16HI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatunsv8siv8df2_mask, "__builtin_ia32_cvtudq2pd512_mask", IX86_BUILTIN_CVTUDQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SI_V8DF_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_cvtusi2sd32, "__builtin_ia32_cvtusi2sd32", IX86_BUILTIN_CVTUSI2SD32, UNKNOWN, (int) V2DF_FTYPE_V2DF_UINT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv8df_mask, "__builtin_ia32_expanddf512_mask", IX86_BUILTIN_EXPANDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv8df_maskz, "__builtin_ia32_expanddf512_maskz", IX86_BUILTIN_EXPANDPD512Z, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv16sf_mask, "__builtin_ia32_expandsf512_mask", IX86_BUILTIN_EXPANDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv16sf_maskz, "__builtin_ia32_expandsf512_maskz", IX86_BUILTIN_EXPANDPS512Z, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vextractf32x4_mask, "__builtin_ia32_extractf32x4_mask", IX86_BUILTIN_EXTRACTF32X4, UNKNOWN, (int) V4SF_FTYPE_V16SF_INT_V4SF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vextractf64x4_mask, "__builtin_ia32_extractf64x4_mask", IX86_BUILTIN_EXTRACTF64X4, UNKNOWN, (int) V4DF_FTYPE_V8DF_INT_V4DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vextracti32x4_mask, "__builtin_ia32_extracti32x4_mask", IX86_BUILTIN_EXTRACTI32X4, UNKNOWN, (int) V4SI_FTYPE_V16SI_INT_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vextracti64x4_mask, "__builtin_ia32_extracti64x4_mask", IX86_BUILTIN_EXTRACTI64X4, UNKNOWN, (int) V4DI_FTYPE_V8DI_INT_V4DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vinsertf32x4_mask, "__builtin_ia32_insertf32x4_mask", IX86_BUILTIN_INSERTF32X4, UNKNOWN, (int) V16SF_FTYPE_V16SF_V4SF_INT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vinsertf64x4_mask, "__builtin_ia32_insertf64x4_mask", IX86_BUILTIN_INSERTF64X4, UNKNOWN, (int) V8DF_FTYPE_V8DF_V4DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vinserti32x4_mask, "__builtin_ia32_inserti32x4_mask", IX86_BUILTIN_INSERTI32X4, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vinserti64x4_mask, "__builtin_ia32_inserti64x4_mask", IX86_BUILTIN_INSERTI64X4, UNKNOWN, (int) V8DI_FTYPE_V8DI_V4DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv8df_mask, "__builtin_ia32_movapd512_mask", IX86_BUILTIN_MOVAPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv16sf_mask, "__builtin_ia32_movaps512_mask", IX86_BUILTIN_MOVAPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movddup512_mask, "__builtin_ia32_movddup512_mask", IX86_BUILTIN_MOVDDUP512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv16si_mask, "__builtin_ia32_movdqa32_512_mask", IX86_BUILTIN_MOVDQA32_512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_loadv8di_mask, "__builtin_ia32_movdqa64_512_mask", IX86_BUILTIN_MOVDQA64_512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movshdup512_mask, "__builtin_ia32_movshdup512_mask", IX86_BUILTIN_MOVSHDUP512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movsldup512_mask, "__builtin_ia32_movsldup512_mask", IX86_BUILTIN_MOVSLDUP512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_absv16si2_mask, "__builtin_ia32_pabsd512_mask", IX86_BUILTIN_PABSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_absv8di2_mask, "__builtin_ia32_pabsq512_mask", IX86_BUILTIN_PABSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_addv16si3_mask, "__builtin_ia32_paddd512_mask", IX86_BUILTIN_PADDD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_addv8di3_mask, "__builtin_ia32_paddq512_mask", IX86_BUILTIN_PADDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_andv16si3_mask, "__builtin_ia32_pandd512_mask", IX86_BUILTIN_PANDD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_andnotv16si3_mask, "__builtin_ia32_pandnd512_mask", IX86_BUILTIN_PANDND512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_andnotv8di3_mask, "__builtin_ia32_pandnq512_mask", IX86_BUILTIN_PANDNQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_andv8di3_mask, "__builtin_ia32_pandq512_mask", IX86_BUILTIN_PANDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_dupv16si_mask, "__builtin_ia32_pbroadcastd512", IX86_BUILTIN_PBROADCASTD512, UNKNOWN, (int) V16SI_FTYPE_V4SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_dup_gprv16si_mask, "__builtin_ia32_pbroadcastd512_gpr_mask", IX86_BUILTIN_PBROADCASTD512_GPR, UNKNOWN, (int) V16SI_FTYPE_SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512CD, 0, CODE_FOR_avx512cd_maskb_vec_dupv8di, "__builtin_ia32_broadcastmb512", IX86_BUILTIN_PBROADCASTMB512, UNKNOWN, (int) V8DI_FTYPE_UQI)
-BDESC (OPTION_MASK_ISA_AVX512CD, 0, CODE_FOR_avx512cd_maskw_vec_dupv16si, "__builtin_ia32_broadcastmw512", IX86_BUILTIN_PBROADCASTMW512, UNKNOWN, (int) V16SI_FTYPE_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_dupv8di_mask, "__builtin_ia32_pbroadcastq512", IX86_BUILTIN_PBROADCASTQ512, UNKNOWN, (int) V8DI_FTYPE_V2DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_dup_gprv8di_mask, "__builtin_ia32_pbroadcastq512_gpr_mask", IX86_BUILTIN_PBROADCASTQ512_GPR, UNKNOWN, (int) V8DI_FTYPE_DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_eqv16si3_mask, "__builtin_ia32_pcmpeqd512_mask", IX86_BUILTIN_PCMPEQD512_MASK, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_eqv8di3_mask, "__builtin_ia32_pcmpeqq512_mask", IX86_BUILTIN_PCMPEQQ512_MASK, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_gtv16si3_mask, "__builtin_ia32_pcmpgtd512_mask", IX86_BUILTIN_PCMPGTD512_MASK, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_gtv8di3_mask, "__builtin_ia32_pcmpgtq512_mask", IX86_BUILTIN_PCMPGTQ512_MASK, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressv16si_mask, "__builtin_ia32_compresssi512_mask", IX86_BUILTIN_PCOMPRESSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_compressv8di_mask, "__builtin_ia32_compressdi512_mask", IX86_BUILTIN_PCOMPRESSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv16si_mask, "__builtin_ia32_expandsi512_mask", IX86_BUILTIN_PEXPANDD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv16si_maskz, "__builtin_ia32_expandsi512_maskz", IX86_BUILTIN_PEXPANDD512Z, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_expandv8di_mask, "__builtin_ia32_expanddi512_mask", IX86_BUILTIN_PEXPANDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_expandv8di_maskz, "__builtin_ia32_expanddi512_maskz", IX86_BUILTIN_PEXPANDQ512Z, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_smaxv16si3_mask, "__builtin_ia32_pmaxsd512_mask", IX86_BUILTIN_PMAXSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_smaxv8di3_mask, "__builtin_ia32_pmaxsq512_mask", IX86_BUILTIN_PMAXSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_umaxv16si3_mask, "__builtin_ia32_pmaxud512_mask", IX86_BUILTIN_PMAXUD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_umaxv8di3_mask, "__builtin_ia32_pmaxuq512_mask", IX86_BUILTIN_PMAXUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sminv16si3_mask, "__builtin_ia32_pminsd512_mask", IX86_BUILTIN_PMINSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sminv8di3_mask, "__builtin_ia32_pminsq512_mask", IX86_BUILTIN_PMINSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_uminv16si3_mask, "__builtin_ia32_pminud512_mask", IX86_BUILTIN_PMINUD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_uminv8di3_mask, "__builtin_ia32_pminuq512_mask", IX86_BUILTIN_PMINUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev16siv16qi2_mask, "__builtin_ia32_pmovdb512_mask", IX86_BUILTIN_PMOVDB512, UNKNOWN, (int) V16QI_FTYPE_V16SI_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev16siv16hi2_mask, "__builtin_ia32_pmovdw512_mask", IX86_BUILTIN_PMOVDW512, UNKNOWN, (int) V16HI_FTYPE_V16SI_V16HI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev8div16qi2_mask, "__builtin_ia32_pmovqb512_mask", IX86_BUILTIN_PMOVQB512, UNKNOWN, (int) V16QI_FTYPE_V8DI_V16QI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev8div8si2_mask, "__builtin_ia32_pmovqd512_mask", IX86_BUILTIN_PMOVQD512, UNKNOWN, (int) V8SI_FTYPE_V8DI_V8SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_truncatev8div8hi2_mask, "__builtin_ia32_pmovqw512_mask", IX86_BUILTIN_PMOVQW512, UNKNOWN, (int) V8HI_FTYPE_V8DI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev16siv16qi2_mask, "__builtin_ia32_pmovsdb512_mask", IX86_BUILTIN_PMOVSDB512, UNKNOWN, (int) V16QI_FTYPE_V16SI_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev16siv16hi2_mask, "__builtin_ia32_pmovsdw512_mask", IX86_BUILTIN_PMOVSDW512, UNKNOWN, (int) V16HI_FTYPE_V16SI_V16HI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev8div16qi2_mask, "__builtin_ia32_pmovsqb512_mask", IX86_BUILTIN_PMOVSQB512, UNKNOWN, (int) V16QI_FTYPE_V8DI_V16QI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev8div8si2_mask, "__builtin_ia32_pmovsqd512_mask", IX86_BUILTIN_PMOVSQD512, UNKNOWN, (int) V8SI_FTYPE_V8DI_V8SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ss_truncatev8div8hi2_mask, "__builtin_ia32_pmovsqw512_mask", IX86_BUILTIN_PMOVSQW512, UNKNOWN, (int) V8HI_FTYPE_V8DI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sign_extendv16qiv16si2_mask, "__builtin_ia32_pmovsxbd512_mask", IX86_BUILTIN_PMOVSXBD512, UNKNOWN, (int) V16SI_FTYPE_V16QI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sign_extendv8qiv8di2_mask, "__builtin_ia32_pmovsxbq512_mask", IX86_BUILTIN_PMOVSXBQ512, UNKNOWN, (int) V8DI_FTYPE_V16QI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sign_extendv8siv8di2_mask, "__builtin_ia32_pmovsxdq512_mask", IX86_BUILTIN_PMOVSXDQ512, UNKNOWN, (int) V8DI_FTYPE_V8SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sign_extendv16hiv16si2_mask, "__builtin_ia32_pmovsxwd512_mask", IX86_BUILTIN_PMOVSXWD512, UNKNOWN, (int) V16SI_FTYPE_V16HI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sign_extendv8hiv8di2_mask, "__builtin_ia32_pmovsxwq512_mask", IX86_BUILTIN_PMOVSXWQ512, UNKNOWN, (int) V8DI_FTYPE_V8HI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev16siv16qi2_mask, "__builtin_ia32_pmovusdb512_mask", IX86_BUILTIN_PMOVUSDB512, UNKNOWN, (int) V16QI_FTYPE_V16SI_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev16siv16hi2_mask, "__builtin_ia32_pmovusdw512_mask", IX86_BUILTIN_PMOVUSDW512, UNKNOWN, (int) V16HI_FTYPE_V16SI_V16HI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev8div16qi2_mask, "__builtin_ia32_pmovusqb512_mask", IX86_BUILTIN_PMOVUSQB512, UNKNOWN, (int) V16QI_FTYPE_V8DI_V16QI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev8div8si2_mask, "__builtin_ia32_pmovusqd512_mask", IX86_BUILTIN_PMOVUSQD512, UNKNOWN, (int) V8SI_FTYPE_V8DI_V8SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_us_truncatev8div8hi2_mask, "__builtin_ia32_pmovusqw512_mask", IX86_BUILTIN_PMOVUSQW512, UNKNOWN, (int) V8HI_FTYPE_V8DI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_zero_extendv16qiv16si2_mask, "__builtin_ia32_pmovzxbd512_mask", IX86_BUILTIN_PMOVZXBD512, UNKNOWN, (int) V16SI_FTYPE_V16QI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_zero_extendv8qiv8di2_mask, "__builtin_ia32_pmovzxbq512_mask", IX86_BUILTIN_PMOVZXBQ512, UNKNOWN, (int) V8DI_FTYPE_V16QI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_zero_extendv8siv8di2_mask, "__builtin_ia32_pmovzxdq512_mask", IX86_BUILTIN_PMOVZXDQ512, UNKNOWN, (int) V8DI_FTYPE_V8SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_zero_extendv16hiv16si2_mask, "__builtin_ia32_pmovzxwd512_mask", IX86_BUILTIN_PMOVZXWD512, UNKNOWN, (int) V16SI_FTYPE_V16HI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_zero_extendv8hiv8di2_mask, "__builtin_ia32_pmovzxwq512_mask", IX86_BUILTIN_PMOVZXWQ512, UNKNOWN, (int) V8DI_FTYPE_V8HI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_vec_widen_smult_even_v16si_mask, "__builtin_ia32_pmuldq512_mask", IX86_BUILTIN_PMULDQ512, UNKNOWN, (int) V8DI_FTYPE_V16SI_V16SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_mulv16si3_mask, "__builtin_ia32_pmulld512_mask" , IX86_BUILTIN_PMULLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_vec_widen_umult_even_v16si_mask, "__builtin_ia32_pmuludq512_mask", IX86_BUILTIN_PMULUDQ512, UNKNOWN, (int) V8DI_FTYPE_V16SI_V16SI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_iorv16si3_mask, "__builtin_ia32_pord512_mask", IX86_BUILTIN_PORD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_iorv8di3_mask, "__builtin_ia32_porq512_mask", IX86_BUILTIN_PORQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rolv16si_mask, "__builtin_ia32_prold512_mask", IX86_BUILTIN_PROLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rolv8di_mask, "__builtin_ia32_prolq512_mask", IX86_BUILTIN_PROLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rolvv16si_mask, "__builtin_ia32_prolvd512_mask", IX86_BUILTIN_PROLVD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rolvv8di_mask, "__builtin_ia32_prolvq512_mask", IX86_BUILTIN_PROLVQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rorv16si_mask, "__builtin_ia32_prord512_mask", IX86_BUILTIN_PRORD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rorv8di_mask, "__builtin_ia32_prorq512_mask", IX86_BUILTIN_PRORQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rorvv16si_mask, "__builtin_ia32_prorvd512_mask", IX86_BUILTIN_PRORVD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rorvv8di_mask, "__builtin_ia32_prorvq512_mask", IX86_BUILTIN_PRORVQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_pshufdv3_mask, "__builtin_ia32_pshufd512_mask", IX86_BUILTIN_PSHUFD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashlv16si3_mask, "__builtin_ia32_pslld512_mask", IX86_BUILTIN_PSLLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_V16SI_UHI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashlv16si3_mask, "__builtin_ia32_pslldi512_mask", IX86_BUILTIN_PSLLDI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashlv8di3_mask, "__builtin_ia32_psllq512_mask", IX86_BUILTIN_PSLLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_V8DI_UQI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashlv8di3_mask, "__builtin_ia32_psllqi512_mask", IX86_BUILTIN_PSLLQI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ashlvv16si_mask, "__builtin_ia32_psllv16si_mask", IX86_BUILTIN_PSLLVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ashlvv8di_mask, "__builtin_ia32_psllv8di_mask", IX86_BUILTIN_PSLLVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashrv16si3_mask, "__builtin_ia32_psrad512_mask", IX86_BUILTIN_PSRAD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_V16SI_UHI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashrv16si3_mask, "__builtin_ia32_psradi512_mask", IX86_BUILTIN_PSRADI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashrv8di3_mask, "__builtin_ia32_psraq512_mask", IX86_BUILTIN_PSRAQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_V8DI_UQI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_ashrv8di3_mask, "__builtin_ia32_psraqi512_mask", IX86_BUILTIN_PSRAQI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ashrvv16si_mask, "__builtin_ia32_psrav16si_mask", IX86_BUILTIN_PSRAVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ashrvv8di_mask, "__builtin_ia32_psrav8di_mask", IX86_BUILTIN_PSRAVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_lshrv16si3_mask, "__builtin_ia32_psrld512_mask", IX86_BUILTIN_PSRLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_V16SI_UHI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_lshrv16si3_mask, "__builtin_ia32_psrldi512_mask", IX86_BUILTIN_PSRLDI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_lshrv8di3_mask, "__builtin_ia32_psrlq512_mask", IX86_BUILTIN_PSRLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_V8DI_UQI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_lshrv8di3_mask, "__builtin_ia32_psrlqi512_mask", IX86_BUILTIN_PSRLQI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_lshrvv16si_mask, "__builtin_ia32_psrlv16si_mask", IX86_BUILTIN_PSRLVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_lshrvv8di_mask, "__builtin_ia32_psrlv8di_mask", IX86_BUILTIN_PSRLVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_subv16si3_mask, "__builtin_ia32_psubd512_mask", IX86_BUILTIN_PSUBD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_subv8di3_mask, "__builtin_ia32_psubq512_mask", IX86_BUILTIN_PSUBQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_testmv16si3_mask, "__builtin_ia32_ptestmd512", IX86_BUILTIN_PTESTMD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_testmv8di3_mask, "__builtin_ia32_ptestmq512", IX86_BUILTIN_PTESTMQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_testnmv16si3_mask, "__builtin_ia32_ptestnmd512", IX86_BUILTIN_PTESTNMD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_testnmv8di3_mask, "__builtin_ia32_ptestnmq512", IX86_BUILTIN_PTESTNMQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_interleave_highv16si_mask, "__builtin_ia32_punpckhdq512_mask", IX86_BUILTIN_PUNPCKHDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_interleave_highv8di_mask, "__builtin_ia32_punpckhqdq512_mask", IX86_BUILTIN_PUNPCKHQDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_interleave_lowv16si_mask, "__builtin_ia32_punpckldq512_mask", IX86_BUILTIN_PUNPCKLDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_interleave_lowv8di_mask, "__builtin_ia32_punpcklqdq512_mask", IX86_BUILTIN_PUNPCKLQDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_xorv16si3_mask, "__builtin_ia32_pxord512_mask", IX86_BUILTIN_PXORD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_xorv8di3_mask, "__builtin_ia32_pxorq512_mask", IX86_BUILTIN_PXORQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rcp14v8df_mask, "__builtin_ia32_rcp14pd512_mask", IX86_BUILTIN_RCP14PD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rcp14v16sf_mask, "__builtin_ia32_rcp14ps512_mask", IX86_BUILTIN_RCP14PS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv8df_mask, "__builtin_ia32_expanddf512_mask", IX86_BUILTIN_EXPANDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv8df_maskz, "__builtin_ia32_expanddf512_maskz", IX86_BUILTIN_EXPANDPD512Z, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv16sf_mask, "__builtin_ia32_expandsf512_mask", IX86_BUILTIN_EXPANDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv16sf_maskz, "__builtin_ia32_expandsf512_maskz", IX86_BUILTIN_EXPANDPS512Z, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vextractf32x4_mask, "__builtin_ia32_extractf32x4_mask", IX86_BUILTIN_EXTRACTF32X4, UNKNOWN, (int) V4SF_FTYPE_V16SF_INT_V4SF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vextractf64x4_mask, "__builtin_ia32_extractf64x4_mask", IX86_BUILTIN_EXTRACTF64X4, UNKNOWN, (int) V4DF_FTYPE_V8DF_INT_V4DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vextracti32x4_mask, "__builtin_ia32_extracti32x4_mask", IX86_BUILTIN_EXTRACTI32X4, UNKNOWN, (int) V4SI_FTYPE_V16SI_INT_V4SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vextracti64x4_mask, "__builtin_ia32_extracti64x4_mask", IX86_BUILTIN_EXTRACTI64X4, UNKNOWN, (int) V4DI_FTYPE_V8DI_INT_V4DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vinsertf32x4_mask, "__builtin_ia32_insertf32x4_mask", IX86_BUILTIN_INSERTF32X4, UNKNOWN, (int) V16SF_FTYPE_V16SF_V4SF_INT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vinsertf64x4_mask, "__builtin_ia32_insertf64x4_mask", IX86_BUILTIN_INSERTF64X4, UNKNOWN, (int) V8DF_FTYPE_V8DF_V4DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vinserti32x4_mask, "__builtin_ia32_inserti32x4_mask", IX86_BUILTIN_INSERTI32X4, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vinserti64x4_mask, "__builtin_ia32_inserti64x4_mask", IX86_BUILTIN_INSERTI64X4, UNKNOWN, (int) V8DI_FTYPE_V8DI_V4DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv8df_mask, "__builtin_ia32_movapd512_mask", IX86_BUILTIN_MOVAPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv16sf_mask, "__builtin_ia32_movaps512_mask", IX86_BUILTIN_MOVAPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movddup512_mask, "__builtin_ia32_movddup512_mask", IX86_BUILTIN_MOVDDUP512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv16si_mask, "__builtin_ia32_movdqa32_512_mask", IX86_BUILTIN_MOVDQA32_512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_loadv8di_mask, "__builtin_ia32_movdqa64_512_mask", IX86_BUILTIN_MOVDQA64_512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movshdup512_mask, "__builtin_ia32_movshdup512_mask", IX86_BUILTIN_MOVSHDUP512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_movsldup512_mask, "__builtin_ia32_movsldup512_mask", IX86_BUILTIN_MOVSLDUP512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_absv16si2_mask, "__builtin_ia32_pabsd512_mask", IX86_BUILTIN_PABSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_absv8di2_mask, "__builtin_ia32_pabsq512_mask", IX86_BUILTIN_PABSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv16si3_mask, "__builtin_ia32_paddd512_mask", IX86_BUILTIN_PADDD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv8di3_mask, "__builtin_ia32_paddq512_mask", IX86_BUILTIN_PADDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_andv16si3_mask, "__builtin_ia32_pandd512_mask", IX86_BUILTIN_PANDD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_andnotv16si3_mask, "__builtin_ia32_pandnd512_mask", IX86_BUILTIN_PANDND512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_andnotv8di3_mask, "__builtin_ia32_pandnq512_mask", IX86_BUILTIN_PANDNQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_andv8di3_mask, "__builtin_ia32_pandq512_mask", IX86_BUILTIN_PANDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_dupv16si_mask, "__builtin_ia32_pbroadcastd512", IX86_BUILTIN_PBROADCASTD512, UNKNOWN, (int) V16SI_FTYPE_V4SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_dup_gprv16si_mask, "__builtin_ia32_pbroadcastd512_gpr_mask", IX86_BUILTIN_PBROADCASTD512_GPR, UNKNOWN, (int) V16SI_FTYPE_SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512CD, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512cd_maskb_vec_dupv8di, "__builtin_ia32_broadcastmb512", IX86_BUILTIN_PBROADCASTMB512, UNKNOWN, (int) V8DI_FTYPE_UQI)
+BDESC (OPTION_MASK_ISA_AVX512CD, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512cd_maskw_vec_dupv16si, "__builtin_ia32_broadcastmw512", IX86_BUILTIN_PBROADCASTMW512, UNKNOWN, (int) V16SI_FTYPE_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_dupv8di_mask, "__builtin_ia32_pbroadcastq512", IX86_BUILTIN_PBROADCASTQ512, UNKNOWN, (int) V8DI_FTYPE_V2DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_dup_gprv8di_mask, "__builtin_ia32_pbroadcastq512_gpr_mask", IX86_BUILTIN_PBROADCASTQ512_GPR, UNKNOWN, (int) V8DI_FTYPE_DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_eqv16si3_mask, "__builtin_ia32_pcmpeqd512_mask", IX86_BUILTIN_PCMPEQD512_MASK, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_eqv8di3_mask, "__builtin_ia32_pcmpeqq512_mask", IX86_BUILTIN_PCMPEQQ512_MASK, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_gtv16si3_mask, "__builtin_ia32_pcmpgtd512_mask", IX86_BUILTIN_PCMPGTD512_MASK, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_gtv8di3_mask, "__builtin_ia32_pcmpgtq512_mask", IX86_BUILTIN_PCMPGTQ512_MASK, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressv16si_mask, "__builtin_ia32_compresssi512_mask", IX86_BUILTIN_PCOMPRESSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_compressv8di_mask, "__builtin_ia32_compressdi512_mask", IX86_BUILTIN_PCOMPRESSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv16si_mask, "__builtin_ia32_expandsi512_mask", IX86_BUILTIN_PEXPANDD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv16si_maskz, "__builtin_ia32_expandsi512_maskz", IX86_BUILTIN_PEXPANDD512Z, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv8di_mask, "__builtin_ia32_expanddi512_mask", IX86_BUILTIN_PEXPANDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_expandv8di_maskz, "__builtin_ia32_expanddi512_maskz", IX86_BUILTIN_PEXPANDQ512Z, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv16si3_mask, "__builtin_ia32_pmaxsd512_mask", IX86_BUILTIN_PMAXSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv8di3_mask, "__builtin_ia32_pmaxsq512_mask", IX86_BUILTIN_PMAXSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_umaxv16si3_mask, "__builtin_ia32_pmaxud512_mask", IX86_BUILTIN_PMAXUD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_umaxv8di3_mask, "__builtin_ia32_pmaxuq512_mask", IX86_BUILTIN_PMAXUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv16si3_mask, "__builtin_ia32_pminsd512_mask", IX86_BUILTIN_PMINSD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv8di3_mask, "__builtin_ia32_pminsq512_mask", IX86_BUILTIN_PMINSQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_uminv16si3_mask, "__builtin_ia32_pminud512_mask", IX86_BUILTIN_PMINUD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_uminv8di3_mask, "__builtin_ia32_pminuq512_mask", IX86_BUILTIN_PMINUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev16siv16qi2_mask, "__builtin_ia32_pmovdb512_mask", IX86_BUILTIN_PMOVDB512, UNKNOWN, (int) V16QI_FTYPE_V16SI_V16QI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev16siv16hi2_mask, "__builtin_ia32_pmovdw512_mask", IX86_BUILTIN_PMOVDW512, UNKNOWN, (int) V16HI_FTYPE_V16SI_V16HI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev8div16qi2_mask, "__builtin_ia32_pmovqb512_mask", IX86_BUILTIN_PMOVQB512, UNKNOWN, (int) V16QI_FTYPE_V8DI_V16QI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev8div8si2_mask, "__builtin_ia32_pmovqd512_mask", IX86_BUILTIN_PMOVQD512, UNKNOWN, (int) V8SI_FTYPE_V8DI_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_truncatev8div8hi2_mask, "__builtin_ia32_pmovqw512_mask", IX86_BUILTIN_PMOVQW512, UNKNOWN, (int) V8HI_FTYPE_V8DI_V8HI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev16siv16qi2_mask, "__builtin_ia32_pmovsdb512_mask", IX86_BUILTIN_PMOVSDB512, UNKNOWN, (int) V16QI_FTYPE_V16SI_V16QI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev16siv16hi2_mask, "__builtin_ia32_pmovsdw512_mask", IX86_BUILTIN_PMOVSDW512, UNKNOWN, (int) V16HI_FTYPE_V16SI_V16HI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev8div16qi2_mask, "__builtin_ia32_pmovsqb512_mask", IX86_BUILTIN_PMOVSQB512, UNKNOWN, (int) V16QI_FTYPE_V8DI_V16QI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev8div8si2_mask, "__builtin_ia32_pmovsqd512_mask", IX86_BUILTIN_PMOVSQD512, UNKNOWN, (int) V8SI_FTYPE_V8DI_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ss_truncatev8div8hi2_mask, "__builtin_ia32_pmovsqw512_mask", IX86_BUILTIN_PMOVSQW512, UNKNOWN, (int) V8HI_FTYPE_V8DI_V8HI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sign_extendv16qiv16si2_mask, "__builtin_ia32_pmovsxbd512_mask", IX86_BUILTIN_PMOVSXBD512, UNKNOWN, (int) V16SI_FTYPE_V16QI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sign_extendv8qiv8di2_mask, "__builtin_ia32_pmovsxbq512_mask", IX86_BUILTIN_PMOVSXBQ512, UNKNOWN, (int) V8DI_FTYPE_V16QI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sign_extendv8siv8di2_mask, "__builtin_ia32_pmovsxdq512_mask", IX86_BUILTIN_PMOVSXDQ512, UNKNOWN, (int) V8DI_FTYPE_V8SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sign_extendv16hiv16si2_mask, "__builtin_ia32_pmovsxwd512_mask", IX86_BUILTIN_PMOVSXWD512, UNKNOWN, (int) V16SI_FTYPE_V16HI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sign_extendv8hiv8di2_mask, "__builtin_ia32_pmovsxwq512_mask", IX86_BUILTIN_PMOVSXWQ512, UNKNOWN, (int) V8DI_FTYPE_V8HI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev16siv16qi2_mask, "__builtin_ia32_pmovusdb512_mask", IX86_BUILTIN_PMOVUSDB512, UNKNOWN, (int) V16QI_FTYPE_V16SI_V16QI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev16siv16hi2_mask, "__builtin_ia32_pmovusdw512_mask", IX86_BUILTIN_PMOVUSDW512, UNKNOWN, (int) V16HI_FTYPE_V16SI_V16HI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev8div16qi2_mask, "__builtin_ia32_pmovusqb512_mask", IX86_BUILTIN_PMOVUSQB512, UNKNOWN, (int) V16QI_FTYPE_V8DI_V16QI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev8div8si2_mask, "__builtin_ia32_pmovusqd512_mask", IX86_BUILTIN_PMOVUSQD512, UNKNOWN, (int) V8SI_FTYPE_V8DI_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_us_truncatev8div8hi2_mask, "__builtin_ia32_pmovusqw512_mask", IX86_BUILTIN_PMOVUSQW512, UNKNOWN, (int) V8HI_FTYPE_V8DI_V8HI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_zero_extendv16qiv16si2_mask, "__builtin_ia32_pmovzxbd512_mask", IX86_BUILTIN_PMOVZXBD512, UNKNOWN, (int) V16SI_FTYPE_V16QI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_zero_extendv8qiv8di2_mask, "__builtin_ia32_pmovzxbq512_mask", IX86_BUILTIN_PMOVZXBQ512, UNKNOWN, (int) V8DI_FTYPE_V16QI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_zero_extendv8siv8di2_mask, "__builtin_ia32_pmovzxdq512_mask", IX86_BUILTIN_PMOVZXDQ512, UNKNOWN, (int) V8DI_FTYPE_V8SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_zero_extendv16hiv16si2_mask, "__builtin_ia32_pmovzxwd512_mask", IX86_BUILTIN_PMOVZXWD512, UNKNOWN, (int) V16SI_FTYPE_V16HI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_zero_extendv8hiv8di2_mask, "__builtin_ia32_pmovzxwq512_mask", IX86_BUILTIN_PMOVZXWQ512, UNKNOWN, (int) V8DI_FTYPE_V8HI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vec_widen_smult_even_v16si_mask, "__builtin_ia32_pmuldq512_mask", IX86_BUILTIN_PMULDQ512, UNKNOWN, (int) V8DI_FTYPE_V16SI_V16SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_mulv16si3_mask, "__builtin_ia32_pmulld512_mask" , IX86_BUILTIN_PMULLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vec_widen_umult_even_v16si_mask, "__builtin_ia32_pmuludq512_mask", IX86_BUILTIN_PMULUDQ512, UNKNOWN, (int) V8DI_FTYPE_V16SI_V16SI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_iorv16si3_mask, "__builtin_ia32_pord512_mask", IX86_BUILTIN_PORD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_iorv8di3_mask, "__builtin_ia32_porq512_mask", IX86_BUILTIN_PORQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rolv16si_mask, "__builtin_ia32_prold512_mask", IX86_BUILTIN_PROLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rolv8di_mask, "__builtin_ia32_prolq512_mask", IX86_BUILTIN_PROLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rolvv16si_mask, "__builtin_ia32_prolvd512_mask", IX86_BUILTIN_PROLVD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rolvv8di_mask, "__builtin_ia32_prolvq512_mask", IX86_BUILTIN_PROLVQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rorv16si_mask, "__builtin_ia32_prord512_mask", IX86_BUILTIN_PRORD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rorv8di_mask, "__builtin_ia32_prorq512_mask", IX86_BUILTIN_PRORQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rorvv16si_mask, "__builtin_ia32_prorvd512_mask", IX86_BUILTIN_PRORVD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rorvv8di_mask, "__builtin_ia32_prorvq512_mask", IX86_BUILTIN_PRORVQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_pshufdv3_mask, "__builtin_ia32_pshufd512_mask", IX86_BUILTIN_PSHUFD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashlv16si3_mask, "__builtin_ia32_pslld512_mask", IX86_BUILTIN_PSLLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_V16SI_UHI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashlv16si3_mask, "__builtin_ia32_pslldi512_mask", IX86_BUILTIN_PSLLDI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashlv8di3_mask, "__builtin_ia32_psllq512_mask", IX86_BUILTIN_PSLLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_V8DI_UQI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashlv8di3_mask, "__builtin_ia32_psllqi512_mask", IX86_BUILTIN_PSLLQI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ashlvv16si_mask, "__builtin_ia32_psllv16si_mask", IX86_BUILTIN_PSLLVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ashlvv8di_mask, "__builtin_ia32_psllv8di_mask", IX86_BUILTIN_PSLLVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashrv16si3_mask, "__builtin_ia32_psrad512_mask", IX86_BUILTIN_PSRAD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_V16SI_UHI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashrv16si3_mask, "__builtin_ia32_psradi512_mask", IX86_BUILTIN_PSRADI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashrv8di3_mask, "__builtin_ia32_psraq512_mask", IX86_BUILTIN_PSRAQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_V8DI_UQI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashrv8di3_mask, "__builtin_ia32_psraqi512_mask", IX86_BUILTIN_PSRAQI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ashrvv16si_mask, "__builtin_ia32_psrav16si_mask", IX86_BUILTIN_PSRAVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ashrvv8di_mask, "__builtin_ia32_psrav8di_mask", IX86_BUILTIN_PSRAVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_lshrv16si3_mask, "__builtin_ia32_psrld512_mask", IX86_BUILTIN_PSRLD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V4SI_V16SI_UHI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_lshrv16si3_mask, "__builtin_ia32_psrldi512_mask", IX86_BUILTIN_PSRLDI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_INT_V16SI_UHI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_lshrv8di3_mask, "__builtin_ia32_psrlq512_mask", IX86_BUILTIN_PSRLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_V8DI_UQI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_lshrv8di3_mask, "__builtin_ia32_psrlqi512_mask", IX86_BUILTIN_PSRLQI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_lshrvv16si_mask, "__builtin_ia32_psrlv16si_mask", IX86_BUILTIN_PSRLVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_lshrvv8di_mask, "__builtin_ia32_psrlv8di_mask", IX86_BUILTIN_PSRLVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv16si3_mask, "__builtin_ia32_psubd512_mask", IX86_BUILTIN_PSUBD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv8di3_mask, "__builtin_ia32_psubq512_mask", IX86_BUILTIN_PSUBQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_testmv16si3_mask, "__builtin_ia32_ptestmd512", IX86_BUILTIN_PTESTMD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_testmv8di3_mask, "__builtin_ia32_ptestmq512", IX86_BUILTIN_PTESTMQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_testnmv16si3_mask, "__builtin_ia32_ptestnmd512", IX86_BUILTIN_PTESTNMD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_testnmv8di3_mask, "__builtin_ia32_ptestnmq512", IX86_BUILTIN_PTESTNMQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_interleave_highv16si_mask, "__builtin_ia32_punpckhdq512_mask", IX86_BUILTIN_PUNPCKHDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_interleave_highv8di_mask, "__builtin_ia32_punpckhqdq512_mask", IX86_BUILTIN_PUNPCKHQDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_interleave_lowv16si_mask, "__builtin_ia32_punpckldq512_mask", IX86_BUILTIN_PUNPCKLDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_interleave_lowv8di_mask, "__builtin_ia32_punpcklqdq512_mask", IX86_BUILTIN_PUNPCKLQDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_xorv16si3_mask, "__builtin_ia32_pxord512_mask", IX86_BUILTIN_PXORD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_xorv8di3_mask, "__builtin_ia32_pxorq512_mask", IX86_BUILTIN_PXORQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_rcp14v8df_mask, "__builtin_ia32_rcp14pd512_mask", IX86_BUILTIN_RCP14PD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_rcp14v16sf_mask, "__builtin_ia32_rcp14ps512_mask", IX86_BUILTIN_RCP14PS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_srcp14v2df, "__builtin_ia32_rcp14sd", IX86_BUILTIN_RCP14SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_srcp14v2df_mask, "__builtin_ia32_rcp14sd_mask", IX86_BUILTIN_RCP14SDMASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_srcp14v4sf, "__builtin_ia32_rcp14ss", IX86_BUILTIN_RCP14SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_srcp14v4sf_mask, "__builtin_ia32_rcp14ss_mask", IX86_BUILTIN_RCP14SSMASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rsqrt14v8df_mask, "__builtin_ia32_rsqrt14pd512_mask", IX86_BUILTIN_RSQRT14PD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rsqrt14v16sf_mask, "__builtin_ia32_rsqrt14ps512_mask", IX86_BUILTIN_RSQRT14PS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_rsqrt14v8df_mask, "__builtin_ia32_rsqrt14pd512_mask", IX86_BUILTIN_RSQRT14PD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_rsqrt14v16sf_mask, "__builtin_ia32_rsqrt14ps512_mask", IX86_BUILTIN_RSQRT14PS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_UHI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rsqrt14v2df, "__builtin_ia32_rsqrt14sd", IX86_BUILTIN_RSQRT14SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rsqrt14_v2df_mask, "__builtin_ia32_rsqrt14sd_mask", IX86_BUILTIN_RSQRT14SDMASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rsqrt14v4sf, "__builtin_ia32_rsqrt14ss", IX86_BUILTIN_RSQRT14SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_rsqrt14_v4sf_mask, "__builtin_ia32_rsqrt14ss_mask", IX86_BUILTIN_RSQRT14SSMASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_shufpd512_mask, "__builtin_ia32_shufpd512_mask", IX86_BUILTIN_SHUFPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_shufps512_mask, "__builtin_ia32_shufps512_mask", IX86_BUILTIN_SHUFPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_shuf_f32x4_mask, "__builtin_ia32_shuf_f32x4_mask", IX86_BUILTIN_SHUF_F32x4, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_shuf_f64x2_mask, "__builtin_ia32_shuf_f64x2_mask", IX86_BUILTIN_SHUF_F64x2, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_shuf_i32x4_mask, "__builtin_ia32_shuf_i32x4_mask", IX86_BUILTIN_SHUF_I32x4, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_shuf_i64x2_mask, "__builtin_ia32_shuf_i64x2_mask", IX86_BUILTIN_SHUF_I64x2, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ucmpv16si3_mask, "__builtin_ia32_ucmpd512_mask", IX86_BUILTIN_UCMPD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_INT_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ucmpv8di3_mask, "__builtin_ia32_ucmpq512_mask", IX86_BUILTIN_UCMPQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_INT_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_unpckhpd512_mask, "__builtin_ia32_unpckhpd512_mask", IX86_BUILTIN_UNPCKHPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_unpckhps512_mask, "__builtin_ia32_unpckhps512_mask", IX86_BUILTIN_UNPCKHPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_unpcklpd512_mask, "__builtin_ia32_unpcklpd512_mask", IX86_BUILTIN_UNPCKLPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_unpcklps512_mask, "__builtin_ia32_unpcklps512_mask", IX86_BUILTIN_UNPCKLPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512CD, 0, CODE_FOR_clzv16si2_mask, "__builtin_ia32_vplzcntd_512_mask", IX86_BUILTIN_VPCLZCNTD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512CD, 0, CODE_FOR_clzv8di2_mask, "__builtin_ia32_vplzcntq_512_mask", IX86_BUILTIN_VPCLZCNTQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512CD, 0, CODE_FOR_conflictv16si_mask, "__builtin_ia32_vpconflictsi_512_mask", IX86_BUILTIN_VPCONFLICTD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512CD, 0, CODE_FOR_conflictv8di_mask, "__builtin_ia32_vpconflictdi_512_mask", IX86_BUILTIN_VPCONFLICTQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_permv8df_mask, "__builtin_ia32_permdf512_mask", IX86_BUILTIN_VPERMDF512, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_permv8di_mask, "__builtin_ia32_permdi512_mask", IX86_BUILTIN_VPERMDI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermi2varv16si3_mask, "__builtin_ia32_vpermi2vard512_mask", IX86_BUILTIN_VPERMI2VARD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermi2varv8df3_mask, "__builtin_ia32_vpermi2varpd512_mask", IX86_BUILTIN_VPERMI2VARPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DI_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermi2varv16sf3_mask, "__builtin_ia32_vpermi2varps512_mask", IX86_BUILTIN_VPERMI2VARPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SI_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermi2varv8di3_mask, "__builtin_ia32_vpermi2varq512_mask", IX86_BUILTIN_VPERMI2VARQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermilv8df_mask, "__builtin_ia32_vpermilpd512_mask", IX86_BUILTIN_VPERMILPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermilv16sf_mask, "__builtin_ia32_vpermilps512_mask", IX86_BUILTIN_VPERMILPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermilvarv8df3_mask, "__builtin_ia32_vpermilvarpd512_mask", IX86_BUILTIN_VPERMILVARPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DI_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermilvarv16sf3_mask, "__builtin_ia32_vpermilvarps512_mask", IX86_BUILTIN_VPERMILVARPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SI_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv16si3_mask, "__builtin_ia32_vpermt2vard512_mask", IX86_BUILTIN_VPERMT2VARD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv16si3_maskz, "__builtin_ia32_vpermt2vard512_maskz", IX86_BUILTIN_VPERMT2VARD512_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv8df3_mask, "__builtin_ia32_vpermt2varpd512_mask", IX86_BUILTIN_VPERMT2VARPD512, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv8df3_maskz, "__builtin_ia32_vpermt2varpd512_maskz", IX86_BUILTIN_VPERMT2VARPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv16sf3_mask, "__builtin_ia32_vpermt2varps512_mask", IX86_BUILTIN_VPERMT2VARPS512, UNKNOWN, (int) V16SF_FTYPE_V16SI_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv16sf3_maskz, "__builtin_ia32_vpermt2varps512_maskz", IX86_BUILTIN_VPERMT2VARPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SI_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv8di3_mask, "__builtin_ia32_vpermt2varq512_mask", IX86_BUILTIN_VPERMT2VARQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vpermt2varv8di3_maskz, "__builtin_ia32_vpermt2varq512_maskz", IX86_BUILTIN_VPERMT2VARQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_permvarv8df_mask, "__builtin_ia32_permvardf512_mask", IX86_BUILTIN_VPERMVARDF512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DI_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_permvarv8di_mask, "__builtin_ia32_permvardi512_mask", IX86_BUILTIN_VPERMVARDI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_permvarv16sf_mask, "__builtin_ia32_permvarsf512_mask", IX86_BUILTIN_VPERMVARSF512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SI_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_permvarv16si_mask, "__builtin_ia32_permvarsi512_mask", IX86_BUILTIN_VPERMVARSI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vternlogv16si_mask, "__builtin_ia32_pternlogd512_mask", IX86_BUILTIN_VTERNLOGD512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_INT_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vternlogv16si_maskz, "__builtin_ia32_pternlogd512_maskz", IX86_BUILTIN_VTERNLOGD512_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_INT_UHI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vternlogv8di_mask, "__builtin_ia32_pternlogq512_mask", IX86_BUILTIN_VTERNLOGQ512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_INT_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vternlogv8di_maskz, "__builtin_ia32_pternlogq512_maskz", IX86_BUILTIN_VTERNLOGQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_INT_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_shufpd512_mask, "__builtin_ia32_shufpd512_mask", IX86_BUILTIN_SHUFPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_shufps512_mask, "__builtin_ia32_shufps512_mask", IX86_BUILTIN_SHUFPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_shuf_f32x4_mask, "__builtin_ia32_shuf_f32x4_mask", IX86_BUILTIN_SHUF_F32x4, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_shuf_f64x2_mask, "__builtin_ia32_shuf_f64x2_mask", IX86_BUILTIN_SHUF_F64x2, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_shuf_i32x4_mask, "__builtin_ia32_shuf_i32x4_mask", IX86_BUILTIN_SHUF_I32x4, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_shuf_i64x2_mask, "__builtin_ia32_shuf_i64x2_mask", IX86_BUILTIN_SHUF_I64x2, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ucmpv16si3_mask, "__builtin_ia32_ucmpd512_mask", IX86_BUILTIN_UCMPD512, UNKNOWN, (int) UHI_FTYPE_V16SI_V16SI_INT_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_ucmpv8di3_mask, "__builtin_ia32_ucmpq512_mask", IX86_BUILTIN_UCMPQ512, UNKNOWN, (int) UQI_FTYPE_V8DI_V8DI_INT_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_unpckhpd512_mask, "__builtin_ia32_unpckhpd512_mask", IX86_BUILTIN_UNPCKHPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_unpckhps512_mask, "__builtin_ia32_unpckhps512_mask", IX86_BUILTIN_UNPCKHPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_unpcklpd512_mask, "__builtin_ia32_unpcklpd512_mask", IX86_BUILTIN_UNPCKLPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_unpcklps512_mask, "__builtin_ia32_unpcklps512_mask", IX86_BUILTIN_UNPCKLPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512CD, OPTION_MASK_ISA2_EVEX512, CODE_FOR_clzv16si2_mask, "__builtin_ia32_vplzcntd_512_mask", IX86_BUILTIN_VPCLZCNTD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512CD, OPTION_MASK_ISA2_EVEX512, CODE_FOR_clzv8di2_mask, "__builtin_ia32_vplzcntq_512_mask", IX86_BUILTIN_VPCLZCNTQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512CD, OPTION_MASK_ISA2_EVEX512, CODE_FOR_conflictv16si_mask, "__builtin_ia32_vpconflictsi_512_mask", IX86_BUILTIN_VPCONFLICTD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512CD, OPTION_MASK_ISA2_EVEX512, CODE_FOR_conflictv8di_mask, "__builtin_ia32_vpconflictdi_512_mask", IX86_BUILTIN_VPCONFLICTQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_permv8df_mask, "__builtin_ia32_permdf512_mask", IX86_BUILTIN_VPERMDF512, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_permv8di_mask, "__builtin_ia32_permdi512_mask", IX86_BUILTIN_VPERMDI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermi2varv16si3_mask, "__builtin_ia32_vpermi2vard512_mask", IX86_BUILTIN_VPERMI2VARD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermi2varv8df3_mask, "__builtin_ia32_vpermi2varpd512_mask", IX86_BUILTIN_VPERMI2VARPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DI_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermi2varv16sf3_mask, "__builtin_ia32_vpermi2varps512_mask", IX86_BUILTIN_VPERMI2VARPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SI_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermi2varv8di3_mask, "__builtin_ia32_vpermi2varq512_mask", IX86_BUILTIN_VPERMI2VARQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermilv8df_mask, "__builtin_ia32_vpermilpd512_mask", IX86_BUILTIN_VPERMILPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermilv16sf_mask, "__builtin_ia32_vpermilps512_mask", IX86_BUILTIN_VPERMILPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermilvarv8df3_mask, "__builtin_ia32_vpermilvarpd512_mask", IX86_BUILTIN_VPERMILVARPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DI_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermilvarv16sf3_mask, "__builtin_ia32_vpermilvarps512_mask", IX86_BUILTIN_VPERMILVARPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SI_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv16si3_mask, "__builtin_ia32_vpermt2vard512_mask", IX86_BUILTIN_VPERMT2VARD512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv16si3_maskz, "__builtin_ia32_vpermt2vard512_maskz", IX86_BUILTIN_VPERMT2VARD512_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv8df3_mask, "__builtin_ia32_vpermt2varpd512_mask", IX86_BUILTIN_VPERMT2VARPD512, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv8df3_maskz, "__builtin_ia32_vpermt2varpd512_maskz", IX86_BUILTIN_VPERMT2VARPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv16sf3_mask, "__builtin_ia32_vpermt2varps512_mask", IX86_BUILTIN_VPERMT2VARPS512, UNKNOWN, (int) V16SF_FTYPE_V16SI_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv16sf3_maskz, "__builtin_ia32_vpermt2varps512_maskz", IX86_BUILTIN_VPERMT2VARPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SI_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv8di3_mask, "__builtin_ia32_vpermt2varq512_mask", IX86_BUILTIN_VPERMT2VARQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vpermt2varv8di3_maskz, "__builtin_ia32_vpermt2varq512_maskz", IX86_BUILTIN_VPERMT2VARQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_permvarv8df_mask, "__builtin_ia32_permvardf512_mask", IX86_BUILTIN_VPERMVARDF512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DI_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_permvarv8di_mask, "__builtin_ia32_permvardi512_mask", IX86_BUILTIN_VPERMVARDI512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_permvarv16sf_mask, "__builtin_ia32_permvarsf512_mask", IX86_BUILTIN_VPERMVARSF512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SI_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_permvarv16si_mask, "__builtin_ia32_permvarsi512_mask", IX86_BUILTIN_VPERMVARSI512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vternlogv16si_mask, "__builtin_ia32_pternlogd512_mask", IX86_BUILTIN_VTERNLOGD512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_INT_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vternlogv16si_maskz, "__builtin_ia32_pternlogd512_maskz", IX86_BUILTIN_VTERNLOGD512_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_INT_UHI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vternlogv8di_mask, "__builtin_ia32_pternlogq512_mask", IX86_BUILTIN_VTERNLOGQ512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_INT_UQI)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vternlogv8di_maskz, "__builtin_ia32_pternlogq512_maskz", IX86_BUILTIN_VTERNLOGQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_INT_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movdf_mask, "__builtin_ia32_movesd_mask", IX86_BUILTIN_MOVSD_MASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_movsf_mask, "__builtin_ia32_movess_mask", IX86_BUILTIN_MOVSS_MASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_copysignv16sf3, "__builtin_ia32_copysignps512", IX86_BUILTIN_CPYSGNPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_copysignv8df3, "__builtin_ia32_copysignpd512", IX86_BUILTIN_CPYSGNPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sqrtv8df2, "__builtin_ia32_sqrtpd512", IX86_BUILTIN_SQRTPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sqrtv16sf2, "__builtin_ia32_sqrtps512", IX86_BUILTIN_SQRTPS_NR512, UNKNOWN, (int) V16SF_FTYPE_V16SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_copysignv16sf3, "__builtin_ia32_copysignps512", IX86_BUILTIN_CPYSGNPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_copysignv8df3, "__builtin_ia32_copysignpd512", IX86_BUILTIN_CPYSGNPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sqrtv8df2, "__builtin_ia32_sqrtpd512", IX86_BUILTIN_SQRTPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sqrtv16sf2, "__builtin_ia32_sqrtps512", IX86_BUILTIN_SQRTPS_NR512, UNKNOWN, (int) V16SF_FTYPE_V16SF)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_exp2v16sf, "__builtin_ia32_exp2ps", IX86_BUILTIN_EXP2PS, UNKNOWN, (int) V16SF_FTYPE_V16SF)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_rndscalev32hf, "__builtin_ia32_floorph512", IX86_BUILTIN_FLOORPH512, (enum rtx_code) ROUND_FLOOR, (int) V32HF_FTYPE_V32HF_ROUND)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_rndscalev32hf, "__builtin_ia32_ceilph512", IX86_BUILTIN_CEILPH512, (enum rtx_code) ROUND_CEIL, (int) V32HF_FTYPE_V32HF_ROUND)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_rndscalev32hf, "__builtin_ia32_truncph512", IX86_BUILTIN_TRUNCPH512, (enum rtx_code) ROUND_TRUNC, (int) V32HF_FTYPE_V32HF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundps512, "__builtin_ia32_floorps512", IX86_BUILTIN_FLOORPS512, (enum rtx_code) ROUND_FLOOR, (int) V16SF_FTYPE_V16SF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundps512, "__builtin_ia32_ceilps512", IX86_BUILTIN_CEILPS512, (enum rtx_code) ROUND_CEIL, (int) V16SF_FTYPE_V16SF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundps512, "__builtin_ia32_truncps512", IX86_BUILTIN_TRUNCPS512, (enum rtx_code) ROUND_TRUNC, (int) V16SF_FTYPE_V16SF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundpd512, "__builtin_ia32_floorpd512", IX86_BUILTIN_FLOORPD512, (enum rtx_code) ROUND_FLOOR, (int) V8DF_FTYPE_V8DF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundpd512, "__builtin_ia32_ceilpd512", IX86_BUILTIN_CEILPD512, (enum rtx_code) ROUND_CEIL, (int) V8DF_FTYPE_V8DF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundpd512, "__builtin_ia32_truncpd512", IX86_BUILTIN_TRUNCPD512, (enum rtx_code) ROUND_TRUNC, (int) V8DF_FTYPE_V8DF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fix_notruncv16sfv16si, "__builtin_ia32_cvtps2dq512", IX86_BUILTIN_CVTPS2DQ512, UNKNOWN, (int) V16SI_FTYPE_V16SF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vec_pack_sfix_v8df, "__builtin_ia32_vec_pack_sfix512", IX86_BUILTIN_VEC_PACK_SFIX512, UNKNOWN, (int) V16SI_FTYPE_V8DF_V8DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_roundv16sf2_sfix, "__builtin_ia32_roundps_az_sfix512", IX86_BUILTIN_ROUNDPS_AZ_SFIX512, UNKNOWN, (int) V16SI_FTYPE_V16SF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundps512_sfix, "__builtin_ia32_floorps_sfix512", IX86_BUILTIN_FLOORPS_SFIX512, (enum rtx_code) ROUND_FLOOR, (int) V16SI_FTYPE_V16SF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundps512_sfix, "__builtin_ia32_ceilps_sfix512", IX86_BUILTIN_CEILPS_SFIX512, (enum rtx_code) ROUND_CEIL, (int) V16SI_FTYPE_V16SF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_roundv8df2_vec_pack_sfix, "__builtin_ia32_roundpd_az_vec_pack_sfix512", IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX512, UNKNOWN, (int) V16SI_FTYPE_V8DF_V8DF)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundpd_vec_pack_sfix512, "__builtin_ia32_floorpd_vec_pack_sfix512", IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX512, (enum rtx_code) ROUND_FLOOR, (int) V16SI_FTYPE_V8DF_V8DF_ROUND)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_roundpd_vec_pack_sfix512, "__builtin_ia32_ceilpd_vec_pack_sfix512", IX86_BUILTIN_CEILPD_VEC_PACK_SFIX512, (enum rtx_code) ROUND_CEIL, (int) V16SI_FTYPE_V8DF_V8DF_ROUND)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_rndscalev32hf, "__builtin_ia32_floorph512", IX86_BUILTIN_FLOORPH512, (enum rtx_code) ROUND_FLOOR, (int) V32HF_FTYPE_V32HF_ROUND)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_rndscalev32hf, "__builtin_ia32_ceilph512", IX86_BUILTIN_CEILPH512, (enum rtx_code) ROUND_CEIL, (int) V32HF_FTYPE_V32HF_ROUND)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_rndscalev32hf, "__builtin_ia32_truncph512", IX86_BUILTIN_TRUNCPH512, (enum rtx_code) ROUND_TRUNC, (int) V32HF_FTYPE_V32HF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundps512, "__builtin_ia32_floorps512", IX86_BUILTIN_FLOORPS512, (enum rtx_code) ROUND_FLOOR, (int) V16SF_FTYPE_V16SF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundps512, "__builtin_ia32_ceilps512", IX86_BUILTIN_CEILPS512, (enum rtx_code) ROUND_CEIL, (int) V16SF_FTYPE_V16SF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundps512, "__builtin_ia32_truncps512", IX86_BUILTIN_TRUNCPS512, (enum rtx_code) ROUND_TRUNC, (int) V16SF_FTYPE_V16SF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundpd512, "__builtin_ia32_floorpd512", IX86_BUILTIN_FLOORPD512, (enum rtx_code) ROUND_FLOOR, (int) V8DF_FTYPE_V8DF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundpd512, "__builtin_ia32_ceilpd512", IX86_BUILTIN_CEILPD512, (enum rtx_code) ROUND_CEIL, (int) V8DF_FTYPE_V8DF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundpd512, "__builtin_ia32_truncpd512", IX86_BUILTIN_TRUNCPD512, (enum rtx_code) ROUND_TRUNC, (int) V8DF_FTYPE_V8DF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fix_notruncv16sfv16si, "__builtin_ia32_cvtps2dq512", IX86_BUILTIN_CVTPS2DQ512, UNKNOWN, (int) V16SI_FTYPE_V16SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vec_pack_sfix_v8df, "__builtin_ia32_vec_pack_sfix512", IX86_BUILTIN_VEC_PACK_SFIX512, UNKNOWN, (int) V16SI_FTYPE_V8DF_V8DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_roundv16sf2_sfix, "__builtin_ia32_roundps_az_sfix512", IX86_BUILTIN_ROUNDPS_AZ_SFIX512, UNKNOWN, (int) V16SI_FTYPE_V16SF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundps512_sfix, "__builtin_ia32_floorps_sfix512", IX86_BUILTIN_FLOORPS_SFIX512, (enum rtx_code) ROUND_FLOOR, (int) V16SI_FTYPE_V16SF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundps512_sfix, "__builtin_ia32_ceilps_sfix512", IX86_BUILTIN_CEILPS_SFIX512, (enum rtx_code) ROUND_CEIL, (int) V16SI_FTYPE_V16SF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_roundv8df2_vec_pack_sfix, "__builtin_ia32_roundpd_az_vec_pack_sfix512", IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX512, UNKNOWN, (int) V16SI_FTYPE_V8DF_V8DF)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundpd_vec_pack_sfix512, "__builtin_ia32_floorpd_vec_pack_sfix512", IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX512, (enum rtx_code) ROUND_FLOOR, (int) V16SI_FTYPE_V8DF_V8DF_ROUND)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_roundpd_vec_pack_sfix512, "__builtin_ia32_ceilpd_vec_pack_sfix512", IX86_BUILTIN_CEILPD_VEC_PACK_SFIX512, (enum rtx_code) ROUND_CEIL, (int) V16SI_FTYPE_V8DF_V8DF_ROUND)
/* Mask arithmetic operations */
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kashiftqi, "__builtin_ia32_kshiftliqi", IX86_BUILTIN_KSHIFTLI8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI_CONST)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kashifthi, "__builtin_ia32_kshiftlihi", IX86_BUILTIN_KSHIFTLI16, UNKNOWN, (int) UHI_FTYPE_UHI_UQI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kashiftsi, "__builtin_ia32_kshiftlisi", IX86_BUILTIN_KSHIFTLI32, UNKNOWN, (int) USI_FTYPE_USI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kashiftdi, "__builtin_ia32_kshiftlidi", IX86_BUILTIN_KSHIFTLI64, UNKNOWN, (int) UDI_FTYPE_UDI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kashiftdi, "__builtin_ia32_kshiftlidi", IX86_BUILTIN_KSHIFTLI64, UNKNOWN, (int) UDI_FTYPE_UDI_UQI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_klshiftrtqi, "__builtin_ia32_kshiftriqi", IX86_BUILTIN_KSHIFTRI8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI_CONST)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_klshiftrthi, "__builtin_ia32_kshiftrihi", IX86_BUILTIN_KSHIFTRI16, UNKNOWN, (int) UHI_FTYPE_UHI_UQI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_klshiftrtsi, "__builtin_ia32_kshiftrisi", IX86_BUILTIN_KSHIFTRI32, UNKNOWN, (int) USI_FTYPE_USI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_klshiftrtdi, "__builtin_ia32_kshiftridi", IX86_BUILTIN_KSHIFTRI64, UNKNOWN, (int) UDI_FTYPE_UDI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_klshiftrtdi, "__builtin_ia32_kshiftridi", IX86_BUILTIN_KSHIFTRI64, UNKNOWN, (int) UDI_FTYPE_UDI_UQI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kandqi, "__builtin_ia32_kandqi", IX86_BUILTIN_KAND8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kandhi, "__builtin_ia32_kandhi", IX86_BUILTIN_KAND16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kandsi, "__builtin_ia32_kandsi", IX86_BUILTIN_KAND32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kanddi, "__builtin_ia32_kanddi", IX86_BUILTIN_KAND64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kanddi, "__builtin_ia32_kanddi", IX86_BUILTIN_KAND64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kandnqi, "__builtin_ia32_kandnqi", IX86_BUILTIN_KANDN8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kandnhi, "__builtin_ia32_kandnhi", IX86_BUILTIN_KANDN16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kandnsi, "__builtin_ia32_kandnsi", IX86_BUILTIN_KANDN32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kandndi, "__builtin_ia32_kandndi", IX86_BUILTIN_KANDN64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kandndi, "__builtin_ia32_kandndi", IX86_BUILTIN_KANDN64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_knotqi, "__builtin_ia32_knotqi", IX86_BUILTIN_KNOT8, UNKNOWN, (int) UQI_FTYPE_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_knothi, "__builtin_ia32_knothi", IX86_BUILTIN_KNOT16, UNKNOWN, (int) UHI_FTYPE_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_knotsi, "__builtin_ia32_knotsi", IX86_BUILTIN_KNOT32, UNKNOWN, (int) USI_FTYPE_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_knotdi, "__builtin_ia32_knotdi", IX86_BUILTIN_KNOT64, UNKNOWN, (int) UDI_FTYPE_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_knotdi, "__builtin_ia32_knotdi", IX86_BUILTIN_KNOT64, UNKNOWN, (int) UDI_FTYPE_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kiorqi, "__builtin_ia32_korqi", IX86_BUILTIN_KOR8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kiorhi, "__builtin_ia32_korhi", IX86_BUILTIN_KOR16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kiorsi, "__builtin_ia32_korsi", IX86_BUILTIN_KOR32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kiordi, "__builtin_ia32_kordi", IX86_BUILTIN_KOR64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kiordi, "__builtin_ia32_kordi", IX86_BUILTIN_KOR64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_ktestqi, "__builtin_ia32_ktestcqi", IX86_BUILTIN_KTESTC8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_ktestqi, "__builtin_ia32_ktestzqi", IX86_BUILTIN_KTESTZ8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_ktesthi, "__builtin_ia32_ktestchi", IX86_BUILTIN_KTESTC16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_ktesthi, "__builtin_ia32_ktestzhi", IX86_BUILTIN_KTESTZ16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ktestsi, "__builtin_ia32_ktestcsi", IX86_BUILTIN_KTESTC32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ktestsi, "__builtin_ia32_ktestzsi", IX86_BUILTIN_KTESTZ32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ktestdi, "__builtin_ia32_ktestcdi", IX86_BUILTIN_KTESTC64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ktestdi, "__builtin_ia32_ktestzdi", IX86_BUILTIN_KTESTZ64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ktestsi, "__builtin_ia32_ktestcsi", IX86_BUILTIN_KTESTC32, UNKNOWN, (int) USI_FTYPE_USI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ktestsi, "__builtin_ia32_ktestzsi", IX86_BUILTIN_KTESTZ32, UNKNOWN, (int) USI_FTYPE_USI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ktestdi, "__builtin_ia32_ktestcdi", IX86_BUILTIN_KTESTC64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ktestdi, "__builtin_ia32_ktestzdi", IX86_BUILTIN_KTESTZ64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kortestqi, "__builtin_ia32_kortestcqi", IX86_BUILTIN_KORTESTC8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kortestqi, "__builtin_ia32_kortestzqi", IX86_BUILTIN_KORTESTZ8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kortesthi, "__builtin_ia32_kortestchi", IX86_BUILTIN_KORTESTC16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kortesthi, "__builtin_ia32_kortestzhi", IX86_BUILTIN_KORTESTZ16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kortestsi, "__builtin_ia32_kortestcsi", IX86_BUILTIN_KORTESTC32, UNKNOWN, (int) USI_FTYPE_USI_USI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kortestsi, "__builtin_ia32_kortestzsi", IX86_BUILTIN_KORTESTZ32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kortestdi, "__builtin_ia32_kortestcdi", IX86_BUILTIN_KORTESTC64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kortestdi, "__builtin_ia32_kortestzdi", IX86_BUILTIN_KORTESTZ64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kortestdi, "__builtin_ia32_kortestcdi", IX86_BUILTIN_KORTESTC64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kortestdi, "__builtin_ia32_kortestzdi", IX86_BUILTIN_KORTESTZ64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kunpckhi, "__builtin_ia32_kunpckhi", IX86_BUILTIN_KUNPCKBW, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kxnorqi, "__builtin_ia32_kxnorqi", IX86_BUILTIN_KXNOR8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kxnorhi, "__builtin_ia32_kxnorhi", IX86_BUILTIN_KXNOR16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kxnorsi, "__builtin_ia32_kxnorsi", IX86_BUILTIN_KXNOR32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kxnordi, "__builtin_ia32_kxnordi", IX86_BUILTIN_KXNOR64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kxnordi, "__builtin_ia32_kxnordi", IX86_BUILTIN_KXNOR64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kxorqi, "__builtin_ia32_kxorqi", IX86_BUILTIN_KXOR8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kxorhi, "__builtin_ia32_kxorhi", IX86_BUILTIN_KXOR16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kxorsi, "__builtin_ia32_kxorsi", IX86_BUILTIN_KXOR32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kxordi, "__builtin_ia32_kxordi", IX86_BUILTIN_KXOR64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kxordi, "__builtin_ia32_kxordi", IX86_BUILTIN_KXOR64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kmovb, "__builtin_ia32_kmovb", IX86_BUILTIN_KMOV8, UNKNOWN, (int) UQI_FTYPE_UQI)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_kmovw, "__builtin_ia32_kmovw", IX86_BUILTIN_KMOV16, UNKNOWN, (int) UHI_FTYPE_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kmovd, "__builtin_ia32_kmovd", IX86_BUILTIN_KMOV32, UNKNOWN, (int) USI_FTYPE_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kmovq, "__builtin_ia32_kmovq", IX86_BUILTIN_KMOV64, UNKNOWN, (int) UDI_FTYPE_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kmovq, "__builtin_ia32_kmovq", IX86_BUILTIN_KMOV64, UNKNOWN, (int) UDI_FTYPE_UDI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kaddqi, "__builtin_ia32_kaddqi", IX86_BUILTIN_KADD8, UNKNOWN, (int) UQI_FTYPE_UQI_UQI)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_kaddhi, "__builtin_ia32_kaddhi", IX86_BUILTIN_KADD16, UNKNOWN, (int) UHI_FTYPE_UHI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kaddsi, "__builtin_ia32_kaddsi", IX86_BUILTIN_KADD32, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kadddi, "__builtin_ia32_kadddi", IX86_BUILTIN_KADD64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kadddi, "__builtin_ia32_kadddi", IX86_BUILTIN_KADD64, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
/* SHA */
BDESC (OPTION_MASK_ISA_SSE2, 0, CODE_FOR_sha1msg1, 0, IX86_BUILTIN_SHA1MSG1, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI)
@@ -2408,136 +2408,136 @@ BDESC (OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_cmpv2df3_mask, "__builtin_
BDESC (OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_cmpv4sf3_mask, "__builtin_ia32_cmpps128_mask", IX86_BUILTIN_CMPPS128_MASK, UNKNOWN, (int) UQI_FTYPE_V4SF_V4SF_INT_UQI)
/* AVX512DQ. */
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_broadcastv16sf_mask, "__builtin_ia32_broadcastf32x2_512_mask", IX86_BUILTIN_BROADCASTF32x2_512, UNKNOWN, (int) V16SF_FTYPE_V4SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_broadcastv16si_mask, "__builtin_ia32_broadcasti32x2_512_mask", IX86_BUILTIN_BROADCASTI32x2_512, UNKNOWN, (int) V16SI_FTYPE_V4SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_broadcastv8df_mask_1, "__builtin_ia32_broadcastf64x2_512_mask", IX86_BUILTIN_BROADCASTF64X2_512, UNKNOWN, (int) V8DF_FTYPE_V2DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_broadcastv8di_mask_1, "__builtin_ia32_broadcasti64x2_512_mask", IX86_BUILTIN_BROADCASTI64X2_512, UNKNOWN, (int) V8DI_FTYPE_V2DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_broadcastv16sf_mask_1, "__builtin_ia32_broadcastf32x8_512_mask", IX86_BUILTIN_BROADCASTF32X8_512, UNKNOWN, (int) V16SF_FTYPE_V8SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_broadcastv16si_mask_1, "__builtin_ia32_broadcasti32x8_512_mask", IX86_BUILTIN_BROADCASTI32X8_512, UNKNOWN, (int) V16SI_FTYPE_V8SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vextractf64x2_mask, "__builtin_ia32_extractf64x2_512_mask", IX86_BUILTIN_EXTRACTF64X2_512, UNKNOWN, (int) V2DF_FTYPE_V8DF_INT_V2DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vextractf32x8_mask, "__builtin_ia32_extractf32x8_mask", IX86_BUILTIN_EXTRACTF32X8, UNKNOWN, (int) V8SF_FTYPE_V16SF_INT_V8SF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vextracti64x2_mask, "__builtin_ia32_extracti64x2_512_mask", IX86_BUILTIN_EXTRACTI64X2_512, UNKNOWN, (int) V2DI_FTYPE_V8DI_INT_V2DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vextracti32x8_mask, "__builtin_ia32_extracti32x8_mask", IX86_BUILTIN_EXTRACTI32X8, UNKNOWN, (int) V8SI_FTYPE_V16SI_INT_V8SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducepv8df_mask, "__builtin_ia32_reducepd512_mask", IX86_BUILTIN_REDUCEPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducepv16sf_mask, "__builtin_ia32_reduceps512_mask", IX86_BUILTIN_REDUCEPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_mulv8di3_mask, "__builtin_ia32_pmullq512_mask", IX86_BUILTIN_PMULLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_xorv8df3_mask, "__builtin_ia32_xorpd512_mask", IX86_BUILTIN_XORPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_xorv16sf3_mask, "__builtin_ia32_xorps512_mask", IX86_BUILTIN_XORPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_iorv8df3_mask, "__builtin_ia32_orpd512_mask", IX86_BUILTIN_ORPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_iorv16sf3_mask, "__builtin_ia32_orps512_mask", IX86_BUILTIN_ORPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_andv8df3_mask, "__builtin_ia32_andpd512_mask", IX86_BUILTIN_ANDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_andv16sf3_mask, "__builtin_ia32_andps512_mask", IX86_BUILTIN_ANDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512f_andnotv8df3_mask, "__builtin_ia32_andnpd512_mask", IX86_BUILTIN_ANDNPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512f_andnotv16sf3_mask, "__builtin_ia32_andnps512_mask", IX86_BUILTIN_ANDNPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vinsertf32x8_mask, "__builtin_ia32_insertf32x8_mask", IX86_BUILTIN_INSERTF32X8, UNKNOWN, (int) V16SF_FTYPE_V16SF_V8SF_INT_V16SF_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vinserti32x8_mask, "__builtin_ia32_inserti32x8_mask", IX86_BUILTIN_INSERTI32X8, UNKNOWN, (int) V16SI_FTYPE_V16SI_V8SI_INT_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vinsertf64x2_mask, "__builtin_ia32_insertf64x2_512_mask", IX86_BUILTIN_INSERTF64X2_512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V2DF_INT_V8DF_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_vinserti64x2_mask, "__builtin_ia32_inserti64x2_512_mask", IX86_BUILTIN_INSERTI64X2_512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_INT_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_fpclassv8df_mask, "__builtin_ia32_fpclasspd512_mask", IX86_BUILTIN_FPCLASSPD512, UNKNOWN, (int) QI_FTYPE_V8DF_INT_UQI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_fpclassv16sf_mask, "__builtin_ia32_fpclassps512_mask", IX86_BUILTIN_FPCLASSPS512, UNKNOWN, (int) HI_FTYPE_V16SF_INT_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512f_cvtd2maskv16si, "__builtin_ia32_cvtd2mask512", IX86_BUILTIN_CVTD2MASK512, UNKNOWN, (int) UHI_FTYPE_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512f_cvtq2maskv8di, "__builtin_ia32_cvtq2mask512", IX86_BUILTIN_CVTQ2MASK512, UNKNOWN, (int) UQI_FTYPE_V8DI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512f_cvtmask2dv16si, "__builtin_ia32_cvtmask2d512", IX86_BUILTIN_CVTMASK2D512, UNKNOWN, (int) V16SI_FTYPE_UHI)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512f_cvtmask2qv8di, "__builtin_ia32_cvtmask2q512", IX86_BUILTIN_CVTMASK2Q512, UNKNOWN, (int) V8DI_FTYPE_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_broadcastv16sf_mask, "__builtin_ia32_broadcastf32x2_512_mask", IX86_BUILTIN_BROADCASTF32x2_512, UNKNOWN, (int) V16SF_FTYPE_V4SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_broadcastv16si_mask, "__builtin_ia32_broadcasti32x2_512_mask", IX86_BUILTIN_BROADCASTI32x2_512, UNKNOWN, (int) V16SI_FTYPE_V4SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_broadcastv8df_mask_1, "__builtin_ia32_broadcastf64x2_512_mask", IX86_BUILTIN_BROADCASTF64X2_512, UNKNOWN, (int) V8DF_FTYPE_V2DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_broadcastv8di_mask_1, "__builtin_ia32_broadcasti64x2_512_mask", IX86_BUILTIN_BROADCASTI64X2_512, UNKNOWN, (int) V8DI_FTYPE_V2DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_broadcastv16sf_mask_1, "__builtin_ia32_broadcastf32x8_512_mask", IX86_BUILTIN_BROADCASTF32X8_512, UNKNOWN, (int) V16SF_FTYPE_V8SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_broadcastv16si_mask_1, "__builtin_ia32_broadcasti32x8_512_mask", IX86_BUILTIN_BROADCASTI32X8_512, UNKNOWN, (int) V16SI_FTYPE_V8SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vextractf64x2_mask, "__builtin_ia32_extractf64x2_512_mask", IX86_BUILTIN_EXTRACTF64X2_512, UNKNOWN, (int) V2DF_FTYPE_V8DF_INT_V2DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vextractf32x8_mask, "__builtin_ia32_extractf32x8_mask", IX86_BUILTIN_EXTRACTF32X8, UNKNOWN, (int) V8SF_FTYPE_V16SF_INT_V8SF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vextracti64x2_mask, "__builtin_ia32_extracti64x2_512_mask", IX86_BUILTIN_EXTRACTI64X2_512, UNKNOWN, (int) V2DI_FTYPE_V8DI_INT_V2DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vextracti32x8_mask, "__builtin_ia32_extracti32x8_mask", IX86_BUILTIN_EXTRACTI32X8, UNKNOWN, (int) V8SI_FTYPE_V16SI_INT_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_reducepv8df_mask, "__builtin_ia32_reducepd512_mask", IX86_BUILTIN_REDUCEPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_reducepv16sf_mask, "__builtin_ia32_reduceps512_mask", IX86_BUILTIN_REDUCEPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_mulv8di3_mask, "__builtin_ia32_pmullq512_mask", IX86_BUILTIN_PMULLQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_xorv8df3_mask, "__builtin_ia32_xorpd512_mask", IX86_BUILTIN_XORPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_xorv16sf3_mask, "__builtin_ia32_xorps512_mask", IX86_BUILTIN_XORPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_iorv8df3_mask, "__builtin_ia32_orpd512_mask", IX86_BUILTIN_ORPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_iorv16sf3_mask, "__builtin_ia32_orps512_mask", IX86_BUILTIN_ORPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_andv8df3_mask, "__builtin_ia32_andpd512_mask", IX86_BUILTIN_ANDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_andv16sf3_mask, "__builtin_ia32_andps512_mask", IX86_BUILTIN_ANDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_andnotv8df3_mask, "__builtin_ia32_andnpd512_mask", IX86_BUILTIN_ANDNPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_andnotv16sf3_mask, "__builtin_ia32_andnps512_mask", IX86_BUILTIN_ANDNPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vinsertf32x8_mask, "__builtin_ia32_insertf32x8_mask", IX86_BUILTIN_INSERTF32X8, UNKNOWN, (int) V16SF_FTYPE_V16SF_V8SF_INT_V16SF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vinserti32x8_mask, "__builtin_ia32_inserti32x8_mask", IX86_BUILTIN_INSERTI32X8, UNKNOWN, (int) V16SI_FTYPE_V16SI_V8SI_INT_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vinsertf64x2_mask, "__builtin_ia32_insertf64x2_512_mask", IX86_BUILTIN_INSERTF64X2_512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V2DF_INT_V8DF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_vinserti64x2_mask, "__builtin_ia32_inserti64x2_512_mask", IX86_BUILTIN_INSERTI64X2_512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V2DI_INT_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_fpclassv8df_mask, "__builtin_ia32_fpclasspd512_mask", IX86_BUILTIN_FPCLASSPD512, UNKNOWN, (int) QI_FTYPE_V8DF_INT_UQI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_fpclassv16sf_mask, "__builtin_ia32_fpclassps512_mask", IX86_BUILTIN_FPCLASSPS512, UNKNOWN, (int) HI_FTYPE_V16SF_INT_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtd2maskv16si, "__builtin_ia32_cvtd2mask512", IX86_BUILTIN_CVTD2MASK512, UNKNOWN, (int) UHI_FTYPE_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtq2maskv8di, "__builtin_ia32_cvtq2mask512", IX86_BUILTIN_CVTQ2MASK512, UNKNOWN, (int) UQI_FTYPE_V8DI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtmask2dv16si, "__builtin_ia32_cvtmask2d512", IX86_BUILTIN_CVTMASK2D512, UNKNOWN, (int) V16SI_FTYPE_UHI)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtmask2qv8di, "__builtin_ia32_cvtmask2q512", IX86_BUILTIN_CVTMASK2Q512, UNKNOWN, (int) V8DI_FTYPE_UQI)
/* AVX512BW. */
BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kunpcksi, "__builtin_ia32_kunpcksi", IX86_BUILTIN_KUNPCKWD, UNKNOWN, (int) USI_FTYPE_USI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_kunpckdi, "__builtin_ia32_kunpckdi", IX86_BUILTIN_KUNPCKDQ, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_packusdw_mask, "__builtin_ia32_packusdw512_mask", IX86_BUILTIN_PACKUSDW512, UNKNOWN, (int) V32HI_FTYPE_V16SI_V16SI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ashlv4ti3, "__builtin_ia32_pslldq512", IX86_BUILTIN_PSLLDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_CONVERT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_lshrv4ti3, "__builtin_ia32_psrldq512", IX86_BUILTIN_PSRLDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_CONVERT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_packssdw_mask, "__builtin_ia32_packssdw512_mask", IX86_BUILTIN_PACKSSDW512, UNKNOWN, (int) V32HI_FTYPE_V16SI_V16SI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_palignrv4ti, "__builtin_ia32_palignr512", IX86_BUILTIN_PALIGNR512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_CONVERT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_palignrv64qi_mask, "__builtin_ia32_palignr512_mask", IX86_BUILTIN_PALIGNR512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_UDI_CONVERT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_loadv32hi_mask, "__builtin_ia32_movdquhi512_mask", IX86_BUILTIN_MOVDQUHI512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_loadv64qi_mask, "__builtin_ia32_movdquqi512_mask", IX86_BUILTIN_MOVDQUQI512_MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512f_psadbw, "__builtin_ia32_psadbw512", IX86_BUILTIN_PSADBW512, UNKNOWN, (int) V8DI_FTYPE_V64QI_V64QI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_dbpsadbwv32hi_mask, "__builtin_ia32_dbpsadbw512_mask", IX86_BUILTIN_DBPSADBW512, UNKNOWN, (int) V32HI_FTYPE_V64QI_V64QI_INT_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vec_dupv64qi_mask, "__builtin_ia32_pbroadcastb512_mask", IX86_BUILTIN_PBROADCASTB512, UNKNOWN, (int) V64QI_FTYPE_V16QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vec_dup_gprv64qi_mask, "__builtin_ia32_pbroadcastb512_gpr_mask", IX86_BUILTIN_PBROADCASTB512_GPR, UNKNOWN, (int) V64QI_FTYPE_QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vec_dupv32hi_mask, "__builtin_ia32_pbroadcastw512_mask", IX86_BUILTIN_PBROADCASTW512, UNKNOWN, (int) V32HI_FTYPE_V8HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vec_dup_gprv32hi_mask, "__builtin_ia32_pbroadcastw512_gpr_mask", IX86_BUILTIN_PBROADCASTW512_GPR, UNKNOWN, (int) V32HI_FTYPE_HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_sign_extendv32qiv32hi2_mask, "__builtin_ia32_pmovsxbw512_mask", IX86_BUILTIN_PMOVSXBW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32QI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_zero_extendv32qiv32hi2_mask, "__builtin_ia32_pmovzxbw512_mask", IX86_BUILTIN_PMOVZXBW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32QI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_permvarv32hi_mask, "__builtin_ia32_permvarhi512_mask", IX86_BUILTIN_VPERMVARHI512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vpermt2varv32hi3_mask, "__builtin_ia32_vpermt2varhi512_mask", IX86_BUILTIN_VPERMT2VARHI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vpermt2varv32hi3_maskz, "__builtin_ia32_vpermt2varhi512_maskz", IX86_BUILTIN_VPERMT2VARHI512_MASKZ, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_vpermi2varv32hi3_mask, "__builtin_ia32_vpermi2varhi512_mask", IX86_BUILTIN_VPERMI2VARHI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_uavgv64qi3_mask, "__builtin_ia32_pavgb512_mask", IX86_BUILTIN_PAVGB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_uavgv32hi3_mask, "__builtin_ia32_pavgw512_mask", IX86_BUILTIN_PAVGW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_addv64qi3_mask, "__builtin_ia32_paddb512_mask", IX86_BUILTIN_PADDB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_subv64qi3_mask, "__builtin_ia32_psubb512_mask", IX86_BUILTIN_PSUBB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_sssubv64qi3_mask, "__builtin_ia32_psubsb512_mask", IX86_BUILTIN_PSUBSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ssaddv64qi3_mask, "__builtin_ia32_paddsb512_mask", IX86_BUILTIN_PADDSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ussubv64qi3_mask, "__builtin_ia32_psubusb512_mask", IX86_BUILTIN_PSUBUSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_usaddv64qi3_mask, "__builtin_ia32_paddusb512_mask", IX86_BUILTIN_PADDUSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_subv32hi3_mask, "__builtin_ia32_psubw512_mask", IX86_BUILTIN_PSUBW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_addv32hi3_mask, "__builtin_ia32_paddw512_mask", IX86_BUILTIN_PADDW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_sssubv32hi3_mask, "__builtin_ia32_psubsw512_mask", IX86_BUILTIN_PSUBSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ssaddv32hi3_mask, "__builtin_ia32_paddsw512_mask", IX86_BUILTIN_PADDSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ussubv32hi3_mask, "__builtin_ia32_psubusw512_mask", IX86_BUILTIN_PSUBUSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_usaddv32hi3_mask, "__builtin_ia32_paddusw512_mask", IX86_BUILTIN_PADDUSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_umaxv32hi3_mask, "__builtin_ia32_pmaxuw512_mask", IX86_BUILTIN_PMAXUW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_smaxv32hi3_mask, "__builtin_ia32_pmaxsw512_mask", IX86_BUILTIN_PMAXSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_uminv32hi3_mask, "__builtin_ia32_pminuw512_mask", IX86_BUILTIN_PMINUW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_sminv32hi3_mask, "__builtin_ia32_pminsw512_mask", IX86_BUILTIN_PMINSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_umaxv64qi3_mask, "__builtin_ia32_pmaxub512_mask", IX86_BUILTIN_PMAXUB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_smaxv64qi3_mask, "__builtin_ia32_pmaxsb512_mask", IX86_BUILTIN_PMAXSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_uminv64qi3_mask, "__builtin_ia32_pminub512_mask", IX86_BUILTIN_PMINUB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_sminv64qi3_mask, "__builtin_ia32_pminsb512_mask", IX86_BUILTIN_PMINSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_truncatev32hiv32qi2_mask, "__builtin_ia32_pmovwb512_mask", IX86_BUILTIN_PMOVWB512, UNKNOWN, (int) V32QI_FTYPE_V32HI_V32QI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ss_truncatev32hiv32qi2_mask, "__builtin_ia32_pmovswb512_mask", IX86_BUILTIN_PMOVSWB512, UNKNOWN, (int) V32QI_FTYPE_V32HI_V32QI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_us_truncatev32hiv32qi2_mask, "__builtin_ia32_pmovuswb512_mask", IX86_BUILTIN_PMOVUSWB512, UNKNOWN, (int) V32QI_FTYPE_V32HI_V32QI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_umulhrswv32hi3_mask, "__builtin_ia32_pmulhrsw512_mask", IX86_BUILTIN_PMULHRSW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_umulv32hi3_highpart_mask, "__builtin_ia32_pmulhuw512_mask" , IX86_BUILTIN_PMULHUW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_smulv32hi3_highpart_mask, "__builtin_ia32_pmulhw512_mask" , IX86_BUILTIN_PMULHW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_mulv32hi3_mask, "__builtin_ia32_pmullw512_mask", IX86_BUILTIN_PMULLW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ashlv32hi3_mask, "__builtin_ia32_psllwi512_mask", IX86_BUILTIN_PSLLWI512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ashlv32hi3_mask, "__builtin_ia32_psllw512_mask", IX86_BUILTIN_PSLLW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V8HI_V32HI_USI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_packsswb_mask, "__builtin_ia32_packsswb512_mask", IX86_BUILTIN_PACKSSWB512, UNKNOWN, (int) V64QI_FTYPE_V32HI_V32HI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_packuswb_mask, "__builtin_ia32_packuswb512_mask", IX86_BUILTIN_PACKUSWB512, UNKNOWN, (int) V64QI_FTYPE_V32HI_V32HI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ashrvv32hi_mask, "__builtin_ia32_psrav32hi_mask", IX86_BUILTIN_PSRAVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_pmaddubsw512v32hi_mask, "__builtin_ia32_pmaddubsw512_mask", IX86_BUILTIN_PMADDUBSW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V64QI_V64QI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_pmaddwd512v32hi_mask, "__builtin_ia32_pmaddwd512_mask", IX86_BUILTIN_PMADDWD512_MASK, UNKNOWN, (int) V16SI_FTYPE_V32HI_V32HI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_lshrvv32hi_mask, "__builtin_ia32_psrlv32hi_mask", IX86_BUILTIN_PSRLVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_interleave_highv64qi_mask, "__builtin_ia32_punpckhbw512_mask", IX86_BUILTIN_PUNPCKHBW512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_interleave_highv32hi_mask, "__builtin_ia32_punpckhwd512_mask", IX86_BUILTIN_PUNPCKHWD512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_interleave_lowv64qi_mask, "__builtin_ia32_punpcklbw512_mask", IX86_BUILTIN_PUNPCKLBW512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_interleave_lowv32hi_mask, "__builtin_ia32_punpcklwd512_mask", IX86_BUILTIN_PUNPCKLWD512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_pshufbv64qi3_mask, "__builtin_ia32_pshufb512_mask", IX86_BUILTIN_PSHUFB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_pshufhwv32hi_mask, "__builtin_ia32_pshufhw512_mask", IX86_BUILTIN_PSHUFHW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_pshuflwv32hi_mask, "__builtin_ia32_pshuflw512_mask", IX86_BUILTIN_PSHUFLW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ashrv32hi3_mask, "__builtin_ia32_psrawi512_mask", IX86_BUILTIN_PSRAWI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_ashrv32hi3_mask, "__builtin_ia32_psraw512_mask", IX86_BUILTIN_PSRAW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V8HI_V32HI_USI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_lshrv32hi3_mask, "__builtin_ia32_psrlwi512_mask", IX86_BUILTIN_PSRLWI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_lshrv32hi3_mask, "__builtin_ia32_psrlw512_mask", IX86_BUILTIN_PSRLW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V8HI_V32HI_USI_COUNT)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_cvtb2maskv64qi, "__builtin_ia32_cvtb2mask512", IX86_BUILTIN_CVTB2MASK512, UNKNOWN, (int) UDI_FTYPE_V64QI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_cvtw2maskv32hi, "__builtin_ia32_cvtw2mask512", IX86_BUILTIN_CVTW2MASK512, UNKNOWN, (int) USI_FTYPE_V32HI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_cvtmask2bv64qi, "__builtin_ia32_cvtmask2b512", IX86_BUILTIN_CVTMASK2B512, UNKNOWN, (int) V64QI_FTYPE_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_cvtmask2wv32hi, "__builtin_ia32_cvtmask2w512", IX86_BUILTIN_CVTMASK2W512, UNKNOWN, (int) V32HI_FTYPE_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_eqv64qi3_mask, "__builtin_ia32_pcmpeqb512_mask", IX86_BUILTIN_PCMPEQB512_MASK, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_eqv32hi3_mask, "__builtin_ia32_pcmpeqw512_mask", IX86_BUILTIN_PCMPEQW512_MASK, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_gtv64qi3_mask, "__builtin_ia32_pcmpgtb512_mask", IX86_BUILTIN_PCMPGTB512_MASK, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_gtv32hi3_mask, "__builtin_ia32_pcmpgtw512_mask", IX86_BUILTIN_PCMPGTW512_MASK, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_testmv64qi3_mask, "__builtin_ia32_ptestmb512", IX86_BUILTIN_PTESTMB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_testmv32hi3_mask, "__builtin_ia32_ptestmw512", IX86_BUILTIN_PTESTMW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_testnmv64qi3_mask, "__builtin_ia32_ptestnmb512", IX86_BUILTIN_PTESTNMB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_testnmv32hi3_mask, "__builtin_ia32_ptestnmw512", IX86_BUILTIN_PTESTNMW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ashlvv32hi_mask, "__builtin_ia32_psllv32hi_mask", IX86_BUILTIN_PSLLVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_absv64qi2_mask, "__builtin_ia32_pabsb512_mask", IX86_BUILTIN_PABSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_absv32hi2_mask, "__builtin_ia32_pabsw512_mask", IX86_BUILTIN_PABSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_blendmv32hi, "__builtin_ia32_blendmw_512_mask", IX86_BUILTIN_BLENDMW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_blendmv64qi, "__builtin_ia32_blendmb_512_mask", IX86_BUILTIN_BLENDMB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_cmpv64qi3_mask, "__builtin_ia32_cmpb512_mask", IX86_BUILTIN_CMPB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_INT_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_cmpv32hi3_mask, "__builtin_ia32_cmpw512_mask", IX86_BUILTIN_CMPW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_INT_USI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ucmpv64qi3_mask, "__builtin_ia32_ucmpb512_mask", IX86_BUILTIN_UCMPB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_INT_UDI)
-BDESC (OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_avx512bw_ucmpv32hi3_mask, "__builtin_ia32_ucmpw512_mask", IX86_BUILTIN_UCMPW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_INT_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_kunpckdi, "__builtin_ia32_kunpckdi", IX86_BUILTIN_KUNPCKDQ, UNKNOWN, (int) UDI_FTYPE_UDI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_packusdw_mask, "__builtin_ia32_packusdw512_mask", IX86_BUILTIN_PACKUSDW512, UNKNOWN, (int) V32HI_FTYPE_V16SI_V16SI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ashlv4ti3, "__builtin_ia32_pslldq512", IX86_BUILTIN_PSLLDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_CONVERT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_lshrv4ti3, "__builtin_ia32_psrldq512", IX86_BUILTIN_PSRLDQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_INT_CONVERT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_packssdw_mask, "__builtin_ia32_packssdw512_mask", IX86_BUILTIN_PACKSSDW512, UNKNOWN, (int) V32HI_FTYPE_V16SI_V16SI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_palignrv4ti, "__builtin_ia32_palignr512", IX86_BUILTIN_PALIGNR512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_CONVERT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_palignrv64qi_mask, "__builtin_ia32_palignr512_mask", IX86_BUILTIN_PALIGNR512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_UDI_CONVERT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_loadv32hi_mask, "__builtin_ia32_movdquhi512_mask", IX86_BUILTIN_MOVDQUHI512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_loadv64qi_mask, "__builtin_ia32_movdquqi512_mask", IX86_BUILTIN_MOVDQUQI512_MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_psadbw, "__builtin_ia32_psadbw512", IX86_BUILTIN_PSADBW512, UNKNOWN, (int) V8DI_FTYPE_V64QI_V64QI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_dbpsadbwv32hi_mask, "__builtin_ia32_dbpsadbw512_mask", IX86_BUILTIN_DBPSADBW512, UNKNOWN, (int) V32HI_FTYPE_V64QI_V64QI_INT_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vec_dupv64qi_mask, "__builtin_ia32_pbroadcastb512_mask", IX86_BUILTIN_PBROADCASTB512, UNKNOWN, (int) V64QI_FTYPE_V16QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vec_dup_gprv64qi_mask, "__builtin_ia32_pbroadcastb512_gpr_mask", IX86_BUILTIN_PBROADCASTB512_GPR, UNKNOWN, (int) V64QI_FTYPE_QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vec_dupv32hi_mask, "__builtin_ia32_pbroadcastw512_mask", IX86_BUILTIN_PBROADCASTW512, UNKNOWN, (int) V32HI_FTYPE_V8HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vec_dup_gprv32hi_mask, "__builtin_ia32_pbroadcastw512_gpr_mask", IX86_BUILTIN_PBROADCASTW512_GPR, UNKNOWN, (int) V32HI_FTYPE_HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_sign_extendv32qiv32hi2_mask, "__builtin_ia32_pmovsxbw512_mask", IX86_BUILTIN_PMOVSXBW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32QI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_zero_extendv32qiv32hi2_mask, "__builtin_ia32_pmovzxbw512_mask", IX86_BUILTIN_PMOVZXBW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32QI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_permvarv32hi_mask, "__builtin_ia32_permvarhi512_mask", IX86_BUILTIN_VPERMVARHI512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vpermt2varv32hi3_mask, "__builtin_ia32_vpermt2varhi512_mask", IX86_BUILTIN_VPERMT2VARHI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vpermt2varv32hi3_maskz, "__builtin_ia32_vpermt2varhi512_maskz", IX86_BUILTIN_VPERMT2VARHI512_MASKZ, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vpermi2varv32hi3_mask, "__builtin_ia32_vpermi2varhi512_mask", IX86_BUILTIN_VPERMI2VARHI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_uavgv64qi3_mask, "__builtin_ia32_pavgb512_mask", IX86_BUILTIN_PAVGB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_uavgv32hi3_mask, "__builtin_ia32_pavgw512_mask", IX86_BUILTIN_PAVGW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv64qi3_mask, "__builtin_ia32_paddb512_mask", IX86_BUILTIN_PADDB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv64qi3_mask, "__builtin_ia32_psubb512_mask", IX86_BUILTIN_PSUBB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_sssubv64qi3_mask, "__builtin_ia32_psubsb512_mask", IX86_BUILTIN_PSUBSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ssaddv64qi3_mask, "__builtin_ia32_paddsb512_mask", IX86_BUILTIN_PADDSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ussubv64qi3_mask, "__builtin_ia32_psubusb512_mask", IX86_BUILTIN_PSUBUSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_usaddv64qi3_mask, "__builtin_ia32_paddusb512_mask", IX86_BUILTIN_PADDUSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv32hi3_mask, "__builtin_ia32_psubw512_mask", IX86_BUILTIN_PSUBW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv32hi3_mask, "__builtin_ia32_paddw512_mask", IX86_BUILTIN_PADDW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_sssubv32hi3_mask, "__builtin_ia32_psubsw512_mask", IX86_BUILTIN_PSUBSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ssaddv32hi3_mask, "__builtin_ia32_paddsw512_mask", IX86_BUILTIN_PADDSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ussubv32hi3_mask, "__builtin_ia32_psubusw512_mask", IX86_BUILTIN_PSUBUSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_usaddv32hi3_mask, "__builtin_ia32_paddusw512_mask", IX86_BUILTIN_PADDUSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_umaxv32hi3_mask, "__builtin_ia32_pmaxuw512_mask", IX86_BUILTIN_PMAXUW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv32hi3_mask, "__builtin_ia32_pmaxsw512_mask", IX86_BUILTIN_PMAXSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_uminv32hi3_mask, "__builtin_ia32_pminuw512_mask", IX86_BUILTIN_PMINUW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv32hi3_mask, "__builtin_ia32_pminsw512_mask", IX86_BUILTIN_PMINSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_umaxv64qi3_mask, "__builtin_ia32_pmaxub512_mask", IX86_BUILTIN_PMAXUB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv64qi3_mask, "__builtin_ia32_pmaxsb512_mask", IX86_BUILTIN_PMAXSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_uminv64qi3_mask, "__builtin_ia32_pminub512_mask", IX86_BUILTIN_PMINUB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv64qi3_mask, "__builtin_ia32_pminsb512_mask", IX86_BUILTIN_PMINSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_truncatev32hiv32qi2_mask, "__builtin_ia32_pmovwb512_mask", IX86_BUILTIN_PMOVWB512, UNKNOWN, (int) V32QI_FTYPE_V32HI_V32QI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ss_truncatev32hiv32qi2_mask, "__builtin_ia32_pmovswb512_mask", IX86_BUILTIN_PMOVSWB512, UNKNOWN, (int) V32QI_FTYPE_V32HI_V32QI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_us_truncatev32hiv32qi2_mask, "__builtin_ia32_pmovuswb512_mask", IX86_BUILTIN_PMOVUSWB512, UNKNOWN, (int) V32QI_FTYPE_V32HI_V32QI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_umulhrswv32hi3_mask, "__builtin_ia32_pmulhrsw512_mask", IX86_BUILTIN_PMULHRSW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_umulv32hi3_highpart_mask, "__builtin_ia32_pmulhuw512_mask" , IX86_BUILTIN_PMULHUW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smulv32hi3_highpart_mask, "__builtin_ia32_pmulhw512_mask" , IX86_BUILTIN_PMULHW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_mulv32hi3_mask, "__builtin_ia32_pmullw512_mask", IX86_BUILTIN_PMULLW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashlv32hi3_mask, "__builtin_ia32_psllwi512_mask", IX86_BUILTIN_PSLLWI512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashlv32hi3_mask, "__builtin_ia32_psllw512_mask", IX86_BUILTIN_PSLLW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V8HI_V32HI_USI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_packsswb_mask, "__builtin_ia32_packsswb512_mask", IX86_BUILTIN_PACKSSWB512, UNKNOWN, (int) V64QI_FTYPE_V32HI_V32HI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_packuswb_mask, "__builtin_ia32_packuswb512_mask", IX86_BUILTIN_PACKUSWB512, UNKNOWN, (int) V64QI_FTYPE_V32HI_V32HI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ashrvv32hi_mask, "__builtin_ia32_psrav32hi_mask", IX86_BUILTIN_PSRAVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_pmaddubsw512v32hi_mask, "__builtin_ia32_pmaddubsw512_mask", IX86_BUILTIN_PMADDUBSW512_MASK, UNKNOWN, (int) V32HI_FTYPE_V64QI_V64QI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_pmaddwd512v32hi_mask, "__builtin_ia32_pmaddwd512_mask", IX86_BUILTIN_PMADDWD512_MASK, UNKNOWN, (int) V16SI_FTYPE_V32HI_V32HI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_lshrvv32hi_mask, "__builtin_ia32_psrlv32hi_mask", IX86_BUILTIN_PSRLVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_interleave_highv64qi_mask, "__builtin_ia32_punpckhbw512_mask", IX86_BUILTIN_PUNPCKHBW512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_interleave_highv32hi_mask, "__builtin_ia32_punpckhwd512_mask", IX86_BUILTIN_PUNPCKHWD512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_interleave_lowv64qi_mask, "__builtin_ia32_punpcklbw512_mask", IX86_BUILTIN_PUNPCKLBW512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_interleave_lowv32hi_mask, "__builtin_ia32_punpcklwd512_mask", IX86_BUILTIN_PUNPCKLWD512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_pshufbv64qi3_mask, "__builtin_ia32_pshufb512_mask", IX86_BUILTIN_PSHUFB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_pshufhwv32hi_mask, "__builtin_ia32_pshufhw512_mask", IX86_BUILTIN_PSHUFHW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_pshuflwv32hi_mask, "__builtin_ia32_pshuflw512_mask", IX86_BUILTIN_PSHUFLW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashrv32hi3_mask, "__builtin_ia32_psrawi512_mask", IX86_BUILTIN_PSRAWI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_ashrv32hi3_mask, "__builtin_ia32_psraw512_mask", IX86_BUILTIN_PSRAW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V8HI_V32HI_USI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_lshrv32hi3_mask, "__builtin_ia32_psrlwi512_mask", IX86_BUILTIN_PSRLWI512, UNKNOWN, (int) V32HI_FTYPE_V32HI_INT_V32HI_USI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_lshrv32hi3_mask, "__builtin_ia32_psrlw512_mask", IX86_BUILTIN_PSRLW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V8HI_V32HI_USI_COUNT)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cvtb2maskv64qi, "__builtin_ia32_cvtb2mask512", IX86_BUILTIN_CVTB2MASK512, UNKNOWN, (int) UDI_FTYPE_V64QI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cvtw2maskv32hi, "__builtin_ia32_cvtw2mask512", IX86_BUILTIN_CVTW2MASK512, UNKNOWN, (int) USI_FTYPE_V32HI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cvtmask2bv64qi, "__builtin_ia32_cvtmask2b512", IX86_BUILTIN_CVTMASK2B512, UNKNOWN, (int) V64QI_FTYPE_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cvtmask2wv32hi, "__builtin_ia32_cvtmask2w512", IX86_BUILTIN_CVTMASK2W512, UNKNOWN, (int) V32HI_FTYPE_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_eqv64qi3_mask, "__builtin_ia32_pcmpeqb512_mask", IX86_BUILTIN_PCMPEQB512_MASK, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_eqv32hi3_mask, "__builtin_ia32_pcmpeqw512_mask", IX86_BUILTIN_PCMPEQW512_MASK, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_gtv64qi3_mask, "__builtin_ia32_pcmpgtb512_mask", IX86_BUILTIN_PCMPGTB512_MASK, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_gtv32hi3_mask, "__builtin_ia32_pcmpgtw512_mask", IX86_BUILTIN_PCMPGTW512_MASK, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_testmv64qi3_mask, "__builtin_ia32_ptestmb512", IX86_BUILTIN_PTESTMB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_testmv32hi3_mask, "__builtin_ia32_ptestmw512", IX86_BUILTIN_PTESTMW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_testnmv64qi3_mask, "__builtin_ia32_ptestnmb512", IX86_BUILTIN_PTESTNMB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_testnmv32hi3_mask, "__builtin_ia32_ptestnmw512", IX86_BUILTIN_PTESTNMW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ashlvv32hi_mask, "__builtin_ia32_psllv32hi_mask", IX86_BUILTIN_PSLLVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_absv64qi2_mask, "__builtin_ia32_pabsb512_mask", IX86_BUILTIN_PABSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_absv32hi2_mask, "__builtin_ia32_pabsw512_mask", IX86_BUILTIN_PABSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_blendmv32hi, "__builtin_ia32_blendmw_512_mask", IX86_BUILTIN_BLENDMW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_blendmv64qi, "__builtin_ia32_blendmb_512_mask", IX86_BUILTIN_BLENDMB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cmpv64qi3_mask, "__builtin_ia32_cmpb512_mask", IX86_BUILTIN_CMPB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_INT_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cmpv32hi3_mask, "__builtin_ia32_cmpw512_mask", IX86_BUILTIN_CMPW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_INT_USI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ucmpv64qi3_mask, "__builtin_ia32_ucmpb512_mask", IX86_BUILTIN_UCMPB512, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_INT_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_ucmpv32hi3_mask, "__builtin_ia32_ucmpw512_mask", IX86_BUILTIN_UCMPW512, UNKNOWN, (int) USI_FTYPE_V32HI_V32HI_INT_USI)
/* AVX512IFMA */
-BDESC (OPTION_MASK_ISA_AVX512IFMA, 0, CODE_FOR_vpmadd52luqv8di_mask, "__builtin_ia32_vpmadd52luq512_mask", IX86_BUILTIN_VPMADD52LUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512IFMA, 0, CODE_FOR_vpmadd52luqv8di_maskz, "__builtin_ia32_vpmadd52luq512_maskz", IX86_BUILTIN_VPMADD52LUQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512IFMA, 0, CODE_FOR_vpmadd52huqv8di_mask, "__builtin_ia32_vpmadd52huq512_mask", IX86_BUILTIN_VPMADD52HUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512IFMA, 0, CODE_FOR_vpmadd52huqv8di_maskz, "__builtin_ia32_vpmadd52huq512_maskz", IX86_BUILTIN_VPMADD52HUQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512IFMA, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpmadd52luqv8di_mask, "__builtin_ia32_vpmadd52luq512_mask", IX86_BUILTIN_VPMADD52LUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512IFMA, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpmadd52luqv8di_maskz, "__builtin_ia32_vpmadd52luq512_maskz", IX86_BUILTIN_VPMADD52LUQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512IFMA, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpmadd52huqv8di_mask, "__builtin_ia32_vpmadd52huq512_mask", IX86_BUILTIN_VPMADD52HUQ512, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512IFMA, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpmadd52huqv8di_maskz, "__builtin_ia32_vpmadd52huq512_maskz", IX86_BUILTIN_VPMADD52HUQ512_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512IFMA | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpmadd52luqv4di_mask, "__builtin_ia32_vpmadd52luq256_mask", IX86_BUILTIN_VPMADD52LUQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512IFMA | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpmadd52luqv4di_maskz, "__builtin_ia32_vpmadd52luq256_maskz", IX86_BUILTIN_VPMADD52LUQ256_MASKZ, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512IFMA | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpmadd52huqv4di_mask, "__builtin_ia32_vpmadd52huq256_mask", IX86_BUILTIN_VPMADD52HUQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
@@ -2552,13 +2552,13 @@ BDESC (OPTION_MASK_ISA_AVX512IFMA | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_A
BDESC (OPTION_MASK_ISA_AVX512IFMA | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXIFMA, CODE_FOR_vpmadd52huqv2di, "__builtin_ia32_vpmadd52huq128", IX86_BUINTIN_VPMADD52HUQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI)
/* AVX512VBMI */
-BDESC (OPTION_MASK_ISA_AVX512VBMI, 0, CODE_FOR_vpmultishiftqbv64qi_mask, "__builtin_ia32_vpmultishiftqb512_mask", IX86_BUILTIN_VPMULTISHIFTQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpmultishiftqbv64qi_mask, "__builtin_ia32_vpmultishiftqb512_mask", IX86_BUILTIN_VPMULTISHIFTQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpmultishiftqbv32qi_mask, "__builtin_ia32_vpmultishiftqb256_mask", IX86_BUILTIN_VPMULTISHIFTQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpmultishiftqbv16qi_mask, "__builtin_ia32_vpmultishiftqb128_mask", IX86_BUILTIN_VPMULTISHIFTQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI, 0, CODE_FOR_avx512bw_permvarv64qi_mask, "__builtin_ia32_permvarqi512_mask", IX86_BUILTIN_VPERMVARQI512_MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI, 0, CODE_FOR_avx512bw_vpermt2varv64qi3_mask, "__builtin_ia32_vpermt2varqi512_mask", IX86_BUILTIN_VPERMT2VARQI512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI, 0, CODE_FOR_avx512bw_vpermt2varv64qi3_maskz, "__builtin_ia32_vpermt2varqi512_maskz", IX86_BUILTIN_VPERMT2VARQI512_MASKZ, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI, 0, CODE_FOR_avx512bw_vpermi2varv64qi3_mask, "__builtin_ia32_vpermi2varqi512_mask", IX86_BUILTIN_VPERMI2VARQI512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_permvarv64qi_mask, "__builtin_ia32_permvarqi512_mask", IX86_BUILTIN_VPERMVARQI512_MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vpermt2varv64qi3_mask, "__builtin_ia32_vpermt2varqi512_mask", IX86_BUILTIN_VPERMT2VARQI512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vpermt2varv64qi3_maskz, "__builtin_ia32_vpermt2varqi512_maskz", IX86_BUILTIN_VPERMT2VARQI512_MASKZ, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_vpermi2varv64qi3_mask, "__builtin_ia32_vpermi2varqi512_mask", IX86_BUILTIN_VPERMI2VARQI512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_permvarv32qi_mask, "__builtin_ia32_permvarqi256_mask", IX86_BUILTIN_VPERMVARQI256_MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_permvarv16qi_mask, "__builtin_ia32_permvarqi128_mask", IX86_BUILTIN_VPERMVARQI128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_vpermt2varv32qi3_mask, "__builtin_ia32_vpermt2varqi256_mask", IX86_BUILTIN_VPERMT2VARQI256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI_USI)
@@ -2569,16 +2569,16 @@ BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512
BDESC (OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_vpermi2varv16qi3_mask, "__builtin_ia32_vpermi2varqi128_mask", IX86_BUILTIN_VPERMI2VARQI128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI_UHI)
/* VBMI2 */
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_compressv64qi_mask, "__builtin_ia32_compressqi512_mask", IX86_BUILTIN_PCOMPRESSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_compressv32hi_mask, "__builtin_ia32_compresshi512_mask", IX86_BUILTIN_PCOMPRESSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_compressv64qi_mask, "__builtin_ia32_compressqi512_mask", IX86_BUILTIN_PCOMPRESSB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_compressv32hi_mask, "__builtin_ia32_compresshi512_mask", IX86_BUILTIN_PCOMPRESSW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressv32qi_mask, "__builtin_ia32_compressqi256_mask", IX86_BUILTIN_PCOMPRESSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressv16qi_mask, "__builtin_ia32_compressqi128_mask", IX86_BUILTIN_PCOMPRESSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressv16hi_mask, "__builtin_ia32_compresshi256_mask", IX86_BUILTIN_PCOMPRESSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_compressv8hi_mask, "__builtin_ia32_compresshi128_mask", IX86_BUILTIN_PCOMPRESSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv64qi_mask, "__builtin_ia32_expandqi512_mask", IX86_BUILTIN_PEXPANDB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv64qi_maskz, "__builtin_ia32_expandqi512_maskz", IX86_BUILTIN_PEXPANDB512Z, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv32hi_mask, "__builtin_ia32_expandhi512_mask", IX86_BUILTIN_PEXPANDW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_expandv32hi_maskz, "__builtin_ia32_expandhi512_maskz", IX86_BUILTIN_PEXPANDW512Z, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv64qi_mask, "__builtin_ia32_expandqi512_mask", IX86_BUILTIN_PEXPANDB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv64qi_maskz, "__builtin_ia32_expandqi512_maskz", IX86_BUILTIN_PEXPANDB512Z, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv32hi_mask, "__builtin_ia32_expandhi512_mask", IX86_BUILTIN_PEXPANDW512, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_expandv32hi_maskz, "__builtin_ia32_expandhi512_maskz", IX86_BUILTIN_PEXPANDW512Z, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv32qi_mask, "__builtin_ia32_expandqi256_mask", IX86_BUILTIN_PEXPANDB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv32qi_maskz, "__builtin_ia32_expandqi256_maskz", IX86_BUILTIN_PEXPANDB256Z, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv16qi_mask, "__builtin_ia32_expandqi128_mask", IX86_BUILTIN_PEXPANDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_UHI)
@@ -2587,64 +2587,64 @@ BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expan
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv16hi_maskz, "__builtin_ia32_expandhi256_maskz", IX86_BUILTIN_PEXPANDW256Z, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv8hi_mask, "__builtin_ia32_expandhi128_mask", IX86_BUILTIN_PEXPANDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_expandv8hi_maskz, "__builtin_ia32_expandhi128_maskz", IX86_BUILTIN_PEXPANDW128Z, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrd_v32hi, "__builtin_ia32_vpshrd_v32hi", IX86_BUILTIN_VPSHRDV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrd_v32hi_mask, "__builtin_ia32_vpshrd_v32hi_mask", IX86_BUILTIN_VPSHRDV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT_V32HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrd_v32hi, "__builtin_ia32_vpshrd_v32hi", IX86_BUILTIN_VPSHRDV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrd_v32hi_mask, "__builtin_ia32_vpshrd_v32hi_mask", IX86_BUILTIN_VPSHRDV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT_V32HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v16hi, "__builtin_ia32_vpshrd_v16hi", IX86_BUILTIN_VPSHRDV16HI, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v16hi_mask, "__builtin_ia32_vpshrd_v16hi_mask", IX86_BUILTIN_VPSHRDV16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT_V16HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v8hi, "__builtin_ia32_vpshrd_v8hi", IX86_BUILTIN_VPSHRDV8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v8hi_mask, "__builtin_ia32_vpshrd_v8hi_mask", IX86_BUILTIN_VPSHRDV8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT_V8HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrd_v16si, "__builtin_ia32_vpshrd_v16si", IX86_BUILTIN_VPSHRDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrd_v16si_mask, "__builtin_ia32_vpshrd_v16si_mask", IX86_BUILTIN_VPSHRDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrd_v16si, "__builtin_ia32_vpshrd_v16si", IX86_BUILTIN_VPSHRDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrd_v16si_mask, "__builtin_ia32_vpshrd_v16si_mask", IX86_BUILTIN_VPSHRDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v8si, "__builtin_ia32_vpshrd_v8si", IX86_BUILTIN_VPSHRDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v8si_mask, "__builtin_ia32_vpshrd_v8si_mask", IX86_BUILTIN_VPSHRDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT_V8SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v4si, "__builtin_ia32_vpshrd_v4si", IX86_BUILTIN_VPSHRDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v4si_mask, "__builtin_ia32_vpshrd_v4si_mask", IX86_BUILTIN_VPSHRDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT_V4SI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrd_v8di, "__builtin_ia32_vpshrd_v8di", IX86_BUILTIN_VPSHRDV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrd_v8di_mask, "__builtin_ia32_vpshrd_v8di_mask", IX86_BUILTIN_VPSHRDV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrd_v8di, "__builtin_ia32_vpshrd_v8di", IX86_BUILTIN_VPSHRDV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrd_v8di_mask, "__builtin_ia32_vpshrd_v8di_mask", IX86_BUILTIN_VPSHRDV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v4di, "__builtin_ia32_vpshrd_v4di", IX86_BUILTIN_VPSHRDV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v4di_mask, "__builtin_ia32_vpshrd_v4di_mask", IX86_BUILTIN_VPSHRDV4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT_V4DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v2di, "__builtin_ia32_vpshrd_v2di", IX86_BUILTIN_VPSHRDV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrd_v2di_mask, "__builtin_ia32_vpshrd_v2di_mask", IX86_BUILTIN_VPSHRDV2DI_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_V2DI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshld_v32hi, "__builtin_ia32_vpshld_v32hi", IX86_BUILTIN_VPSHLDV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshld_v32hi_mask, "__builtin_ia32_vpshld_v32hi_mask", IX86_BUILTIN_VPSHLDV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT_V32HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshld_v32hi, "__builtin_ia32_vpshld_v32hi", IX86_BUILTIN_VPSHLDV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshld_v32hi_mask, "__builtin_ia32_vpshld_v32hi_mask", IX86_BUILTIN_VPSHLDV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_INT_V32HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v16hi, "__builtin_ia32_vpshld_v16hi", IX86_BUILTIN_VPSHLDV16HI, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v16hi_mask, "__builtin_ia32_vpshld_v16hi_mask", IX86_BUILTIN_VPSHLDV16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT_V16HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v8hi, "__builtin_ia32_vpshld_v8hi", IX86_BUILTIN_VPSHLDV8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v8hi_mask, "__builtin_ia32_vpshld_v8hi_mask", IX86_BUILTIN_VPSHLDV8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT_V8HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshld_v16si, "__builtin_ia32_vpshld_v16si", IX86_BUILTIN_VPSHLDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshld_v16si_mask, "__builtin_ia32_vpshld_v16si_mask", IX86_BUILTIN_VPSHLDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshld_v16si, "__builtin_ia32_vpshld_v16si", IX86_BUILTIN_VPSHLDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshld_v16si_mask, "__builtin_ia32_vpshld_v16si_mask", IX86_BUILTIN_VPSHLDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_INT_V16SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v8si, "__builtin_ia32_vpshld_v8si", IX86_BUILTIN_VPSHLDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v8si_mask, "__builtin_ia32_vpshld_v8si_mask", IX86_BUILTIN_VPSHLDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT_V8SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v4si, "__builtin_ia32_vpshld_v4si", IX86_BUILTIN_VPSHLDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v4si_mask, "__builtin_ia32_vpshld_v4si_mask", IX86_BUILTIN_VPSHLDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT_V4SI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshld_v8di, "__builtin_ia32_vpshld_v8di", IX86_BUILTIN_VPSHLDV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshld_v8di_mask, "__builtin_ia32_vpshld_v8di_mask", IX86_BUILTIN_VPSHLDV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshld_v8di, "__builtin_ia32_vpshld_v8di", IX86_BUILTIN_VPSHLDV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshld_v8di_mask, "__builtin_ia32_vpshld_v8di_mask", IX86_BUILTIN_VPSHLDV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT_V8DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v4di, "__builtin_ia32_vpshld_v4di", IX86_BUILTIN_VPSHLDV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v4di_mask, "__builtin_ia32_vpshld_v4di_mask", IX86_BUILTIN_VPSHLDV4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT_V4DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v2di, "__builtin_ia32_vpshld_v2di", IX86_BUILTIN_VPSHLDV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshld_v2di_mask, "__builtin_ia32_vpshld_v2di_mask", IX86_BUILTIN_VPSHLDV2DI_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_V2DI_INT)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v32hi, "__builtin_ia32_vpshrdv_v32hi", IX86_BUILTIN_VPSHRDVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v32hi_mask, "__builtin_ia32_vpshrdv_v32hi_mask", IX86_BUILTIN_VPSHRDVV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v32hi_maskz, "__builtin_ia32_vpshrdv_v32hi_maskz", IX86_BUILTIN_VPSHRDVV32HI_MASKZ, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v32hi, "__builtin_ia32_vpshrdv_v32hi", IX86_BUILTIN_VPSHRDVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v32hi_mask, "__builtin_ia32_vpshrdv_v32hi_mask", IX86_BUILTIN_VPSHRDVV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v32hi_maskz, "__builtin_ia32_vpshrdv_v32hi_maskz", IX86_BUILTIN_VPSHRDVV32HI_MASKZ, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v16hi, "__builtin_ia32_vpshrdv_v16hi", IX86_BUILTIN_VPSHRDVV16HI, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_V16HI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v16hi_mask, "__builtin_ia32_vpshrdv_v16hi_mask", IX86_BUILTIN_VPSHRDVV16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v16hi_maskz, "__builtin_ia32_vpshrdv_v16hi_maskz", IX86_BUILTIN_VPSHRDVV16HI_MASKZ, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v8hi, "__builtin_ia32_vpshrdv_v8hi", IX86_BUILTIN_VPSHRDVV8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v8hi_mask, "__builtin_ia32_vpshrdv_v8hi_mask", IX86_BUILTIN_VPSHRDVV8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v8hi_maskz, "__builtin_ia32_vpshrdv_v8hi_maskz", IX86_BUILTIN_VPSHRDVV8HI_MASKZ, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v16si, "__builtin_ia32_vpshrdv_v16si", IX86_BUILTIN_VPSHRDVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v16si_mask, "__builtin_ia32_vpshrdv_v16si_mask", IX86_BUILTIN_VPSHRDVV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v16si_maskz, "__builtin_ia32_vpshrdv_v16si_maskz", IX86_BUILTIN_VPSHRDVV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v16si, "__builtin_ia32_vpshrdv_v16si", IX86_BUILTIN_VPSHRDVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v16si_mask, "__builtin_ia32_vpshrdv_v16si_mask", IX86_BUILTIN_VPSHRDVV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v16si_maskz, "__builtin_ia32_vpshrdv_v16si_maskz", IX86_BUILTIN_VPSHRDVV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v8si, "__builtin_ia32_vpshrdv_v8si", IX86_BUILTIN_VPSHRDVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v8si_mask, "__builtin_ia32_vpshrdv_v8si_mask", IX86_BUILTIN_VPSHRDVV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v8si_maskz, "__builtin_ia32_vpshrdv_v8si_maskz", IX86_BUILTIN_VPSHRDVV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v4si, "__builtin_ia32_vpshrdv_v4si", IX86_BUILTIN_VPSHRDVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v4si_mask, "__builtin_ia32_vpshrdv_v4si_mask", IX86_BUILTIN_VPSHRDVV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v4si_maskz, "__builtin_ia32_vpshrdv_v4si_maskz", IX86_BUILTIN_VPSHRDVV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v8di, "__builtin_ia32_vpshrdv_v8di", IX86_BUILTIN_VPSHRDVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v8di_mask, "__builtin_ia32_vpshrdv_v8di_mask", IX86_BUILTIN_VPSHRDVV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshrdv_v8di_maskz, "__builtin_ia32_vpshrdv_v8di_maskz", IX86_BUILTIN_VPSHRDVV8DI_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v8di, "__builtin_ia32_vpshrdv_v8di", IX86_BUILTIN_VPSHRDVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v8di_mask, "__builtin_ia32_vpshrdv_v8di_mask", IX86_BUILTIN_VPSHRDVV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshrdv_v8di_maskz, "__builtin_ia32_vpshrdv_v8di_maskz", IX86_BUILTIN_VPSHRDVV8DI_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v4di, "__builtin_ia32_vpshrdv_v4di", IX86_BUILTIN_VPSHRDVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v4di_mask, "__builtin_ia32_vpshrdv_v4di_mask", IX86_BUILTIN_VPSHRDVV4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v4di_maskz, "__builtin_ia32_vpshrdv_v4di_maskz", IX86_BUILTIN_VPSHRDVV4DI_MASKZ, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
@@ -2652,27 +2652,27 @@ BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshr
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v2di_mask, "__builtin_ia32_vpshrdv_v2di_mask", IX86_BUILTIN_VPSHRDVV2DI_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshrdv_v2di_maskz, "__builtin_ia32_vpshrdv_v2di_maskz", IX86_BUILTIN_VPSHRDVV2DI_MASKZ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v32hi, "__builtin_ia32_vpshldv_v32hi", IX86_BUILTIN_VPSHLDVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v32hi_mask, "__builtin_ia32_vpshldv_v32hi_mask", IX86_BUILTIN_VPSHLDVV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v32hi_maskz, "__builtin_ia32_vpshldv_v32hi_maskz", IX86_BUILTIN_VPSHLDVV32HI_MASKZ, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v32hi, "__builtin_ia32_vpshldv_v32hi", IX86_BUILTIN_VPSHLDVV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v32hi_mask, "__builtin_ia32_vpshldv_v32hi_mask", IX86_BUILTIN_VPSHLDVV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v32hi_maskz, "__builtin_ia32_vpshldv_v32hi_maskz", IX86_BUILTIN_VPSHLDVV32HI_MASKZ, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v16hi, "__builtin_ia32_vpshldv_v16hi", IX86_BUILTIN_VPSHLDVV16HI, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_V16HI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v16hi_mask, "__builtin_ia32_vpshldv_v16hi_mask", IX86_BUILTIN_VPSHLDVV16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v16hi_maskz, "__builtin_ia32_vpshldv_v16hi_maskz", IX86_BUILTIN_VPSHLDVV16HI_MASKZ, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v8hi, "__builtin_ia32_vpshldv_v8hi", IX86_BUILTIN_VPSHLDVV8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v8hi_mask, "__builtin_ia32_vpshldv_v8hi_mask", IX86_BUILTIN_VPSHLDVV8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v8hi_maskz, "__builtin_ia32_vpshldv_v8hi_maskz", IX86_BUILTIN_VPSHLDVV8HI_MASKZ, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v16si, "__builtin_ia32_vpshldv_v16si", IX86_BUILTIN_VPSHLDVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v16si_mask, "__builtin_ia32_vpshldv_v16si_mask", IX86_BUILTIN_VPSHLDVV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v16si_maskz, "__builtin_ia32_vpshldv_v16si_maskz", IX86_BUILTIN_VPSHLDVV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v16si, "__builtin_ia32_vpshldv_v16si", IX86_BUILTIN_VPSHLDVV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v16si_mask, "__builtin_ia32_vpshldv_v16si_mask", IX86_BUILTIN_VPSHLDVV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v16si_maskz, "__builtin_ia32_vpshldv_v16si_maskz", IX86_BUILTIN_VPSHLDVV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v8si, "__builtin_ia32_vpshldv_v8si", IX86_BUILTIN_VPSHLDVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v8si_mask, "__builtin_ia32_vpshldv_v8si_mask", IX86_BUILTIN_VPSHLDVV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v8si_maskz, "__builtin_ia32_vpshldv_v8si_maskz", IX86_BUILTIN_VPSHLDVV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v4si, "__builtin_ia32_vpshldv_v4si", IX86_BUILTIN_VPSHLDVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v4si_mask, "__builtin_ia32_vpshldv_v4si_mask", IX86_BUILTIN_VPSHLDVV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v4si_maskz, "__builtin_ia32_vpshldv_v4si_maskz", IX86_BUILTIN_VPSHLDVV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v8di, "__builtin_ia32_vpshldv_v8di", IX86_BUILTIN_VPSHLDVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v8di_mask, "__builtin_ia32_vpshldv_v8di_mask", IX86_BUILTIN_VPSHLDVV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VBMI2, 0, CODE_FOR_vpshldv_v8di_maskz, "__builtin_ia32_vpshldv_v8di_maskz", IX86_BUILTIN_VPSHLDVV8DI_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v8di, "__builtin_ia32_vpshldv_v8di", IX86_BUILTIN_VPSHLDVV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v8di_mask, "__builtin_ia32_vpshldv_v8di_mask", IX86_BUILTIN_VPSHLDVV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VBMI2, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpshldv_v8di_maskz, "__builtin_ia32_vpshldv_v8di_maskz", IX86_BUILTIN_VPSHLDVV8DI_MASKZ, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_V8DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v4di, "__builtin_ia32_vpshldv_v4di", IX86_BUILTIN_VPSHLDVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v4di_mask, "__builtin_ia32_vpshldv_v4di_mask", IX86_BUILTIN_VPSHLDVV4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v4di_maskz, "__builtin_ia32_vpshldv_v4di_maskz", IX86_BUILTIN_VPSHLDVV4DI_MASKZ, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_V4DI_UQI)
@@ -2681,20 +2681,20 @@ BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshl
BDESC (OPTION_MASK_ISA_AVX512VBMI2 | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpshldv_v2di_maskz, "__builtin_ia32_vpshldv_v2di_maskz", IX86_BUILTIN_VPSHLDVV2DI_MASKZ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI_UQI)
/* GFNI */
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_vgf2p8affineinvqb_v64qi, "__builtin_ia32_vgf2p8affineinvqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEINVQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_vgf2p8affineinvqb_v64qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vgf2p8affineinvqb_v64qi, "__builtin_ia32_vgf2p8affineinvqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEINVQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vgf2p8affineinvqb_v64qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX, 0, CODE_FOR_vgf2p8affineinvqb_v32qi, "__builtin_ia32_vgf2p8affineinvqb_v32qi", IX86_BUILTIN_VGF2P8AFFINEINVQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512VL | OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_vgf2p8affineinvqb_v32qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v32qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT_V32QI_USI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE2, 0, CODE_FOR_vgf2p8affineinvqb_v16qi, "__builtin_ia32_vgf2p8affineinvqb_v16qi", IX86_BUILTIN_VGF2P8AFFINEINVQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vgf2p8affineinvqb_v16qi_mask, "__builtin_ia32_vgf2p8affineinvqb_v16qi_mask", IX86_BUILTIN_VGF2P8AFFINEINVQB128MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_vgf2p8affineqb_v64qi, "__builtin_ia32_vgf2p8affineqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_vgf2p8affineqb_v64qi_mask, "__builtin_ia32_vgf2p8affineqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vgf2p8affineqb_v64qi, "__builtin_ia32_vgf2p8affineqb_v64qi", IX86_BUILTIN_VGF2P8AFFINEQB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vgf2p8affineqb_v64qi_mask, "__builtin_ia32_vgf2p8affineqb_v64qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT_V64QI_UDI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX, 0, CODE_FOR_vgf2p8affineqb_v32qi, "__builtin_ia32_vgf2p8affineqb_v32qi", IX86_BUILTIN_VGF2P8AFFINEQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512VL | OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_vgf2p8affineqb_v32qi_mask, "__builtin_ia32_vgf2p8affineqb_v32qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT_V32QI_USI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE2, 0, CODE_FOR_vgf2p8affineqb_v16qi, "__builtin_ia32_vgf2p8affineqb_v16qi", IX86_BUILTIN_VGF2P8AFFINEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vgf2p8affineqb_v16qi_mask, "__builtin_ia32_vgf2p8affineqb_v16qi_mask", IX86_BUILTIN_VGF2P8AFFINEQB128MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_vgf2p8mulb_v64qi, "__builtin_ia32_vgf2p8mulb_v64qi", IX86_BUILTIN_VGF2P8MULB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
-BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_vgf2p8mulb_v64qi_mask, "__builtin_ia32_vgf2p8mulb_v64qi_mask", IX86_BUILTIN_VGF2P8MULB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vgf2p8mulb_v64qi, "__builtin_ia32_vgf2p8mulb_v64qi", IX86_BUILTIN_VGF2P8MULB512, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512BW, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vgf2p8mulb_v64qi_mask, "__builtin_ia32_vgf2p8mulb_v64qi_mask", IX86_BUILTIN_VGF2P8MULB512MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_V64QI_UDI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX, 0, CODE_FOR_vgf2p8mulb_v32qi, "__builtin_ia32_vgf2p8mulb_v32qi", IX86_BUILTIN_VGF2P8MULB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512VL | OPTION_MASK_ISA_AVX512BW, 0, CODE_FOR_vgf2p8mulb_v32qi_mask, "__builtin_ia32_vgf2p8mulb_v32qi_mask", IX86_BUILTIN_VGF2P8MULB256MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_SSE2, 0, CODE_FOR_vgf2p8mulb_v16qi, "__builtin_ia32_vgf2p8mulb_v16qi", IX86_BUILTIN_VGF2P8MULB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
@@ -2702,9 +2702,9 @@ BDESC (OPTION_MASK_ISA_GFNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vgf2p8mulb_v
/* AVX512_VNNI */
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpbusd_v16si, "__builtin_ia32_vpdpbusd_v16si", IX86_BUILTIN_VPDPBUSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpbusd_v16si_mask, "__builtin_ia32_vpdpbusd_v16si_mask", IX86_BUILTIN_VPDPBUSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpbusd_v16si_maskz, "__builtin_ia32_vpdpbusd_v16si_maskz", IX86_BUILTIN_VPDPBUSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpbusd_v16si, "__builtin_ia32_vpdpbusd_v16si", IX86_BUILTIN_VPDPBUSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpbusd_v16si_mask, "__builtin_ia32_vpdpbusd_v16si_mask", IX86_BUILTIN_VPDPBUSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpbusd_v16si_maskz, "__builtin_ia32_vpdpbusd_v16si_maskz", IX86_BUILTIN_VPDPBUSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXVNNI, CODE_FOR_vpdpbusd_v8si, "__builtin_ia32_vpdpbusd_v8si", IX86_BUILTIN_VPDPBUSDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusd_v8si_mask, "__builtin_ia32_vpdpbusd_v8si_mask", IX86_BUILTIN_VPDPBUSDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusd_v8si_maskz, "__builtin_ia32_vpdpbusd_v8si_maskz", IX86_BUILTIN_VPDPBUSDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
@@ -2712,9 +2712,9 @@ BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_A
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusd_v4si_mask, "__builtin_ia32_vpdpbusd_v4si_mask", IX86_BUILTIN_VPDPBUSDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusd_v4si_maskz, "__builtin_ia32_vpdpbusd_v4si_maskz", IX86_BUILTIN_VPDPBUSDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpbusds_v16si, "__builtin_ia32_vpdpbusds_v16si", IX86_BUILTIN_VPDPBUSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpbusds_v16si_mask, "__builtin_ia32_vpdpbusds_v16si_mask", IX86_BUILTIN_VPDPBUSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpbusds_v16si_maskz, "__builtin_ia32_vpdpbusds_v16si_maskz", IX86_BUILTIN_VPDPBUSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpbusds_v16si, "__builtin_ia32_vpdpbusds_v16si", IX86_BUILTIN_VPDPBUSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpbusds_v16si_mask, "__builtin_ia32_vpdpbusds_v16si_mask", IX86_BUILTIN_VPDPBUSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpbusds_v16si_maskz, "__builtin_ia32_vpdpbusds_v16si_maskz", IX86_BUILTIN_VPDPBUSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXVNNI, CODE_FOR_vpdpbusds_v8si, "__builtin_ia32_vpdpbusds_v8si", IX86_BUILTIN_VPDPBUSDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusds_v8si_mask, "__builtin_ia32_vpdpbusds_v8si_mask", IX86_BUILTIN_VPDPBUSDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusds_v8si_maskz, "__builtin_ia32_vpdpbusds_v8si_maskz", IX86_BUILTIN_VPDPBUSDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
@@ -2722,9 +2722,9 @@ BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_A
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusds_v4si_mask, "__builtin_ia32_vpdpbusds_v4si_mask", IX86_BUILTIN_VPDPBUSDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpbusds_v4si_maskz, "__builtin_ia32_vpdpbusds_v4si_maskz", IX86_BUILTIN_VPDPBUSDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpwssd_v16si, "__builtin_ia32_vpdpwssd_v16si", IX86_BUILTIN_VPDPWSSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpwssd_v16si_mask, "__builtin_ia32_vpdpwssd_v16si_mask", IX86_BUILTIN_VPDPWSSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpwssd_v16si_maskz, "__builtin_ia32_vpdpwssd_v16si_maskz", IX86_BUILTIN_VPDPWSSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpwssd_v16si, "__builtin_ia32_vpdpwssd_v16si", IX86_BUILTIN_VPDPWSSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpwssd_v16si_mask, "__builtin_ia32_vpdpwssd_v16si_mask", IX86_BUILTIN_VPDPWSSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpwssd_v16si_maskz, "__builtin_ia32_vpdpwssd_v16si_maskz", IX86_BUILTIN_VPDPWSSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXVNNI, CODE_FOR_vpdpwssd_v8si, "__builtin_ia32_vpdpwssd_v8si", IX86_BUILTIN_VPDPWSSDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssd_v8si_mask, "__builtin_ia32_vpdpwssd_v8si_mask", IX86_BUILTIN_VPDPWSSDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssd_v8si_maskz, "__builtin_ia32_vpdpwssd_v8si_maskz", IX86_BUILTIN_VPDPWSSDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
@@ -2732,9 +2732,9 @@ BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_A
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssd_v4si_mask, "__builtin_ia32_vpdpwssd_v4si_mask", IX86_BUILTIN_VPDPWSSDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssd_v4si_maskz, "__builtin_ia32_vpdpwssd_v4si_maskz", IX86_BUILTIN_VPDPWSSDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpwssds_v16si, "__builtin_ia32_vpdpwssds_v16si", IX86_BUILTIN_VPDPWSSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpwssds_v16si_mask, "__builtin_ia32_vpdpwssds_v16si_mask", IX86_BUILTIN_VPDPWSSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VNNI, 0, CODE_FOR_vpdpwssds_v16si_maskz, "__builtin_ia32_vpdpwssds_v16si_maskz", IX86_BUILTIN_VPDPWSSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpwssds_v16si, "__builtin_ia32_vpdpwssds_v16si", IX86_BUILTIN_VPDPWSSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpwssds_v16si_mask, "__builtin_ia32_vpdpwssds_v16si_mask", IX86_BUILTIN_VPDPWSSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VNNI, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpdpwssds_v16si_maskz, "__builtin_ia32_vpdpwssds_v16si_maskz", IX86_BUILTIN_VPDPWSSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXVNNI, CODE_FOR_vpdpwssds_v8si, "__builtin_ia32_vpdpwssds_v8si", IX86_BUILTIN_VPDPWSSDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssds_v8si_mask, "__builtin_ia32_vpdpwssds_v8si_mask", IX86_BUILTIN_VPDPWSSDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssds_v8si_maskz, "__builtin_ia32_vpdpwssds_v8si_maskz", IX86_BUILTIN_VPDPWSSDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
@@ -2773,13 +2773,13 @@ BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16, CODE_FOR_vpdpwuuds_v4si, "__builtin_ia3
/* VPCLMULQDQ */
BDESC (OPTION_MASK_ISA_VPCLMULQDQ | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpclmulqdq_v2di, "__builtin_ia32_vpclmulqdq_v2di", IX86_BUILTIN_VPCLMULQDQ2, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT)
BDESC (OPTION_MASK_ISA_VPCLMULQDQ | OPTION_MASK_ISA_AVX, 0, CODE_FOR_vpclmulqdq_v4di, "__builtin_ia32_vpclmulqdq_v4di", IX86_BUILTIN_VPCLMULQDQ4, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT)
-BDESC (OPTION_MASK_ISA_VPCLMULQDQ | OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_vpclmulqdq_v8di, "__builtin_ia32_vpclmulqdq_v8di", IX86_BUILTIN_VPCLMULQDQ8, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT)
+BDESC (OPTION_MASK_ISA_VPCLMULQDQ | OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpclmulqdq_v8di, "__builtin_ia32_vpclmulqdq_v8di", IX86_BUILTIN_VPCLMULQDQ8, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_INT)
/* VPOPCNTDQ */
-BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, 0, CODE_FOR_vpopcountv16si, "__builtin_ia32_vpopcountd_v16si", IX86_BUILTIN_VPOPCOUNTDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI)
-BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, 0, CODE_FOR_vpopcountv16si_mask, "__builtin_ia32_vpopcountd_v16si_mask", IX86_BUILTIN_VPOPCOUNTDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, 0, CODE_FOR_vpopcountv8di, "__builtin_ia32_vpopcountq_v8di", IX86_BUILTIN_VPOPCOUNTQV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI)
-BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, 0, CODE_FOR_vpopcountv8di_mask, "__builtin_ia32_vpopcountq_v8di_mask", IX86_BUILTIN_VPOPCOUNTQV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv16si, "__builtin_ia32_vpopcountd_v16si", IX86_BUILTIN_VPOPCOUNTDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI)
+BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv16si_mask, "__builtin_ia32_vpopcountd_v16si_mask", IX86_BUILTIN_VPOPCOUNTDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv8di, "__builtin_ia32_vpopcountq_v8di", IX86_BUILTIN_VPOPCOUNTQV8DI, UNKNOWN, (int) V8DI_FTYPE_V8DI)
+BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv8di_mask, "__builtin_ia32_vpopcountq_v8di_mask", IX86_BUILTIN_VPOPCOUNTQV8DI_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DI_V8DI_UQI)
BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv4di, "__builtin_ia32_vpopcountq_v4di", IX86_BUILTIN_VPOPCOUNTQV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI)
BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv4di_mask, "__builtin_ia32_vpopcountq_v4di_mask", IX86_BUILTIN_VPOPCOUNTQV4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_UQI)
@@ -2791,21 +2791,21 @@ BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_v
BDESC (OPTION_MASK_ISA_AVX512VPOPCNTDQ | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv8si_mask, "__builtin_ia32_vpopcountd_v8si_mask", IX86_BUILTIN_VPOPCOUNTDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_UHI)
/* BITALG */
-BDESC (OPTION_MASK_ISA_AVX512BITALG, 0, CODE_FOR_vpopcountv64qi, "__builtin_ia32_vpopcountb_v64qi", IX86_BUILTIN_VPOPCOUNTBV64QI, UNKNOWN, (int) V64QI_FTYPE_V64QI)
-BDESC (OPTION_MASK_ISA_AVX512BITALG, 0, CODE_FOR_vpopcountv64qi_mask, "__builtin_ia32_vpopcountb_v64qi_mask", IX86_BUILTIN_VPOPCOUNTBV64QI_MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BITALG, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv64qi, "__builtin_ia32_vpopcountb_v64qi", IX86_BUILTIN_VPOPCOUNTBV64QI, UNKNOWN, (int) V64QI_FTYPE_V64QI)
+BDESC (OPTION_MASK_ISA_AVX512BITALG, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv64qi_mask, "__builtin_ia32_vpopcountb_v64qi_mask", IX86_BUILTIN_VPOPCOUNTBV64QI_MASK, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_UDI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv32qi, "__builtin_ia32_vpopcountb_v32qi", IX86_BUILTIN_VPOPCOUNTBV32QI, UNKNOWN, (int) V32QI_FTYPE_V32QI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv32qi_mask, "__builtin_ia32_vpopcountb_v32qi_mask", IX86_BUILTIN_VPOPCOUNTBV32QI_MASK, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv16qi, "__builtin_ia32_vpopcountb_v16qi", IX86_BUILTIN_VPOPCOUNTBV16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv16qi_mask, "__builtin_ia32_vpopcountb_v16qi_mask", IX86_BUILTIN_VPOPCOUNTBV16QI_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_AVX512BITALG, 0, CODE_FOR_vpopcountv32hi, "__builtin_ia32_vpopcountw_v32hi", IX86_BUILTIN_VPOPCOUNTWV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI)
-BDESC (OPTION_MASK_ISA_AVX512BITALG, 0, CODE_FOR_vpopcountv32hi_mask, "__builtin_ia32_vpopcountw_v32hi_mask", IX86_BUILTIN_VPOPCOUNTQV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_AVX512BITALG, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv32hi, "__builtin_ia32_vpopcountw_v32hi", IX86_BUILTIN_VPOPCOUNTWV32HI, UNKNOWN, (int) V32HI_FTYPE_V32HI)
+BDESC (OPTION_MASK_ISA_AVX512BITALG, OPTION_MASK_ISA2_EVEX512, CODE_FOR_vpopcountv32hi_mask, "__builtin_ia32_vpopcountw_v32hi_mask", IX86_BUILTIN_VPOPCOUNTQV32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HI_V32HI_USI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv16hi, "__builtin_ia32_vpopcountw_v16hi", IX86_BUILTIN_VPOPCOUNTWV16HI, UNKNOWN, (int) V16HI_FTYPE_V16HI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv16hi_mask, "__builtin_ia32_vpopcountw_v16hi_mask", IX86_BUILTIN_VPOPCOUNTQV16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_UHI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv8hi, "__builtin_ia32_vpopcountw_v8hi", IX86_BUILTIN_VPOPCOUNTWV8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpopcountv8hi_mask, "__builtin_ia32_vpopcountw_v8hi_mask", IX86_BUILTIN_VPOPCOUNTQV8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_UQI)
-BDESC (OPTION_MASK_ISA_AVX512BITALG, 0, CODE_FOR_avx512vl_vpshufbitqmbv64qi_mask, "__builtin_ia32_vpshufbitqmb512_mask", IX86_BUILTIN_VPSHUFBITQMB512_MASK, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_AVX512BITALG, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512vl_vpshufbitqmbv64qi_mask, "__builtin_ia32_vpshufbitqmb512_mask", IX86_BUILTIN_VPSHUFBITQMB512_MASK, UNKNOWN, (int) UDI_FTYPE_V64QI_V64QI_UDI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_vpshufbitqmbv32qi_mask, "__builtin_ia32_vpshufbitqmb256_mask", IX86_BUILTIN_VPSHUFBITQMB256_MASK, UNKNOWN, (int) USI_FTYPE_V32QI_V32QI_USI)
BDESC (OPTION_MASK_ISA_AVX512BITALG | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_avx512vl_vpshufbitqmbv16qi_mask, "__builtin_ia32_vpshufbitqmb128_mask", IX86_BUILTIN_VPSHUFBITQMB128_MASK, UNKNOWN, (int) UHI_FTYPE_V16QI_V16QI_UHI)
@@ -2829,39 +2829,39 @@ BDESC (0, OPTION_MASK_ISA2_RDPID, CODE_FOR_rdpid, "__builtin_ia32_rdpid", IX86_B
/* VAES. */
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v16qi, "__builtin_ia32_vaesdec_v16qi", IX86_BUILTIN_VAESDEC16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v32qi, "__builtin_ia32_vaesdec_v32qi", IX86_BUILTIN_VAESDEC32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v64qi, "__builtin_ia32_vaesdec_v64qi", IX86_BUILTIN_VAESDEC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+BDESC (0, OPTION_MASK_ISA2_VAES | OPTION_MASK_ISA2_EVEX512, CODE_FOR_vaesdec_v64qi, "__builtin_ia32_vaesdec_v64qi", IX86_BUILTIN_VAESDEC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v16qi, "__builtin_ia32_vaesdeclast_v16qi", IX86_BUILTIN_VAESDECLAST16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v32qi, "__builtin_ia32_vaesdeclast_v32qi", IX86_BUILTIN_VAESDECLAST32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v64qi, "__builtin_ia32_vaesdeclast_v64qi", IX86_BUILTIN_VAESDECLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+BDESC (0, OPTION_MASK_ISA2_VAES | OPTION_MASK_ISA2_EVEX512, CODE_FOR_vaesdeclast_v64qi, "__builtin_ia32_vaesdeclast_v64qi", IX86_BUILTIN_VAESDECLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v16qi, "__builtin_ia32_vaesenc_v16qi", IX86_BUILTIN_VAESENC16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v32qi, "__builtin_ia32_vaesenc_v32qi", IX86_BUILTIN_VAESENC32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v64qi, "__builtin_ia32_vaesenc_v64qi", IX86_BUILTIN_VAESENC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+BDESC (0, OPTION_MASK_ISA2_VAES | OPTION_MASK_ISA2_EVEX512, CODE_FOR_vaesenc_v64qi, "__builtin_ia32_vaesenc_v64qi", IX86_BUILTIN_VAESENC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v16qi, "__builtin_ia32_vaesenclast_v16qi", IX86_BUILTIN_VAESENCLAST16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v32qi, "__builtin_ia32_vaesenclast_v32qi", IX86_BUILTIN_VAESENCLAST32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v64qi, "__builtin_ia32_vaesenclast_v64qi", IX86_BUILTIN_VAESENCLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+BDESC (0, OPTION_MASK_ISA2_VAES | OPTION_MASK_ISA2_EVEX512, CODE_FOR_vaesenclast_v64qi, "__builtin_ia32_vaesenclast_v64qi", IX86_BUILTIN_VAESENCLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
/* BF16 */
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v32bf, "__builtin_ia32_cvtne2ps2bf16_v32bf", IX86_BUILTIN_CVTNE2PS2BF16_V32BF, UNKNOWN, (int) V32BF_FTYPE_V16SF_V16SF)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v32bf_mask, "__builtin_ia32_cvtne2ps2bf16_v32bf_mask", IX86_BUILTIN_CVTNE2PS2BF16_V32BF_MASK, UNKNOWN, (int) V32BF_FTYPE_V16SF_V16SF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v32bf_maskz, "__builtin_ia32_cvtne2ps2bf16_v32bf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V32BF_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V16SF_V16SF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtne2ps2bf16_v32bf, "__builtin_ia32_cvtne2ps2bf16_v32bf", IX86_BUILTIN_CVTNE2PS2BF16_V32BF, UNKNOWN, (int) V32BF_FTYPE_V16SF_V16SF)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtne2ps2bf16_v32bf_mask, "__builtin_ia32_cvtne2ps2bf16_v32bf_mask", IX86_BUILTIN_CVTNE2PS2BF16_V32BF_MASK, UNKNOWN, (int) V32BF_FTYPE_V16SF_V16SF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtne2ps2bf16_v32bf_maskz, "__builtin_ia32_cvtne2ps2bf16_v32bf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V32BF_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V16SF_V16SF_USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v16bf, "__builtin_ia32_cvtne2ps2bf16_v16bf", IX86_BUILTIN_CVTNE2PS2BF16_V16BF, UNKNOWN, (int) V16BF_FTYPE_V8SF_V8SF)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v16bf_mask, "__builtin_ia32_cvtne2ps2bf16_v16bf_mask", IX86_BUILTIN_CVTNE2PS2BF16_V16BF_MASK, UNKNOWN, (int) V16BF_FTYPE_V8SF_V8SF_V16BF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v16bf_maskz, "__builtin_ia32_cvtne2ps2bf16_v16bf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V16BF_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V8SF_V8SF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v8bf, "__builtin_ia32_cvtne2ps2bf16_v8bf", IX86_BUILTIN_CVTNE2PS2BF16_V8BF, UNKNOWN, (int) V8BF_FTYPE_V4SF_V4SF)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v8bf_mask, "__builtin_ia32_cvtne2ps2bf16_v8bf_mask", IX86_BUILTIN_CVTNE2PS2BF16_V8BF_MASK, UNKNOWN, (int) V8BF_FTYPE_V4SF_V4SF_V8BF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v8bf_maskz, "__builtin_ia32_cvtne2ps2bf16_v8bf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V8BF_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V4SF_V4SF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v16sf, "__builtin_ia32_cvtneps2bf16_v16sf", IX86_BUILTIN_CVTNEPS2BF16_V16SF, UNKNOWN, (int) V16BF_FTYPE_V16SF)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v16sf_mask, "__builtin_ia32_cvtneps2bf16_v16sf_mask", IX86_BUILTIN_CVTNEPS2BF16_V16SF_MASK, UNKNOWN, (int) V16BF_FTYPE_V16SF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v16sf_maskz, "__builtin_ia32_cvtneps2bf16_v16sf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V16SF_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16SF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtneps2bf16_v16sf, "__builtin_ia32_cvtneps2bf16_v16sf", IX86_BUILTIN_CVTNEPS2BF16_V16SF, UNKNOWN, (int) V16BF_FTYPE_V16SF)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtneps2bf16_v16sf_mask, "__builtin_ia32_cvtneps2bf16_v16sf_mask", IX86_BUILTIN_CVTNEPS2BF16_V16SF_MASK, UNKNOWN, (int) V16BF_FTYPE_V16SF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtneps2bf16_v16sf_maskz, "__builtin_ia32_cvtneps2bf16_v16sf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V16SF_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16SF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXNECONVERT | OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_vcvtneps2bf16_v8sf, "__builtin_ia32_cvtneps2bf16_v8sf", IX86_BUILTIN_CVTNEPS2BF16_V8SF, UNKNOWN, (int) V8BF_FTYPE_V8SF)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v8sf_mask, "__builtin_ia32_cvtneps2bf16_v8sf_mask", IX86_BUILTIN_CVTNEPS2BF16_V8SF_MASK, UNKNOWN, (int) V8BF_FTYPE_V8SF_V8BF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v8sf_maskz, "__builtin_ia32_cvtneps2bf16_v8sf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V8SF_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8SF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVXNECONVERT | OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_vcvtneps2bf16_v4sf, "__builtin_ia32_cvtneps2bf16_v4sf", IX86_BUILTIN_CVTNEPS2BF16_V4SF, UNKNOWN, (int) V8BF_FTYPE_V4SF)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v4sf_mask, "__builtin_ia32_cvtneps2bf16_v4sf_mask", IX86_BUILTIN_CVTNEPS2BF16_V4SF_MASK, UNKNOWN, (int) V8BF_FTYPE_V4SF_V8BF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtneps2bf16_v4sf_maskz, "__builtin_ia32_cvtneps2bf16_v4sf_maskz", IX86_BUILTIN_CVTNE2PS2BF16_V4SF_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V4SF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_dpbf16ps_v16sf, "__builtin_ia32_dpbf16ps_v16sf", IX86_BUILTIN_DPBF16PS_V16SF, UNKNOWN, (int) V16SF_FTYPE_V16SF_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_dpbf16ps_v16sf_mask, "__builtin_ia32_dpbf16ps_v16sf_mask", IX86_BUILTIN_DPBF16PS_V16SF_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V32BF_V32BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_dpbf16ps_v16sf_maskz, "__builtin_ia32_dpbf16ps_v16sf_maskz", IX86_BUILTIN_DPBF16PS_V16SF_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V32BF_V32BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_dpbf16ps_v16sf, "__builtin_ia32_dpbf16ps_v16sf", IX86_BUILTIN_DPBF16PS_V16SF, UNKNOWN, (int) V16SF_FTYPE_V16SF_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_dpbf16ps_v16sf_mask, "__builtin_ia32_dpbf16ps_v16sf_mask", IX86_BUILTIN_DPBF16PS_V16SF_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V32BF_V32BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512BF16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_dpbf16ps_v16sf_maskz, "__builtin_ia32_dpbf16ps_v16sf_maskz", IX86_BUILTIN_DPBF16PS_V16SF_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V32BF_V32BF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_dpbf16ps_v8sf, "__builtin_ia32_dpbf16ps_v8sf", IX86_BUILTIN_DPBF16PS_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V16BF_V16BF)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_dpbf16ps_v8sf_mask, "__builtin_ia32_dpbf16ps_v8sf_mask", IX86_BUILTIN_DPBF16PS_V8SF_MASK, UNKNOWN, (int) V8SF_FTYPE_V8SF_V16BF_V16BF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_dpbf16ps_v8sf_maskz, "__builtin_ia32_dpbf16ps_v8sf_maskz", IX86_BUILTIN_DPBF16PS_V8SF_MASKZ, UNKNOWN, (int) V8SF_FTYPE_V8SF_V16BF_V16BF_UQI)
@@ -2874,40 +2874,40 @@ BDESC (OPTION_MASK_ISA_SSE2, 0, CODE_FOR_extendbfsf2_1, "__builtin_ia32_cvtbf2sf
/* AVX512FP16. */
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_addv8hf3_mask, "__builtin_ia32_addph128_mask", IX86_BUILTIN_ADDPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_addv16hf3_mask, "__builtin_ia32_addph256_mask", IX86_BUILTIN_ADDPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_addv32hf3_mask, "__builtin_ia32_addph512_mask", IX86_BUILTIN_ADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv32hf3_mask, "__builtin_ia32_addph512_mask", IX86_BUILTIN_ADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_subv8hf3_mask, "__builtin_ia32_subph128_mask", IX86_BUILTIN_SUBPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_subv16hf3_mask, "__builtin_ia32_subph256_mask", IX86_BUILTIN_SUBPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_subv32hf3_mask, "__builtin_ia32_subph512_mask", IX86_BUILTIN_SUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv32hf3_mask, "__builtin_ia32_subph512_mask", IX86_BUILTIN_SUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_mulv8hf3_mask, "__builtin_ia32_mulph128_mask", IX86_BUILTIN_MULPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_mulv16hf3_mask, "__builtin_ia32_mulph256_mask", IX86_BUILTIN_MULPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_mulv32hf3_mask, "__builtin_ia32_mulph512_mask", IX86_BUILTIN_MULPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_mulv32hf3_mask, "__builtin_ia32_mulph512_mask", IX86_BUILTIN_MULPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv8hf3_mask, "__builtin_ia32_divph128_mask", IX86_BUILTIN_DIVPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv16hf3_mask, "__builtin_ia32_divph256_mask", IX86_BUILTIN_DIVPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv32hf3_mask, "__builtin_ia32_divph512_mask", IX86_BUILTIN_DIVPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_divv32hf3_mask, "__builtin_ia32_divph512_mask", IX86_BUILTIN_DIVPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmaddv8hf3_mask, "__builtin_ia32_addsh_mask", IX86_BUILTIN_ADDSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsubv8hf3_mask, "__builtin_ia32_subsh_mask", IX86_BUILTIN_SUBSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmmulv8hf3_mask, "__builtin_ia32_mulsh_mask", IX86_BUILTIN_MULSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmdivv8hf3_mask, "__builtin_ia32_divsh_mask", IX86_BUILTIN_DIVSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv8hf3_mask, "__builtin_ia32_maxph128_mask", IX86_BUILTIN_MAXPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv16hf3_mask, "__builtin_ia32_maxph256_mask", IX86_BUILTIN_MAXPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv32hf3_mask, "__builtin_ia32_maxph512_mask", IX86_BUILTIN_MAXPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv32hf3_mask, "__builtin_ia32_maxph512_mask", IX86_BUILTIN_MAXPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv8hf3_mask, "__builtin_ia32_minph128_mask", IX86_BUILTIN_MINPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv16hf3_mask, "__builtin_ia32_minph256_mask", IX86_BUILTIN_MINPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv32hf3_mask, "__builtin_ia32_minph512_mask", IX86_BUILTIN_MINPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv32hf3_mask, "__builtin_ia32_minph512_mask", IX86_BUILTIN_MINPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsmaxv8hf3_mask, "__builtin_ia32_maxsh_mask", IX86_BUILTIN_MAXSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask, "__builtin_ia32_minsh_mask", IX86_BUILTIN_MINSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_cmpv8hf3_mask, "__builtin_ia32_cmpph128_mask", IX86_BUILTIN_CMPPH128_MASK, UNKNOWN, (int) UQI_FTYPE_V8HF_V8HF_INT_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_cmpv16hf3_mask, "__builtin_ia32_cmpph256_mask", IX86_BUILTIN_CMPPH256_MASK, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_cmpv32hf3_mask, "__builtin_ia32_cmpph512_mask", IX86_BUILTIN_CMPPH512_MASK, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cmpv32hf3_mask, "__builtin_ia32_cmpph512_mask", IX86_BUILTIN_CMPPH512_MASK, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_sqrtv8hf2_mask, "__builtin_ia32_sqrtph128_mask", IX86_BUILTIN_SQRTPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_sqrtv16hf2_mask, "__builtin_ia32_sqrtph256_mask", IX86_BUILTIN_SQRTPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rsqrtv8hf2_mask, "__builtin_ia32_rsqrtph128_mask", IX86_BUILTIN_RSQRTPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rsqrtv16hf2_mask, "__builtin_ia32_rsqrtph256_mask", IX86_BUILTIN_RSQRTPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rsqrtv32hf2_mask, "__builtin_ia32_rsqrtph512_mask", IX86_BUILTIN_RSQRTPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_rsqrtv32hf2_mask, "__builtin_ia32_rsqrtph512_mask", IX86_BUILTIN_RSQRTPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmrsqrtv8hf2_mask, "__builtin_ia32_rsqrtsh_mask", IX86_BUILTIN_RSQRTSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rcpv8hf2_mask, "__builtin_ia32_rcpph128_mask", IX86_BUILTIN_RCPPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rcpv16hf2_mask, "__builtin_ia32_rcpph256_mask", IX86_BUILTIN_RCPPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rcpv32hf2_mask, "__builtin_ia32_rcpph512_mask", IX86_BUILTIN_RCPPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_rcpv32hf2_mask, "__builtin_ia32_rcpph512_mask", IX86_BUILTIN_RCPPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmrcpv8hf2_mask, "__builtin_ia32_rcpsh_mask", IX86_BUILTIN_RCPSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_scalefv8hf_mask, "__builtin_ia32_scalefph128_mask", IX86_BUILTIN_SCALEFPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_scalefv16hf_mask, "__builtin_ia32_scalefph256_mask", IX86_BUILTIN_SCALEFPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
@@ -2917,7 +2917,7 @@ BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp1
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_rndscalev16hf_mask, "__builtin_ia32_rndscaleph256_mask", IX86_BUILTIN_RNDSCALEPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512dq_fpclassv16hf_mask, "__builtin_ia32_fpclassph256_mask", IX86_BUILTIN_FPCLASSPH256, UNKNOWN, (int) HI_FTYPE_V16HF_INT_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512dq_fpclassv8hf_mask, "__builtin_ia32_fpclassph128_mask", IX86_BUILTIN_FPCLASSPH128, UNKNOWN, (int) QI_FTYPE_V8HF_INT_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512dq_fpclassv32hf_mask, "__builtin_ia32_fpclassph512_mask", IX86_BUILTIN_FPCLASSPH512, UNKNOWN, (int) SI_FTYPE_V32HF_INT_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_fpclassv32hf_mask, "__builtin_ia32_fpclassph512_mask", IX86_BUILTIN_FPCLASSPH512, UNKNOWN, (int) SI_FTYPE_V32HF_INT_USI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512dq_vmfpclassv8hf_mask, "__builtin_ia32_fpclasssh_mask", IX86_BUILTIN_FPCLASSSH_MASK, UNKNOWN, (int) QI_FTYPE_V8HF_INT_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_getexpv16hf_mask, "__builtin_ia32_getexpph256_mask", IX86_BUILTIN_GETEXPPH256, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_getexpv8hf_mask, "__builtin_ia32_getexpph128_mask", IX86_BUILTIN_GETEXPPH128, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_UQI)
@@ -3034,26 +3034,26 @@ BDESC_END (ARGS, ROUND_ARGS)
/* AVX512F. */
BDESC_FIRST (round_args, ROUND_ARGS,
- OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_addv8df3_mask_round, "__builtin_ia32_addpd512_mask", IX86_BUILTIN_ADDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_addv16sf3_mask_round, "__builtin_ia32_addps512_mask", IX86_BUILTIN_ADDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+ OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv8df3_mask_round, "__builtin_ia32_addpd512_mask", IX86_BUILTIN_ADDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv16sf3_mask_round, "__builtin_ia32_addps512_mask", IX86_BUILTIN_ADDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmaddv2df3_round, "__builtin_ia32_addsd_round", IX86_BUILTIN_ADDSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmaddv2df3_mask_round, "__builtin_ia32_addsd_mask_round", IX86_BUILTIN_ADDSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmaddv4sf3_round, "__builtin_ia32_addss_round", IX86_BUILTIN_ADDSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmaddv4sf3_mask_round, "__builtin_ia32_addss_mask_round", IX86_BUILTIN_ADDSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cmpv8df3_mask_round, "__builtin_ia32_cmppd512_mask", IX86_BUILTIN_CMPPD512, UNKNOWN, (int) UQI_FTYPE_V8DF_V8DF_INT_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cmpv16sf3_mask_round, "__builtin_ia32_cmpps512_mask", IX86_BUILTIN_CMPPS512, UNKNOWN, (int) UHI_FTYPE_V16SF_V16SF_INT_UHI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cmpv8df3_mask_round, "__builtin_ia32_cmppd512_mask", IX86_BUILTIN_CMPPD512, UNKNOWN, (int) UQI_FTYPE_V8DF_V8DF_INT_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cmpv16sf3_mask_round, "__builtin_ia32_cmpps512_mask", IX86_BUILTIN_CMPPS512, UNKNOWN, (int) UHI_FTYPE_V16SF_V16SF_INT_UHI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmcmpv2df3_mask_round, "__builtin_ia32_cmpsd_mask", IX86_BUILTIN_CMPSD_MASK, UNKNOWN, (int) UQI_FTYPE_V2DF_V2DF_INT_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmcmpv4sf3_mask_round, "__builtin_ia32_cmpss_mask", IX86_BUILTIN_CMPSS_MASK, UNKNOWN, (int) UQI_FTYPE_V4SF_V4SF_INT_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_comi_round, "__builtin_ia32_vcomisd", IX86_BUILTIN_COMIDF, UNKNOWN, (int) INT_FTYPE_V2DF_V2DF_INT_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_comi_round, "__builtin_ia32_vcomiss", IX86_BUILTIN_COMISF, UNKNOWN, (int) INT_FTYPE_V4SF_V4SF_INT_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_floatv16siv16sf2_mask_round, "__builtin_ia32_cvtdq2ps512_mask", IX86_BUILTIN_CVTDQ2PS512, UNKNOWN, (int) V16SF_FTYPE_V16SI_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cvtpd2dq512_mask_round, "__builtin_ia32_cvtpd2dq512_mask", IX86_BUILTIN_CVTPD2DQ512, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cvtpd2ps512_mask_round, "__builtin_ia32_cvtpd2ps512_mask", IX86_BUILTIN_CVTPD2PS512, UNKNOWN, (int) V8SF_FTYPE_V8DF_V8SF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_fixuns_notruncv8dfv8si2_mask_round, "__builtin_ia32_cvtpd2udq512_mask", IX86_BUILTIN_CVTPD2UDQ512, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vcvtph2ps512_mask_round, "__builtin_ia32_vcvtph2ps512_mask", IX86_BUILTIN_CVTPH2PS512, UNKNOWN, (int) V16SF_FTYPE_V16HI_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fix_notruncv16sfv16si_mask_round, "__builtin_ia32_cvtps2dq512_mask", IX86_BUILTIN_CVTPS2DQ512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cvtps2pd512_mask_round, "__builtin_ia32_cvtps2pd512_mask", IX86_BUILTIN_CVTPS2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SF_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fixuns_notruncv16sfv16si_mask_round, "__builtin_ia32_cvtps2udq512_mask", IX86_BUILTIN_CVTPS2UDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatv16siv16sf2_mask_round, "__builtin_ia32_cvtdq2ps512_mask", IX86_BUILTIN_CVTDQ2PS512, UNKNOWN, (int) V16SF_FTYPE_V16SI_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtpd2dq512_mask_round, "__builtin_ia32_cvtpd2dq512_mask", IX86_BUILTIN_CVTPD2DQ512, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtpd2ps512_mask_round, "__builtin_ia32_cvtpd2ps512_mask", IX86_BUILTIN_CVTPD2PS512, UNKNOWN, (int) V8SF_FTYPE_V8DF_V8SF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fixuns_notruncv8dfv8si2_mask_round, "__builtin_ia32_cvtpd2udq512_mask", IX86_BUILTIN_CVTPD2UDQ512, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_vcvtph2ps512_mask_round, "__builtin_ia32_vcvtph2ps512_mask", IX86_BUILTIN_CVTPH2PS512, UNKNOWN, (int) V16SF_FTYPE_V16HI_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fix_notruncv16sfv16si_mask_round, "__builtin_ia32_cvtps2dq512_mask", IX86_BUILTIN_CVTPS2DQ512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_cvtps2pd512_mask_round, "__builtin_ia32_cvtps2pd512_mask", IX86_BUILTIN_CVTPS2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SF_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fixuns_notruncv16sfv16si_mask_round, "__builtin_ia32_cvtps2udq512_mask", IX86_BUILTIN_CVTPS2UDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_cvtsd2ss_round, "__builtin_ia32_cvtsd2ss_round", IX86_BUILTIN_CVTSD2SS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_cvtsd2ss_mask_round, "__builtin_ia32_cvtsd2ss_mask_round", IX86_BUILTIN_CVTSD2SS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_sse2_cvtsi2sdq_round, "__builtin_ia32_cvtsi2sd64", IX86_BUILTIN_CVTSI2SD64, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT64_INT)
@@ -3069,64 +3069,64 @@ BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_floatunsv16siv16sf2_mask_round, "__b
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_cvtusi2sd64_round, "__builtin_ia32_cvtusi2sd64", IX86_BUILTIN_CVTUSI2SD64, UNKNOWN, (int) V2DF_FTYPE_V2DF_UINT64_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_cvtusi2ss32_round, "__builtin_ia32_cvtusi2ss32", IX86_BUILTIN_CVTUSI2SS32, UNKNOWN, (int) V4SF_FTYPE_V4SF_UINT_INT)
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_cvtusi2ss64_round, "__builtin_ia32_cvtusi2ss64", IX86_BUILTIN_CVTUSI2SS64, UNKNOWN, (int) V4SF_FTYPE_V4SF_UINT64_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_divv8df3_mask_round, "__builtin_ia32_divpd512_mask", IX86_BUILTIN_DIVPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_divv16sf3_mask_round, "__builtin_ia32_divps512_mask", IX86_BUILTIN_DIVPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_divv8df3_mask_round, "__builtin_ia32_divpd512_mask", IX86_BUILTIN_DIVPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_divv16sf3_mask_round, "__builtin_ia32_divps512_mask", IX86_BUILTIN_DIVPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmdivv2df3_round, "__builtin_ia32_divsd_round", IX86_BUILTIN_DIVSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmdivv2df3_mask_round, "__builtin_ia32_divsd_mask_round", IX86_BUILTIN_DIVSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmdivv4sf3_round, "__builtin_ia32_divss_round", IX86_BUILTIN_DIVSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmdivv4sf3_mask_round, "__builtin_ia32_divss_mask_round", IX86_BUILTIN_DIVSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fixupimmv8df_mask_round, "__builtin_ia32_fixupimmpd512_mask", IX86_BUILTIN_FIXUPIMMPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DI_INT_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fixupimmv8df_maskz_round, "__builtin_ia32_fixupimmpd512_maskz", IX86_BUILTIN_FIXUPIMMPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DI_INT_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fixupimmv16sf_mask_round, "__builtin_ia32_fixupimmps512_mask", IX86_BUILTIN_FIXUPIMMPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SI_INT_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fixupimmv16sf_maskz_round, "__builtin_ia32_fixupimmps512_maskz", IX86_BUILTIN_FIXUPIMMPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SI_INT_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fixupimmv8df_mask_round, "__builtin_ia32_fixupimmpd512_mask", IX86_BUILTIN_FIXUPIMMPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DI_INT_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fixupimmv8df_maskz_round, "__builtin_ia32_fixupimmpd512_maskz", IX86_BUILTIN_FIXUPIMMPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DI_INT_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fixupimmv16sf_mask_round, "__builtin_ia32_fixupimmps512_mask", IX86_BUILTIN_FIXUPIMMPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SI_INT_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fixupimmv16sf_maskz_round, "__builtin_ia32_fixupimmps512_maskz", IX86_BUILTIN_FIXUPIMMPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SI_INT_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sfixupimmv2df_mask_round, "__builtin_ia32_fixupimmsd_mask", IX86_BUILTIN_FIXUPIMMSD128_MASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI_INT_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sfixupimmv2df_maskz_round, "__builtin_ia32_fixupimmsd_maskz", IX86_BUILTIN_FIXUPIMMSD128_MASKZ, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI_INT_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sfixupimmv4sf_mask_round, "__builtin_ia32_fixupimmss_mask", IX86_BUILTIN_FIXUPIMMSS128_MASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI_INT_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sfixupimmv4sf_maskz_round, "__builtin_ia32_fixupimmss_maskz", IX86_BUILTIN_FIXUPIMMSS128_MASKZ, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI_INT_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_getexpv8df_mask_round, "__builtin_ia32_getexppd512_mask", IX86_BUILTIN_GETEXPPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_getexpv16sf_mask_round, "__builtin_ia32_getexpps512_mask", IX86_BUILTIN_GETEXPPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_getexpv8df_mask_round, "__builtin_ia32_getexppd512_mask", IX86_BUILTIN_GETEXPPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_getexpv16sf_mask_round, "__builtin_ia32_getexpps512_mask", IX86_BUILTIN_GETEXPPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sgetexpv2df_round, "__builtin_ia32_getexpsd128_round", IX86_BUILTIN_GETEXPSD128, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sgetexpv2df_mask_round, "__builtin_ia32_getexpsd_mask_round", IX86_BUILTIN_GETEXPSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sgetexpv4sf_round, "__builtin_ia32_getexpss128_round", IX86_BUILTIN_GETEXPSS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sgetexpv4sf_mask_round, "__builtin_ia32_getexpss_mask_round", IX86_BUILTIN_GETEXPSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_getmantv8df_mask_round, "__builtin_ia32_getmantpd512_mask", IX86_BUILTIN_GETMANTPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_getmantv16sf_mask_round, "__builtin_ia32_getmantps512_mask", IX86_BUILTIN_GETMANTPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_getmantv8df_mask_round, "__builtin_ia32_getmantpd512_mask", IX86_BUILTIN_GETMANTPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_getmantv16sf_mask_round, "__builtin_ia32_getmantps512_mask", IX86_BUILTIN_GETMANTPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vgetmantv2df_round, "__builtin_ia32_getmantsd_round", IX86_BUILTIN_GETMANTSD128, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vgetmantv2df_mask_round, "__builtin_ia32_getmantsd_mask_round", IX86_BUILTIN_GETMANTSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vgetmantv4sf_round, "__builtin_ia32_getmantss_round", IX86_BUILTIN_GETMANTSS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vgetmantv4sf_mask_round, "__builtin_ia32_getmantss_mask_round", IX86_BUILTIN_GETMANTSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_smaxv8df3_mask_round, "__builtin_ia32_maxpd512_mask", IX86_BUILTIN_MAXPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_smaxv16sf3_mask_round, "__builtin_ia32_maxps512_mask", IX86_BUILTIN_MAXPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv8df3_mask_round, "__builtin_ia32_maxpd512_mask", IX86_BUILTIN_MAXPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv16sf3_mask_round, "__builtin_ia32_maxps512_mask", IX86_BUILTIN_MAXPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsmaxv2df3_round, "__builtin_ia32_maxsd_round", IX86_BUILTIN_MAXSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsmaxv2df3_mask_round, "__builtin_ia32_maxsd_mask_round", IX86_BUILTIN_MAXSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmsmaxv4sf3_round, "__builtin_ia32_maxss_round", IX86_BUILTIN_MAXSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmsmaxv4sf3_mask_round, "__builtin_ia32_maxss_mask_round", IX86_BUILTIN_MAXSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sminv8df3_mask_round, "__builtin_ia32_minpd512_mask", IX86_BUILTIN_MINPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sminv16sf3_mask_round, "__builtin_ia32_minps512_mask", IX86_BUILTIN_MINPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv8df3_mask_round, "__builtin_ia32_minpd512_mask", IX86_BUILTIN_MINPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv16sf3_mask_round, "__builtin_ia32_minps512_mask", IX86_BUILTIN_MINPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsminv2df3_round, "__builtin_ia32_minsd_round", IX86_BUILTIN_MINSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsminv2df3_mask_round, "__builtin_ia32_minsd_mask_round", IX86_BUILTIN_MINSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmsminv4sf3_round, "__builtin_ia32_minss_round", IX86_BUILTIN_MINSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmsminv4sf3_mask_round, "__builtin_ia32_minss_mask_round", IX86_BUILTIN_MINSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_mulv8df3_mask_round, "__builtin_ia32_mulpd512_mask", IX86_BUILTIN_MULPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_mulv16sf3_mask_round, "__builtin_ia32_mulps512_mask", IX86_BUILTIN_MULPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_mulv8df3_mask_round, "__builtin_ia32_mulpd512_mask", IX86_BUILTIN_MULPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_mulv16sf3_mask_round, "__builtin_ia32_mulps512_mask", IX86_BUILTIN_MULPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmmulv2df3_round, "__builtin_ia32_mulsd_round", IX86_BUILTIN_MULSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmmulv2df3_mask_round, "__builtin_ia32_mulsd_mask_round", IX86_BUILTIN_MULSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmmulv4sf3_round, "__builtin_ia32_mulss_round", IX86_BUILTIN_MULSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmmulv4sf3_mask_round, "__builtin_ia32_mulss_mask_round", IX86_BUILTIN_MULSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rndscalev8df_mask_round, "__builtin_ia32_rndscalepd_mask", IX86_BUILTIN_RNDSCALEPD, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rndscalev16sf_mask_round, "__builtin_ia32_rndscaleps_mask", IX86_BUILTIN_RNDSCALEPS, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rndscalev8df_mask_round, "__builtin_ia32_rndscalepd_mask", IX86_BUILTIN_RNDSCALEPD, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_rndscalev16sf_mask_round, "__builtin_ia32_rndscaleps_mask", IX86_BUILTIN_RNDSCALEPS, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rndscalev2df_mask_round, "__builtin_ia32_rndscalesd_mask_round", IX86_BUILTIN_RNDSCALESD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_rndscalev4sf_mask_round, "__builtin_ia32_rndscaless_mask_round", IX86_BUILTIN_RNDSCALESS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_scalefv8df_mask_round, "__builtin_ia32_scalefpd512_mask", IX86_BUILTIN_SCALEFPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_scalefv16sf_mask_round, "__builtin_ia32_scalefps512_mask", IX86_BUILTIN_SCALEFPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_scalefv8df_mask_round, "__builtin_ia32_scalefpd512_mask", IX86_BUILTIN_SCALEFPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_scalefv16sf_mask_round, "__builtin_ia32_scalefps512_mask", IX86_BUILTIN_SCALEFPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmscalefv2df_mask_round, "__builtin_ia32_scalefsd_mask_round", IX86_BUILTIN_SCALEFSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmscalefv4sf_mask_round, "__builtin_ia32_scalefss_mask_round", IX86_BUILTIN_SCALEFSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sqrtv8df2_mask_round, "__builtin_ia32_sqrtpd512_mask", IX86_BUILTIN_SQRTPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_sqrtv16sf2_mask_round, "__builtin_ia32_sqrtps512_mask", IX86_BUILTIN_SQRTPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sqrtv8df2_mask_round, "__builtin_ia32_sqrtpd512_mask", IX86_BUILTIN_SQRTPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_sqrtv16sf2_mask_round, "__builtin_ia32_sqrtps512_mask", IX86_BUILTIN_SQRTPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsqrtv2df2_mask_round, "__builtin_ia32_sqrtsd_mask_round", IX86_BUILTIN_SQRTSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmsqrtv4sf2_mask_round, "__builtin_ia32_sqrtss_mask_round", IX86_BUILTIN_SQRTSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_subv8df3_mask_round, "__builtin_ia32_subpd512_mask", IX86_BUILTIN_SUBPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_subv16sf3_mask_round, "__builtin_ia32_subps512_mask", IX86_BUILTIN_SUBPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv8df3_mask_round, "__builtin_ia32_subpd512_mask", IX86_BUILTIN_SUBPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv16sf3_mask_round, "__builtin_ia32_subps512_mask", IX86_BUILTIN_SUBPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsubv2df3_round, "__builtin_ia32_subsd_round", IX86_BUILTIN_SUBSD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_vmsubv2df3_mask_round, "__builtin_ia32_subsd_mask_round", IX86_BUILTIN_SUBSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_vmsubv4sf3_round, "__builtin_ia32_subss_round", IX86_BUILTIN_SUBSS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
@@ -3147,12 +3147,12 @@ BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_cvttss2si_round, "__builtin_ia32
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_sse_cvttss2siq_round, "__builtin_ia32_vcvttss2si64", IX86_BUILTIN_VCVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vcvttss2usi_round, "__builtin_ia32_vcvttss2usi32", IX86_BUILTIN_VCVTTSS2USI32, UNKNOWN, (int) UINT_FTYPE_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_avx512f_vcvttss2usiq_round, "__builtin_ia32_vcvttss2usi64", IX86_BUILTIN_VCVTTSS2USI64, UNKNOWN, (int) UINT64_FTYPE_V4SF_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmadd_v8df_mask_round, "__builtin_ia32_vfmaddpd512_mask", IX86_BUILTIN_VFMADDPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmadd_v8df_mask3_round, "__builtin_ia32_vfmaddpd512_mask3", IX86_BUILTIN_VFMADDPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmadd_v8df_maskz_round, "__builtin_ia32_vfmaddpd512_maskz", IX86_BUILTIN_VFMADDPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmadd_v16sf_mask_round, "__builtin_ia32_vfmaddps512_mask", IX86_BUILTIN_VFMADDPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmadd_v16sf_mask3_round, "__builtin_ia32_vfmaddps512_mask3", IX86_BUILTIN_VFMADDPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmadd_v16sf_maskz_round, "__builtin_ia32_vfmaddps512_maskz", IX86_BUILTIN_VFMADDPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmadd_v8df_mask_round, "__builtin_ia32_vfmaddpd512_mask", IX86_BUILTIN_VFMADDPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmadd_v8df_mask3_round, "__builtin_ia32_vfmaddpd512_mask3", IX86_BUILTIN_VFMADDPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmadd_v8df_maskz_round, "__builtin_ia32_vfmaddpd512_maskz", IX86_BUILTIN_VFMADDPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmadd_v16sf_mask_round, "__builtin_ia32_vfmaddps512_mask", IX86_BUILTIN_VFMADDPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmadd_v16sf_mask3_round, "__builtin_ia32_vfmaddps512_mask3", IX86_BUILTIN_VFMADDPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmadd_v16sf_maskz_round, "__builtin_ia32_vfmaddps512_maskz", IX86_BUILTIN_VFMADDPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_fmai_vmfmadd_v2df_round, "__builtin_ia32_vfmaddsd3_round", IX86_BUILTIN_VFMADDSD3_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_fmai_vmfmadd_v4sf_round, "__builtin_ia32_vfmaddss3_round", IX86_BUILTIN_VFMADDSS3_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmfmadd_v2df_mask_round, "__builtin_ia32_vfmaddsd3_mask", IX86_BUILTIN_VFMADDSD3_MASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
@@ -3163,32 +3163,32 @@ BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmfmadd_v4sf_mask_round, "__
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmfmadd_v4sf_mask3_round, "__builtin_ia32_vfmaddss3_mask3", IX86_BUILTIN_VFMADDSS3_MASK3, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmfmadd_v4sf_maskz_round, "__builtin_ia32_vfmaddss3_maskz", IX86_BUILTIN_VFMADDSS3_MASKZ, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_vmfmsub_v4sf_mask3_round, "__builtin_ia32_vfmsubss3_mask3", IX86_BUILTIN_VFMSUBSS3_MASK3, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmaddsub_v8df_mask_round, "__builtin_ia32_vfmaddsubpd512_mask", IX86_BUILTIN_VFMADDSUBPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmaddsub_v8df_mask3_round, "__builtin_ia32_vfmaddsubpd512_mask3", IX86_BUILTIN_VFMADDSUBPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmaddsub_v8df_maskz_round, "__builtin_ia32_vfmaddsubpd512_maskz", IX86_BUILTIN_VFMADDSUBPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmaddsub_v16sf_mask_round, "__builtin_ia32_vfmaddsubps512_mask", IX86_BUILTIN_VFMADDSUBPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmaddsub_v16sf_mask3_round, "__builtin_ia32_vfmaddsubps512_mask3", IX86_BUILTIN_VFMADDSUBPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmaddsub_v16sf_maskz_round, "__builtin_ia32_vfmaddsubps512_maskz", IX86_BUILTIN_VFMADDSUBPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsubadd_v8df_mask3_round, "__builtin_ia32_vfmsubaddpd512_mask3", IX86_BUILTIN_VFMSUBADDPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsubadd_v16sf_mask3_round, "__builtin_ia32_vfmsubaddps512_mask3", IX86_BUILTIN_VFMSUBADDPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsub_v8df_mask_round, "__builtin_ia32_vfmsubpd512_mask", IX86_BUILTIN_VFMSUBPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsub_v8df_mask3_round, "__builtin_ia32_vfmsubpd512_mask3", IX86_BUILTIN_VFMSUBPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsub_v8df_maskz_round, "__builtin_ia32_vfmsubpd512_maskz", IX86_BUILTIN_VFMSUBPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsub_v16sf_mask_round, "__builtin_ia32_vfmsubps512_mask", IX86_BUILTIN_VFMSUBPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsub_v16sf_mask3_round, "__builtin_ia32_vfmsubps512_mask3", IX86_BUILTIN_VFMSUBPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fmsub_v16sf_maskz_round, "__builtin_ia32_vfmsubps512_maskz", IX86_BUILTIN_VFMSUBPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmadd_v8df_mask_round, "__builtin_ia32_vfnmaddpd512_mask", IX86_BUILTIN_VFNMADDPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmadd_v8df_mask3_round, "__builtin_ia32_vfnmaddpd512_mask3", IX86_BUILTIN_VFNMADDPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmadd_v8df_maskz_round, "__builtin_ia32_vfnmaddpd512_maskz", IX86_BUILTIN_VFNMADDPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmadd_v16sf_mask_round, "__builtin_ia32_vfnmaddps512_mask", IX86_BUILTIN_VFNMADDPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmadd_v16sf_mask3_round, "__builtin_ia32_vfnmaddps512_mask3", IX86_BUILTIN_VFNMADDPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmadd_v16sf_maskz_round, "__builtin_ia32_vfnmaddps512_maskz", IX86_BUILTIN_VFNMADDPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmsub_v8df_mask_round, "__builtin_ia32_vfnmsubpd512_mask", IX86_BUILTIN_VFNMSUBPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmsub_v8df_mask3_round, "__builtin_ia32_vfnmsubpd512_mask3", IX86_BUILTIN_VFNMSUBPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmsub_v8df_maskz_round, "__builtin_ia32_vfnmsubpd512_maskz", IX86_BUILTIN_VFNMSUBPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmsub_v16sf_mask_round, "__builtin_ia32_vfnmsubps512_mask", IX86_BUILTIN_VFNMSUBPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmsub_v16sf_mask3_round, "__builtin_ia32_vfnmsubps512_mask3", IX86_BUILTIN_VFNMSUBPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_fnmsub_v16sf_maskz_round, "__builtin_ia32_vfnmsubps512_maskz", IX86_BUILTIN_VFNMSUBPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmaddsub_v8df_mask_round, "__builtin_ia32_vfmaddsubpd512_mask", IX86_BUILTIN_VFMADDSUBPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmaddsub_v8df_mask3_round, "__builtin_ia32_vfmaddsubpd512_mask3", IX86_BUILTIN_VFMADDSUBPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmaddsub_v8df_maskz_round, "__builtin_ia32_vfmaddsubpd512_maskz", IX86_BUILTIN_VFMADDSUBPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmaddsub_v16sf_mask_round, "__builtin_ia32_vfmaddsubps512_mask", IX86_BUILTIN_VFMADDSUBPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmaddsub_v16sf_mask3_round, "__builtin_ia32_vfmaddsubps512_mask3", IX86_BUILTIN_VFMADDSUBPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmaddsub_v16sf_maskz_round, "__builtin_ia32_vfmaddsubps512_maskz", IX86_BUILTIN_VFMADDSUBPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsubadd_v8df_mask3_round, "__builtin_ia32_vfmsubaddpd512_mask3", IX86_BUILTIN_VFMSUBADDPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsubadd_v16sf_mask3_round, "__builtin_ia32_vfmsubaddps512_mask3", IX86_BUILTIN_VFMSUBADDPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsub_v8df_mask_round, "__builtin_ia32_vfmsubpd512_mask", IX86_BUILTIN_VFMSUBPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsub_v8df_mask3_round, "__builtin_ia32_vfmsubpd512_mask3", IX86_BUILTIN_VFMSUBPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsub_v8df_maskz_round, "__builtin_ia32_vfmsubpd512_maskz", IX86_BUILTIN_VFMSUBPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsub_v16sf_mask_round, "__builtin_ia32_vfmsubps512_mask", IX86_BUILTIN_VFMSUBPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsub_v16sf_mask3_round, "__builtin_ia32_vfmsubps512_mask3", IX86_BUILTIN_VFMSUBPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fmsub_v16sf_maskz_round, "__builtin_ia32_vfmsubps512_maskz", IX86_BUILTIN_VFMSUBPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmadd_v8df_mask_round, "__builtin_ia32_vfnmaddpd512_mask", IX86_BUILTIN_VFNMADDPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmadd_v8df_mask3_round, "__builtin_ia32_vfnmaddpd512_mask3", IX86_BUILTIN_VFNMADDPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmadd_v8df_maskz_round, "__builtin_ia32_vfnmaddpd512_maskz", IX86_BUILTIN_VFNMADDPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmadd_v16sf_mask_round, "__builtin_ia32_vfnmaddps512_mask", IX86_BUILTIN_VFNMADDPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmadd_v16sf_mask3_round, "__builtin_ia32_vfnmaddps512_mask3", IX86_BUILTIN_VFNMADDPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmadd_v16sf_maskz_round, "__builtin_ia32_vfnmaddps512_maskz", IX86_BUILTIN_VFNMADDPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmsub_v8df_mask_round, "__builtin_ia32_vfnmsubpd512_mask", IX86_BUILTIN_VFNMSUBPD512_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmsub_v8df_mask3_round, "__builtin_ia32_vfnmsubpd512_mask3", IX86_BUILTIN_VFNMSUBPD512_MASK3, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmsub_v8df_maskz_round, "__builtin_ia32_vfnmsubpd512_maskz", IX86_BUILTIN_VFNMSUBPD512_MASKZ, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmsub_v16sf_mask_round, "__builtin_ia32_vfnmsubps512_mask", IX86_BUILTIN_VFNMSUBPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmsub_v16sf_mask3_round, "__builtin_ia32_vfnmsubps512_mask3", IX86_BUILTIN_VFNMSUBPS512_MASK3, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512f_fnmsub_v16sf_maskz_round, "__builtin_ia32_vfnmsubps512_maskz", IX86_BUILTIN_VFNMSUBPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT)
/* AVX512ER */
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_exp2v8df_mask_round, "__builtin_ia32_exp2pd_mask", IX86_BUILTIN_EXP2PD_MASK, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
@@ -3207,72 +3207,72 @@ BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrsqrt28v4sf_round, "__bu
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrsqrt28v4sf_mask_round, "__builtin_ia32_rsqrt28ss_mask_round", IX86_BUILTIN_RSQRT28SS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
/* AVX512DQ. */
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducepv8df_mask_round, "__builtin_ia32_reducepd512_mask_round", IX86_BUILTIN_REDUCEPD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducepv16sf_mask_round, "__builtin_ia32_reduceps512_mask_round", IX86_BUILTIN_REDUCEPS512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_reducepv8df_mask_round, "__builtin_ia32_reducepd512_mask_round", IX86_BUILTIN_REDUCEPD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_reducepv16sf_mask_round, "__builtin_ia32_reduceps512_mask_round", IX86_BUILTIN_REDUCEPS512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducesv2df_mask_round, "__builtin_ia32_reducesd_mask_round", IX86_BUILTIN_REDUCESD128_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducesv4sf_mask_round, "__builtin_ia32_reducess_mask_round", IX86_BUILTIN_REDUCESS128_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_rangesv2df_mask_round, "__builtin_ia32_rangesd128_mask_round", IX86_BUILTIN_RANGESD128, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_rangesv4sf_mask_round, "__builtin_ia32_rangess128_mask_round", IX86_BUILTIN_RANGESS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fix_notruncv8dfv8di2_mask_round, "__builtin_ia32_cvtpd2qq512_mask", IX86_BUILTIN_CVTPD2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_cvtps2qqv8di_mask_round, "__builtin_ia32_cvtps2qq512_mask", IX86_BUILTIN_CVTPS2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fixuns_notruncv8dfv8di2_mask_round, "__builtin_ia32_cvtpd2uqq512_mask", IX86_BUILTIN_CVTPD2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_cvtps2uqqv8di_mask_round, "__builtin_ia32_cvtps2uqq512_mask", IX86_BUILTIN_CVTPS2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_floatv8div8sf2_mask_round, "__builtin_ia32_cvtqq2ps512_mask", IX86_BUILTIN_CVTQQ2PS512, UNKNOWN, (int) V8SF_FTYPE_V8DI_V8SF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_floatunsv8div8sf2_mask_round, "__builtin_ia32_cvtuqq2ps512_mask", IX86_BUILTIN_CVTUQQ2PS512, UNKNOWN, (int) V8SF_FTYPE_V8DI_V8SF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_floatv8div8df2_mask_round, "__builtin_ia32_cvtqq2pd512_mask", IX86_BUILTIN_CVTQQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_floatunsv8div8df2_mask_round, "__builtin_ia32_cvtuqq2pd512_mask", IX86_BUILTIN_CVTUQQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fix_truncv8sfv8di2_mask_round, "__builtin_ia32_cvttps2qq512_mask", IX86_BUILTIN_CVTTPS2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fixuns_truncv8sfv8di2_mask_round, "__builtin_ia32_cvttps2uqq512_mask", IX86_BUILTIN_CVTTPS2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fix_truncv8dfv8di2_mask_round, "__builtin_ia32_cvttpd2qq512_mask", IX86_BUILTIN_CVTTPD2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fixuns_truncv8dfv8di2_mask_round, "__builtin_ia32_cvttpd2uqq512_mask", IX86_BUILTIN_CVTTPD2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_rangepv16sf_mask_round, "__builtin_ia32_rangeps512_mask", IX86_BUILTIN_RANGEPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_HI_INT)
-BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_rangepv8df_mask_round, "__builtin_ia32_rangepd512_mask", IX86_BUILTIN_RANGEPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fix_notruncv8dfv8di2_mask_round, "__builtin_ia32_cvtpd2qq512_mask", IX86_BUILTIN_CVTPD2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_cvtps2qqv8di_mask_round, "__builtin_ia32_cvtps2qq512_mask", IX86_BUILTIN_CVTPS2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fixuns_notruncv8dfv8di2_mask_round, "__builtin_ia32_cvtpd2uqq512_mask", IX86_BUILTIN_CVTPD2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_cvtps2uqqv8di_mask_round, "__builtin_ia32_cvtps2uqq512_mask", IX86_BUILTIN_CVTPS2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatv8div8sf2_mask_round, "__builtin_ia32_cvtqq2ps512_mask", IX86_BUILTIN_CVTQQ2PS512, UNKNOWN, (int) V8SF_FTYPE_V8DI_V8SF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatunsv8div8sf2_mask_round, "__builtin_ia32_cvtuqq2ps512_mask", IX86_BUILTIN_CVTUQQ2PS512, UNKNOWN, (int) V8SF_FTYPE_V8DI_V8SF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatv8div8df2_mask_round, "__builtin_ia32_cvtqq2pd512_mask", IX86_BUILTIN_CVTQQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_floatunsv8div8df2_mask_round, "__builtin_ia32_cvtuqq2pd512_mask", IX86_BUILTIN_CVTUQQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8DI_V8DF_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fix_truncv8sfv8di2_mask_round, "__builtin_ia32_cvttps2qq512_mask", IX86_BUILTIN_CVTTPS2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fixuns_truncv8sfv8di2_mask_round, "__builtin_ia32_cvttps2uqq512_mask", IX86_BUILTIN_CVTTPS2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fix_truncv8dfv8di2_mask_round, "__builtin_ia32_cvttpd2qq512_mask", IX86_BUILTIN_CVTTPD2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_fixuns_truncv8dfv8di2_mask_round, "__builtin_ia32_cvttpd2uqq512_mask", IX86_BUILTIN_CVTTPD2UQQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_rangepv16sf_mask_round, "__builtin_ia32_rangeps512_mask", IX86_BUILTIN_RANGEPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_HI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512dq_rangepv8df_mask_round, "__builtin_ia32_rangepd512_mask", IX86_BUILTIN_RANGEPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_QI_INT)
/* AVX512FP16. */
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_addv32hf3_mask_round, "__builtin_ia32_addph512_mask_round", IX86_BUILTIN_ADDPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_subv32hf3_mask_round, "__builtin_ia32_subph512_mask_round", IX86_BUILTIN_SUBPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_mulv32hf3_mask_round, "__builtin_ia32_mulph512_mask_round", IX86_BUILTIN_MULPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv32hf3_mask_round, "__builtin_ia32_divph512_mask_round", IX86_BUILTIN_DIVPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_addv32hf3_mask_round, "__builtin_ia32_addph512_mask_round", IX86_BUILTIN_ADDPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_subv32hf3_mask_round, "__builtin_ia32_subph512_mask_round", IX86_BUILTIN_SUBPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_mulv32hf3_mask_round, "__builtin_ia32_mulph512_mask_round", IX86_BUILTIN_MULPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_divv32hf3_mask_round, "__builtin_ia32_divph512_mask_round", IX86_BUILTIN_DIVPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmaddv8hf3_mask_round, "__builtin_ia32_addsh_mask_round", IX86_BUILTIN_ADDSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsubv8hf3_mask_round, "__builtin_ia32_subsh_mask_round", IX86_BUILTIN_SUBSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmmulv8hf3_mask_round, "__builtin_ia32_mulsh_mask_round", IX86_BUILTIN_MULSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmdivv8hf3_mask_round, "__builtin_ia32_divsh_mask_round", IX86_BUILTIN_DIVSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv32hf3_mask_round, "__builtin_ia32_maxph512_mask_round", IX86_BUILTIN_MAXPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv32hf3_mask_round, "__builtin_ia32_minph512_mask_round", IX86_BUILTIN_MINPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_smaxv32hf3_mask_round, "__builtin_ia32_maxph512_mask_round", IX86_BUILTIN_MAXPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_sminv32hf3_mask_round, "__builtin_ia32_minph512_mask_round", IX86_BUILTIN_MINPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsmaxv8hf3_mask_round, "__builtin_ia32_maxsh_mask_round", IX86_BUILTIN_MAXSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask_round, "__builtin_ia32_minsh_mask_round", IX86_BUILTIN_MINSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_cmpv32hf3_mask_round, "__builtin_ia32_cmpph512_mask_round", IX86_BUILTIN_CMPPH512_MASK_ROUND, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_cmpv32hf3_mask_round, "__builtin_ia32_cmpph512_mask_round", IX86_BUILTIN_CMPPH512_MASK_ROUND, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmcmpv8hf3_mask_round, "__builtin_ia32_cmpsh_mask_round", IX86_BUILTIN_CMPSH_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8HF_V8HF_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_sqrtv32hf2_mask_round, "__builtin_ia32_sqrtph512_mask_round", IX86_BUILTIN_SQRTPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_sqrtv32hf2_mask_round, "__builtin_ia32_sqrtph512_mask_round", IX86_BUILTIN_SQRTPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsqrtv8hf2_mask_round, "__builtin_ia32_sqrtsh_mask_round", IX86_BUILTIN_SQRTSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_scalefv32hf_mask_round, "__builtin_ia32_scalefph512_mask_round", IX86_BUILTIN_SCALEFPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_scalefv32hf_mask_round, "__builtin_ia32_scalefph512_mask_round", IX86_BUILTIN_SCALEFPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmscalefv8hf_mask_round, "__builtin_ia32_scalefsh_mask_round", IX86_BUILTIN_SCALEFSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_reducepv32hf_mask_round, "__builtin_ia32_reduceph512_mask_round", IX86_BUILTIN_REDUCEPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_reducepv32hf_mask_round, "__builtin_ia32_reduceph512_mask_round", IX86_BUILTIN_REDUCEPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_reducesv8hf_mask_round, "__builtin_ia32_reducesh_mask_round", IX86_BUILTIN_REDUCESH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_rndscalev32hf_mask_round, "__builtin_ia32_rndscaleph512_mask_round", IX86_BUILTIN_RNDSCALEPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_rndscalev32hf_mask_round, "__builtin_ia32_rndscaleph512_mask_round", IX86_BUILTIN_RNDSCALEPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_rndscalev8hf_mask_round, "__builtin_ia32_rndscalesh_mask_round", IX86_BUILTIN_RNDSCALESH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_getexpv32hf_mask_round, "__builtin_ia32_getexpph512_mask", IX86_BUILTIN_GETEXPPH512, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_getexpv32hf_mask_round, "__builtin_ia32_getexpph512_mask", IX86_BUILTIN_GETEXPPH512, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_sgetexpv8hf_mask_round, "__builtin_ia32_getexpsh_mask_round", IX86_BUILTIN_GETEXPSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_getmantv32hf_mask_round, "__builtin_ia32_getmantph512_mask", IX86_BUILTIN_GETMANTPH512, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_getmantv32hf_mask_round, "__builtin_ia32_getmantph512_mask", IX86_BUILTIN_GETMANTPH512, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vgetmantv8hf_mask_round, "__builtin_ia32_getmantsh_mask_round", IX86_BUILTIN_GETMANTSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2dq_v16si_mask_round, "__builtin_ia32_vcvtph2dq512_mask_round", IX86_BUILTIN_VCVTPH2DQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2udq_v16si_mask_round, "__builtin_ia32_vcvtph2udq512_mask_round", IX86_BUILTIN_VCVTPH2UDQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fix_truncv16si2_mask_round, "__builtin_ia32_vcvttph2dq512_mask_round", IX86_BUILTIN_VCVTTPH2DQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fixuns_truncv16si2_mask_round, "__builtin_ia32_vcvttph2udq512_mask_round", IX86_BUILTIN_VCVTTPH2UDQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2qq_v8di_mask_round, "__builtin_ia32_vcvtph2qq512_mask_round", IX86_BUILTIN_VCVTPH2QQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uqq_v8di_mask_round, "__builtin_ia32_vcvtph2uqq512_mask_round", IX86_BUILTIN_VCVTPH2UQQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fix_truncv8di2_mask_round, "__builtin_ia32_vcvttph2qq512_mask_round", IX86_BUILTIN_VCVTTPH2QQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fixuns_truncv8di2_mask_round, "__builtin_ia32_vcvttph2uqq512_mask_round", IX86_BUILTIN_VCVTTPH2UQQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2w_v32hi_mask_round, "__builtin_ia32_vcvtph2w512_mask_round", IX86_BUILTIN_VCVTPH2W512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uw_v32hi_mask_round, "__builtin_ia32_vcvtph2uw512_mask_round", IX86_BUILTIN_VCVTPH2UW512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fix_truncv32hi2_mask_round, "__builtin_ia32_vcvttph2w512_mask_round", IX86_BUILTIN_VCVTTPH2W512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fixuns_truncv32hi2_mask_round, "__builtin_ia32_vcvttph2uw512_mask_round", IX86_BUILTIN_VCVTTPH2UW512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtw2ph_v32hi_mask_round, "__builtin_ia32_vcvtw2ph512_mask_round", IX86_BUILTIN_VCVTW2PH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HI_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtuw2ph_v32hi_mask_round, "__builtin_ia32_vcvtuw2ph512_mask_round", IX86_BUILTIN_VCVTUW2PH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HI_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtdq2ph_v16si_mask_round, "__builtin_ia32_vcvtdq2ph512_mask_round", IX86_BUILTIN_VCVTDQ2PH512_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16SI_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtudq2ph_v16si_mask_round, "__builtin_ia32_vcvtudq2ph512_mask_round", IX86_BUILTIN_VCVTUDQ2PH512_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16SI_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtqq2ph_v8di_mask_round, "__builtin_ia32_vcvtqq2ph512_mask_round", IX86_BUILTIN_VCVTQQ2PH512_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8DI_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtuqq2ph_v8di_mask_round, "__builtin_ia32_vcvtuqq2ph512_mask_round", IX86_BUILTIN_VCVTUQQ2PH512_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8DI_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtph2dq_v16si_mask_round, "__builtin_ia32_vcvtph2dq512_mask_round", IX86_BUILTIN_VCVTPH2DQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtph2udq_v16si_mask_round, "__builtin_ia32_vcvtph2udq512_mask_round", IX86_BUILTIN_VCVTPH2UDQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_fix_truncv16si2_mask_round, "__builtin_ia32_vcvttph2dq512_mask_round", IX86_BUILTIN_VCVTTPH2DQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_fixuns_truncv16si2_mask_round, "__builtin_ia32_vcvttph2udq512_mask_round", IX86_BUILTIN_VCVTTPH2UDQ512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtph2qq_v8di_mask_round, "__builtin_ia32_vcvtph2qq512_mask_round", IX86_BUILTIN_VCVTPH2QQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtph2uqq_v8di_mask_round, "__builtin_ia32_vcvtph2uqq512_mask_round", IX86_BUILTIN_VCVTPH2UQQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_fix_truncv8di2_mask_round, "__builtin_ia32_vcvttph2qq512_mask_round", IX86_BUILTIN_VCVTTPH2QQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_fixuns_truncv8di2_mask_round, "__builtin_ia32_vcvttph2uqq512_mask_round", IX86_BUILTIN_VCVTTPH2UQQ512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtph2w_v32hi_mask_round, "__builtin_ia32_vcvtph2w512_mask_round", IX86_BUILTIN_VCVTPH2W512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtph2uw_v32hi_mask_round, "__builtin_ia32_vcvtph2uw512_mask_round", IX86_BUILTIN_VCVTPH2UW512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_fix_truncv32hi2_mask_round, "__builtin_ia32_vcvttph2w512_mask_round", IX86_BUILTIN_VCVTTPH2W512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_fixuns_truncv32hi2_mask_round, "__builtin_ia32_vcvttph2uw512_mask_round", IX86_BUILTIN_VCVTTPH2UW512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtw2ph_v32hi_mask_round, "__builtin_ia32_vcvtw2ph512_mask_round", IX86_BUILTIN_VCVTW2PH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HI_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtuw2ph_v32hi_mask_round, "__builtin_ia32_vcvtuw2ph512_mask_round", IX86_BUILTIN_VCVTUW2PH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HI_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtdq2ph_v16si_mask_round, "__builtin_ia32_vcvtdq2ph512_mask_round", IX86_BUILTIN_VCVTDQ2PH512_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16SI_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtudq2ph_v16si_mask_round, "__builtin_ia32_vcvtudq2ph512_mask_round", IX86_BUILTIN_VCVTUDQ2PH512_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16SI_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtqq2ph_v8di_mask_round, "__builtin_ia32_vcvtqq2ph512_mask_round", IX86_BUILTIN_VCVTQQ2PH512_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8DI_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtuqq2ph_v8di_mask_round, "__builtin_ia32_vcvtuqq2ph512_mask_round", IX86_BUILTIN_VCVTUQQ2PH512_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8DI_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsh2si_round, "__builtin_ia32_vcvtsh2si32_round", IX86_BUILTIN_VCVTSH2SI32_ROUND, UNKNOWN, (int) INT_FTYPE_V8HF_INT)
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsh2siq_round, "__builtin_ia32_vcvtsh2si64_round", IX86_BUILTIN_VCVTSH2SI64_ROUND, UNKNOWN, (int) INT64_FTYPE_V8HF_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsh2usi_round, "__builtin_ia32_vcvtsh2usi32_round", IX86_BUILTIN_VCVTSH2USI32_ROUND, UNKNOWN, (int) UINT_FTYPE_V8HF_INT)
@@ -3285,32 +3285,32 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsi2sh_round, "__b
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsi2shq_round, "__builtin_ia32_vcvtsi2sh64_round", IX86_BUILTIN_VCVTSI2SH64_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_INT64_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtusi2sh_round, "__builtin_ia32_vcvtusi2sh32_round", IX86_BUILTIN_VCVTUSI2SH32_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_UINT_INT)
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtusi2shq_round, "__builtin_ia32_vcvtusi2sh64_round", IX86_BUILTIN_VCVTUSI2SH64_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_UINT64_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_float_extend_phv8df2_mask_round, "__builtin_ia32_vcvtph2pd512_mask_round", IX86_BUILTIN_VCVTPH2PD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8HF_V8DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_float_extend_phv16sf2_mask_round, "__builtin_ia32_vcvtph2psx512_mask_round", IX86_BUILTIN_VCVTPH2PSX512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16HF_V16SF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtpd2ph_v8df_mask_round, "__builtin_ia32_vcvtpd2ph512_mask_round", IX86_BUILTIN_VCVTPD2PH512_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8DF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtps2ph_v16sf_mask_round, "__builtin_ia32_vcvtps2phx512_mask_round", IX86_BUILTIN_VCVTPS2PHX512_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16SF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_float_extend_phv8df2_mask_round, "__builtin_ia32_vcvtph2pd512_mask_round", IX86_BUILTIN_VCVTPH2PD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8HF_V8DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_float_extend_phv16sf2_mask_round, "__builtin_ia32_vcvtph2psx512_mask_round", IX86_BUILTIN_VCVTPH2PSX512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16HF_V16SF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtpd2ph_v8df_mask_round, "__builtin_ia32_vcvtpd2ph512_mask_round", IX86_BUILTIN_VCVTPD2PH512_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8DF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512fp16_vcvtps2ph_v16sf_mask_round, "__builtin_ia32_vcvtps2phx512_mask_round", IX86_BUILTIN_VCVTPS2PHX512_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16SF_V16HF_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsh2ss_mask_round, "__builtin_ia32_vcvtsh2ss_mask_round", IX86_BUILTIN_VCVTSH2SS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsh2sd_mask_round, "__builtin_ia32_vcvtsh2sd_mask_round", IX86_BUILTIN_VCVTSH2SD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtss2sh_mask_round, "__builtin_ia32_vcvtss2sh_mask_round", IX86_BUILTIN_VCVTSS2SH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4SF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtsd2sh_mask_round, "__builtin_ia32_vcvtsd2sh_mask_round", IX86_BUILTIN_VCVTSD2SH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V2DF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmaddsub_v32hf_mask_round, "__builtin_ia32_vfmaddsubph512_mask", IX86_BUILTIN_VFMADDSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmaddsub_v32hf_mask3_round, "__builtin_ia32_vfmaddsubph512_mask3", IX86_BUILTIN_VFMADDSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmaddsub_v32hf_maskz_round, "__builtin_ia32_vfmaddsubph512_maskz", IX86_BUILTIN_VFMADDSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsubadd_v32hf_mask_round, "__builtin_ia32_vfmsubaddph512_mask", IX86_BUILTIN_VFMSUBADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsubadd_v32hf_mask3_round, "__builtin_ia32_vfmsubaddph512_mask3", IX86_BUILTIN_VFMSUBADDPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsubadd_v32hf_maskz_round, "__builtin_ia32_vfmsubaddph512_maskz", IX86_BUILTIN_VFMSUBADDPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmadd_v32hf_mask_round, "__builtin_ia32_vfmaddph512_mask", IX86_BUILTIN_VFMADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmadd_v32hf_mask3_round, "__builtin_ia32_vfmaddph512_mask3", IX86_BUILTIN_VFMADDPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmadd_v32hf_maskz_round, "__builtin_ia32_vfmaddph512_maskz", IX86_BUILTIN_VFMADDPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmadd_v32hf_mask_round, "__builtin_ia32_vfnmaddph512_mask", IX86_BUILTIN_VFNMADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmadd_v32hf_mask3_round, "__builtin_ia32_vfnmaddph512_mask3", IX86_BUILTIN_VFNMADDPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmadd_v32hf_maskz_round, "__builtin_ia32_vfnmaddph512_maskz", IX86_BUILTIN_VFNMADDPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsub_v32hf_mask_round, "__builtin_ia32_vfmsubph512_mask", IX86_BUILTIN_VFMSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsub_v32hf_mask3_round, "__builtin_ia32_vfmsubph512_mask3", IX86_BUILTIN_VFMSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsub_v32hf_maskz_round, "__builtin_ia32_vfmsubph512_maskz", IX86_BUILTIN_VFMSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmsub_v32hf_mask_round, "__builtin_ia32_vfnmsubph512_mask", IX86_BUILTIN_VFNMSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmsub_v32hf_mask3_round, "__builtin_ia32_vfnmsubph512_mask3", IX86_BUILTIN_VFNMSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmsub_v32hf_maskz_round, "__builtin_ia32_vfnmsubph512_maskz", IX86_BUILTIN_VFNMSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmaddsub_v32hf_mask_round, "__builtin_ia32_vfmaddsubph512_mask", IX86_BUILTIN_VFMADDSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmaddsub_v32hf_mask3_round, "__builtin_ia32_vfmaddsubph512_mask3", IX86_BUILTIN_VFMADDSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmaddsub_v32hf_maskz_round, "__builtin_ia32_vfmaddsubph512_maskz", IX86_BUILTIN_VFMADDSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmsubadd_v32hf_mask_round, "__builtin_ia32_vfmsubaddph512_mask", IX86_BUILTIN_VFMSUBADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmsubadd_v32hf_mask3_round, "__builtin_ia32_vfmsubaddph512_mask3", IX86_BUILTIN_VFMSUBADDPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmsubadd_v32hf_maskz_round, "__builtin_ia32_vfmsubaddph512_maskz", IX86_BUILTIN_VFMSUBADDPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmadd_v32hf_mask_round, "__builtin_ia32_vfmaddph512_mask", IX86_BUILTIN_VFMADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmadd_v32hf_mask3_round, "__builtin_ia32_vfmaddph512_mask3", IX86_BUILTIN_VFMADDPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmadd_v32hf_maskz_round, "__builtin_ia32_vfmaddph512_maskz", IX86_BUILTIN_VFMADDPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fnmadd_v32hf_mask_round, "__builtin_ia32_vfnmaddph512_mask", IX86_BUILTIN_VFNMADDPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fnmadd_v32hf_mask3_round, "__builtin_ia32_vfnmaddph512_mask3", IX86_BUILTIN_VFNMADDPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fnmadd_v32hf_maskz_round, "__builtin_ia32_vfnmaddph512_maskz", IX86_BUILTIN_VFNMADDPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmsub_v32hf_mask_round, "__builtin_ia32_vfmsubph512_mask", IX86_BUILTIN_VFMSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmsub_v32hf_mask3_round, "__builtin_ia32_vfmsubph512_mask3", IX86_BUILTIN_VFMSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmsub_v32hf_maskz_round, "__builtin_ia32_vfmsubph512_maskz", IX86_BUILTIN_VFMSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fnmsub_v32hf_mask_round, "__builtin_ia32_vfnmsubph512_mask", IX86_BUILTIN_VFNMSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fnmsub_v32hf_mask3_round, "__builtin_ia32_vfnmsubph512_mask3", IX86_BUILTIN_VFNMSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fnmsub_v32hf_maskz_round, "__builtin_ia32_vfnmsubph512_maskz", IX86_BUILTIN_VFNMSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmadd_v8hf_mask_round, "__builtin_ia32_vfmaddsh3_mask", IX86_BUILTIN_VFMADDSH3_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmadd_v8hf_mask3_round, "__builtin_ia32_vfmaddsh3_mask3", IX86_BUILTIN_VFMADDSH3_MASK3, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmadd_v8hf_maskz_round, "__builtin_ia32_vfmaddsh3_maskz", IX86_BUILTIN_VFMADDSH3_MASKZ, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
@@ -3318,18 +3318,18 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfnmadd_v8hf_mask_round
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfnmadd_v8hf_mask3_round, "__builtin_ia32_vfnmaddsh3_mask3", IX86_BUILTIN_VFNMADDSH3_MASK3, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfnmadd_v8hf_maskz_round, "__builtin_ia32_vfnmaddsh3_maskz", IX86_BUILTIN_VFNMADDSH3_MASKZ, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmsub_v8hf_mask3_round, "__builtin_ia32_vfmsubsh3_mask3", IX86_BUILTIN_VFMSUBSH3_MASK3, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_fma_fmaddc_v32hf_round, "__builtin_ia32_vfmaddcph512_round", IX86_BUILTIN_VFMADDCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmaddc_v32hf_mask1_round, "__builtin_ia32_vfmaddcph512_mask_round", IX86_BUILTIN_VFMADDCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmaddc_v32hf_mask_round, "__builtin_ia32_vfmaddcph512_mask3_round", IX86_BUILTIN_VFMADDCPH512_MASK3_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmaddc_v32hf_maskz_round, "__builtin_ia32_vfmaddcph512_maskz_round", IX86_BUILTIN_VFMADDCPH512_MASKZ_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_fma_fcmaddc_v32hf_round, "__builtin_ia32_vfcmaddcph512_round", IX86_BUILTIN_VFCMADDCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmaddc_v32hf_mask1_round, "__builtin_ia32_vfcmaddcph512_mask_round", IX86_BUILTIN_VFCMADDCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmaddc_v32hf_mask_round, "__builtin_ia32_vfcmaddcph512_mask3_round", IX86_BUILTIN_VFCMADDCPH512_MASK3_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmaddc_v32hf_maskz_round, "__builtin_ia32_vfcmaddcph512_maskz_round", IX86_BUILTIN_VFCMADDCPH512_MASKZ_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_round, "__builtin_ia32_vfcmulcph512_round", IX86_BUILTIN_VFCMULCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_mask_round, "__builtin_ia32_vfcmulcph512_mask_round", IX86_BUILTIN_VFCMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_round, "__builtin_ia32_vfmulcph512_round", IX86_BUILTIN_VFMULCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_mask_round, "__builtin_ia32_vfmulcph512_mask_round", IX86_BUILTIN_VFMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_fma_fmaddc_v32hf_round, "__builtin_ia32_vfmaddcph512_round", IX86_BUILTIN_VFMADDCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmaddc_v32hf_mask1_round, "__builtin_ia32_vfmaddcph512_mask_round", IX86_BUILTIN_VFMADDCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmaddc_v32hf_mask_round, "__builtin_ia32_vfmaddcph512_mask3_round", IX86_BUILTIN_VFMADDCPH512_MASK3_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmaddc_v32hf_maskz_round, "__builtin_ia32_vfmaddcph512_maskz_round", IX86_BUILTIN_VFMADDCPH512_MASKZ_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_fma_fcmaddc_v32hf_round, "__builtin_ia32_vfcmaddcph512_round", IX86_BUILTIN_VFCMADDCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fcmaddc_v32hf_mask1_round, "__builtin_ia32_vfcmaddcph512_mask_round", IX86_BUILTIN_VFCMADDCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fcmaddc_v32hf_mask_round, "__builtin_ia32_vfcmaddcph512_mask3_round", IX86_BUILTIN_VFCMADDCPH512_MASK3_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fcmaddc_v32hf_maskz_round, "__builtin_ia32_vfcmaddcph512_maskz_round", IX86_BUILTIN_VFCMADDCPH512_MASKZ_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fcmulc_v32hf_round, "__builtin_ia32_vfcmulcph512_round", IX86_BUILTIN_VFCMULCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fcmulc_v32hf_mask_round, "__builtin_ia32_vfcmulcph512_mask_round", IX86_BUILTIN_VFCMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmulc_v32hf_round, "__builtin_ia32_vfmulcph512_round", IX86_BUILTIN_VFMULCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_EVEX512, CODE_FOR_avx512bw_fmulc_v32hf_mask_round, "__builtin_ia32_vfmulcph512_mask_round", IX86_BUILTIN_VFMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fcmaddcsh_v8hf_round, "__builtin_ia32_vfcmaddcsh_round", IX86_BUILTIN_VFCMADDCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_mask1_round, "__builtin_ia32_vfcmaddcsh_mask_round", IX86_BUILTIN_VFCMADDCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_mask3_round, "__builtin_ia32_vfcmaddcsh_mask3_round", IX86_BUILTIN_VFCMADDCSH_MASK3_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
diff --git a/gcc/config/i386/i386-builtins.cc b/gcc/config/i386/i386-builtins.cc
index 8a0b8df..42fc375 100644
--- a/gcc/config/i386/i386-builtins.cc
+++ b/gcc/config/i386/i386-builtins.cc
@@ -784,83 +784,103 @@ ix86_init_mmx_sse_builtins (void)
IX86_BUILTIN_GATHERALTDIV8SI);
/* AVX512F */
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gathersiv16sf",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gathersiv16sf",
V16SF_FTYPE_V16SF_PCVOID_V16SI_HI_INT,
IX86_BUILTIN_GATHER3SIV16SF);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gathersiv8df",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gathersiv8df",
V8DF_FTYPE_V8DF_PCVOID_V8SI_QI_INT,
IX86_BUILTIN_GATHER3SIV8DF);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gatherdiv16sf",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gatherdiv16sf",
V8SF_FTYPE_V8SF_PCVOID_V8DI_QI_INT,
IX86_BUILTIN_GATHER3DIV16SF);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gatherdiv8df",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gatherdiv8df",
V8DF_FTYPE_V8DF_PCVOID_V8DI_QI_INT,
IX86_BUILTIN_GATHER3DIV8DF);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gathersiv16si",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gathersiv16si",
V16SI_FTYPE_V16SI_PCVOID_V16SI_HI_INT,
IX86_BUILTIN_GATHER3SIV16SI);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gathersiv8di",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gathersiv8di",
V8DI_FTYPE_V8DI_PCVOID_V8SI_QI_INT,
IX86_BUILTIN_GATHER3SIV8DI);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gatherdiv16si",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gatherdiv16si",
V8SI_FTYPE_V8SI_PCVOID_V8DI_QI_INT,
IX86_BUILTIN_GATHER3DIV16SI);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gatherdiv8di",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gatherdiv8di",
V8DI_FTYPE_V8DI_PCVOID_V8DI_QI_INT,
IX86_BUILTIN_GATHER3DIV8DI);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gather3altsiv8df ",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gather3altsiv8df ",
V8DF_FTYPE_V8DF_PCDOUBLE_V16SI_QI_INT,
IX86_BUILTIN_GATHER3ALTSIV8DF);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gather3altdiv16sf ",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gather3altdiv16sf ",
V16SF_FTYPE_V16SF_PCFLOAT_V8DI_HI_INT,
IX86_BUILTIN_GATHER3ALTDIV16SF);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gather3altsiv8di ",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gather3altsiv8di ",
V8DI_FTYPE_V8DI_PCINT64_V16SI_QI_INT,
IX86_BUILTIN_GATHER3ALTSIV8DI);
- def_builtin_pure (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_gather3altdiv16si ",
+ def_builtin_pure (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_gather3altdiv16si ",
V16SI_FTYPE_V16SI_PCINT_V8DI_HI_INT,
IX86_BUILTIN_GATHER3ALTDIV16SI);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scattersiv16sf",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scattersiv16sf",
VOID_FTYPE_PVOID_HI_V16SI_V16SF_INT,
IX86_BUILTIN_SCATTERSIV16SF);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scattersiv8df",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scattersiv8df",
VOID_FTYPE_PVOID_QI_V8SI_V8DF_INT,
IX86_BUILTIN_SCATTERSIV8DF);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatterdiv16sf",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatterdiv16sf",
VOID_FTYPE_PVOID_QI_V8DI_V8SF_INT,
IX86_BUILTIN_SCATTERDIV16SF);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatterdiv8df",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatterdiv8df",
VOID_FTYPE_PVOID_QI_V8DI_V8DF_INT,
IX86_BUILTIN_SCATTERDIV8DF);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scattersiv16si",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scattersiv16si",
VOID_FTYPE_PVOID_HI_V16SI_V16SI_INT,
IX86_BUILTIN_SCATTERSIV16SI);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scattersiv8di",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scattersiv8di",
VOID_FTYPE_PVOID_QI_V8SI_V8DI_INT,
IX86_BUILTIN_SCATTERSIV8DI);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatterdiv16si",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatterdiv16si",
VOID_FTYPE_PVOID_QI_V8DI_V8SI_INT,
IX86_BUILTIN_SCATTERDIV16SI);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatterdiv8di",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatterdiv8di",
VOID_FTYPE_PVOID_QI_V8DI_V8DI_INT,
IX86_BUILTIN_SCATTERDIV8DI);
@@ -1009,19 +1029,23 @@ ix86_init_mmx_sse_builtins (void)
VOID_FTYPE_PVOID_QI_V2DI_V2DI_INT,
IX86_BUILTIN_SCATTERDIV2DI);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatteraltsiv8df ",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatteraltsiv8df ",
VOID_FTYPE_PDOUBLE_QI_V16SI_V8DF_INT,
IX86_BUILTIN_SCATTERALTSIV8DF);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatteraltdiv16sf ",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatteraltdiv16sf ",
VOID_FTYPE_PFLOAT_HI_V8DI_V16SF_INT,
IX86_BUILTIN_SCATTERALTDIV16SF);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatteraltsiv8di ",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatteraltsiv8di ",
VOID_FTYPE_PLONGLONG_QI_V16SI_V8DI_INT,
IX86_BUILTIN_SCATTERALTSIV8DI);
- def_builtin (OPTION_MASK_ISA_AVX512F, 0, "__builtin_ia32_scatteraltdiv16si ",
+ def_builtin (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_EVEX512,
+ "__builtin_ia32_scatteraltdiv16si ",
VOID_FTYPE_PINT_HI_V8DI_V16SI_INT,
IX86_BUILTIN_SCATTERALTDIV16SI);
@@ -1238,6 +1262,14 @@ ix86_init_mmx_sse_builtins (void)
"__builtin_ia32_testui",
UINT8_FTYPE_VOID, IX86_BUILTIN_TESTUI);
+ /* USER_MSR. */
+ def_builtin (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_USER_MSR,
+ "__builtin_ia32_urdmsr", UINT64_FTYPE_UINT64,
+ IX86_BUILTIN_URDMSR);
+ def_builtin (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_USER_MSR,
+ "__builtin_ia32_uwrmsr", VOID_FTYPE_UINT64_UINT64,
+ IX86_BUILTIN_UWRMSR);
+
/* CLDEMOTE. */
def_builtin (0, OPTION_MASK_ISA2_CLDEMOTE, "__builtin_ia32_cldemote",
VOID_FTYPE_PCVOID, IX86_BUILTIN_CLDEMOTE);
@@ -1651,6 +1683,10 @@ ix86_vectorize_builtin_gather (const_tree mem_vectype,
{
bool si;
enum ix86_builtins code;
+ const machine_mode mode = TYPE_MODE (TREE_TYPE (mem_vectype));
+
+ if ((!TARGET_AVX512F || !TARGET_EVEX512) && GET_MODE_SIZE (mode) == 64)
+ return NULL_TREE;
if (! TARGET_AVX2
|| (known_eq (TYPE_VECTOR_SUBPARTS (mem_vectype), 2u)
@@ -1731,28 +1767,16 @@ ix86_vectorize_builtin_gather (const_tree mem_vectype,
code = si ? IX86_BUILTIN_GATHERSIV8SI : IX86_BUILTIN_GATHERALTDIV8SI;
break;
case E_V8DFmode:
- if (TARGET_AVX512F)
- code = si ? IX86_BUILTIN_GATHER3ALTSIV8DF : IX86_BUILTIN_GATHER3DIV8DF;
- else
- return NULL_TREE;
+ code = si ? IX86_BUILTIN_GATHER3ALTSIV8DF : IX86_BUILTIN_GATHER3DIV8DF;
break;
case E_V8DImode:
- if (TARGET_AVX512F)
- code = si ? IX86_BUILTIN_GATHER3ALTSIV8DI : IX86_BUILTIN_GATHER3DIV8DI;
- else
- return NULL_TREE;
+ code = si ? IX86_BUILTIN_GATHER3ALTSIV8DI : IX86_BUILTIN_GATHER3DIV8DI;
break;
case E_V16SFmode:
- if (TARGET_AVX512F)
- code = si ? IX86_BUILTIN_GATHER3SIV16SF : IX86_BUILTIN_GATHER3ALTDIV16SF;
- else
- return NULL_TREE;
+ code = si ? IX86_BUILTIN_GATHER3SIV16SF : IX86_BUILTIN_GATHER3ALTDIV16SF;
break;
case E_V16SImode:
- if (TARGET_AVX512F)
- code = si ? IX86_BUILTIN_GATHER3SIV16SI : IX86_BUILTIN_GATHER3ALTDIV16SI;
- else
- return NULL_TREE;
+ code = si ? IX86_BUILTIN_GATHER3SIV16SI : IX86_BUILTIN_GATHER3ALTDIV16SI;
break;
default:
return NULL_TREE;
diff --git a/gcc/config/i386/i386-builtins.h b/gcc/config/i386/i386-builtins.h
index c632482..40785b0 100644
--- a/gcc/config/i386/i386-builtins.h
+++ b/gcc/config/i386/i386-builtins.h
@@ -39,6 +39,8 @@ enum ix86_builtins
IX86_BUILTIN_MWAIT,
IX86_BUILTIN_UMONITOR,
IX86_BUILTIN_UMWAIT,
+ IX86_BUILTIN_URDMSR,
+ IX86_BUILTIN_UWRMSR,
IX86_BUILTIN_TPAUSE,
IX86_BUILTIN_TESTUI,
IX86_BUILTIN_CLZERO,
diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
index 47768fa..ebe6a63 100644
--- a/gcc/config/i386/i386-c.cc
+++ b/gcc/config/i386/i386-c.cc
@@ -210,6 +210,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__grandridge");
def_or_undef (parse_in, "__grandridge__");
break;
+ case PROCESSOR_CLEARWATERFOREST:
+ def_or_undef (parse_in, "__clearwaterforest");
+ def_or_undef (parse_in, "__clearwaterforest__");
+ break;
case PROCESSOR_KNL:
def_or_undef (parse_in, "__knl");
def_or_undef (parse_in, "__knl__");
@@ -278,6 +282,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__arrowlake_s");
def_or_undef (parse_in, "__arrowlake_s__");
break;
+ case PROCESSOR_PANTHERLAKE:
+ def_or_undef (parse_in, "__pantherlake");
+ def_or_undef (parse_in, "__pantherlake__");
+ break;
/* use PROCESSOR_max to not set/unset the arch macro. */
case PROCESSOR_max:
@@ -415,6 +423,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
case PROCESSOR_GRANDRIDGE:
def_or_undef (parse_in, "__tune_grandridge__");
break;
+ case PROCESSOR_CLEARWATERFOREST:
+ def_or_undef (parse_in, "__tune_clearwaterforest__");
+ break;
case PROCESSOR_KNL:
def_or_undef (parse_in, "__tune_knl__");
break;
@@ -469,6 +480,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
case PROCESSOR_ARROWLAKE_S:
def_or_undef (parse_in, "__tune_arrowlake_s__");
break;
+ case PROCESSOR_PANTHERLAKE:
+ def_or_undef (parse_in, "__tune_pantherlake__");
+ break;
case PROCESSOR_INTEL:
case PROCESSOR_GENERIC:
break;
@@ -546,7 +560,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
if (isa_flag & OPTION_MASK_ISA_AVX512BW)
def_or_undef (parse_in, "__AVX512BW__");
if (isa_flag & OPTION_MASK_ISA_AVX512VL)
- def_or_undef (parse_in, "__AVX512VL__");
+ {
+ def_or_undef (parse_in, "__AVX512VL__");
+ def_or_undef (parse_in, "__EVEX256__");
+ }
if (isa_flag & OPTION_MASK_ISA_AVX512VBMI)
def_or_undef (parse_in, "__AVX512VBMI__");
if (isa_flag & OPTION_MASK_ISA_AVX512IFMA)
@@ -707,6 +724,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__SHA512__");
if (isa_flag2 & OPTION_MASK_ISA2_SM4)
def_or_undef (parse_in, "__SM4__");
+ if (isa_flag2 & OPTION_MASK_ISA2_EVEX512)
+ def_or_undef (parse_in, "__EVEX512__");
+ if (isa_flag2 & OPTION_MASK_ISA2_USER_MSR)
+ def_or_undef (parse_in, "__USER_MSR__");
if (TARGET_IAMCU)
{
def_or_undef (parse_in, "__iamcu");
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index e42ff27..1eae9d7 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -611,6 +611,7 @@ ix86_broadcast_from_constant (machine_mode mode, rtx op)
avx512 embed broadcast is available. */
if (GET_MODE_INNER (mode) == DImode && !TARGET_64BIT
&& (!TARGET_AVX512F
+ || (GET_MODE_SIZE (mode) == 64 && !TARGET_EVEX512)
|| (GET_MODE_SIZE (mode) < 64 && !TARGET_AVX512VL)))
return nullptr;
@@ -3942,7 +3943,7 @@ ix86_valid_mask_cmp_mode (machine_mode mode)
if ((inner_mode == QImode || inner_mode == HImode) && !TARGET_AVX512BW)
return false;
- return vector_size == 64 || TARGET_AVX512VL;
+ return (vector_size == 64 && TARGET_EVEX512) || TARGET_AVX512VL;
}
/* Return true if integer mask comparison should be used. */
@@ -4772,7 +4773,7 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
&& GET_MODE_SIZE (GET_MODE_INNER (mode)) >= 4
/* Don't do it if not using integer masks and we'd end up with
the right values in the registers though. */
- && (GET_MODE_SIZE (mode) == 64
+ && ((GET_MODE_SIZE (mode) == 64 && TARGET_EVEX512)
|| !vector_all_ones_operand (optrue, data_mode)
|| opfalse != CONST0_RTX (data_mode))))
{
@@ -6342,6 +6343,18 @@ ix86_split_ashl (rtx *operands, rtx scratch, machine_mode mode)
if (count > half_width)
ix86_expand_ashl_const (high[0], count - half_width, mode);
}
+ else if (count == 1)
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ rtx x3 = gen_rtx_REG (CCCmode, FLAGS_REG);
+ rtx x4 = gen_rtx_LTU (mode, x3, const0_rtx);
+ half_mode = mode == DImode ? SImode : DImode;
+ emit_insn (gen_add3_cc_overflow_1 (half_mode, low[0],
+ low[0], low[0]));
+ emit_insn (gen_add3_carry (half_mode, high[0], high[0], high[0],
+ x3, x4));
+ }
else
{
gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
@@ -6496,6 +6509,22 @@ ix86_split_ashr (rtx *operands, rtx scratch, machine_mode mode)
emit_insn (gen_ashr3 (low[0], low[0],
GEN_INT (count - half_width)));
}
+ else if (count == 1
+ && (TARGET_USE_RCR || optimize_size > 1))
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ if (mode == DImode)
+ {
+ emit_insn (gen_ashrsi3_carry (high[0], high[0]));
+ emit_insn (gen_rcrsi2 (low[0], low[0]));
+ }
+ else
+ {
+ emit_insn (gen_ashrdi3_carry (high[0], high[0]));
+ emit_insn (gen_rcrdi2 (low[0], low[0]));
+ }
+ }
else
{
gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
@@ -6561,6 +6590,22 @@ ix86_split_lshr (rtx *operands, rtx scratch, machine_mode mode)
emit_insn (gen_lshr3 (low[0], low[0],
GEN_INT (count - half_width)));
}
+ else if (count == 1
+ && (TARGET_USE_RCR || optimize_size > 1))
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ if (mode == DImode)
+ {
+ emit_insn (gen_lshrsi3_carry (high[0], high[0]));
+ emit_insn (gen_rcrsi2 (low[0], low[0]));
+ }
+ else
+ {
+ emit_insn (gen_lshrdi3_carry (high[0], high[0]));
+ emit_insn (gen_rcrdi2 (low[0], low[0]));
+ }
+ }
else
{
gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
@@ -8320,6 +8365,11 @@ alg_usable_p (enum stringop_alg alg, bool memset, bool have_as)
{
if (alg == no_stringop)
return false;
+ /* It is not possible to use a library call if we have non-default
+ address space. We can do better than the generic byte-at-a-time
+ loop, used as a fallback. */
+ if (alg == libcall && have_as)
+ return false;
if (alg == vector_loop)
return TARGET_SSE || TARGET_AVX;
/* Algorithms using the rep prefix want at least edi and ecx;
@@ -8494,8 +8544,12 @@ decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size,
gcc_assert (alg != libcall);
return alg;
}
+
+ /* Try to use some reasonable fallback algorithm. Note that for
+ non-default address spaces we default to a loop instead of
+ a libcall. */
return (alg_usable_p (algs->unknown_size, memset, have_as)
- ? algs->unknown_size : libcall);
+ ? algs->unknown_size : have_as ? loop : libcall);
}
/* Decide on alignment. We know that the operand is already aligned to ALIGN
@@ -13417,6 +13471,41 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
return 0;
}
+ case IX86_BUILTIN_URDMSR:
+ case IX86_BUILTIN_UWRMSR:
+ {
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+
+ if (CONST_INT_P (op0))
+ {
+ unsigned HOST_WIDE_INT val = UINTVAL (op0);
+ if (val > 0xffffffff)
+ op0 = force_reg (DImode, op0);
+ }
+ else
+ op0 = force_reg (DImode, op0);
+
+ if (fcode == IX86_BUILTIN_UWRMSR)
+ {
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ op1 = expand_normal (arg1);
+ op1 = force_reg (DImode, op1);
+ icode = CODE_FOR_uwrmsr;
+ target = 0;
+ }
+ else
+ {
+ if (target == 0)
+ target = gen_reg_rtx (DImode);
+ icode = CODE_FOR_urdmsr;
+ op1 = op0;
+ op0 = target;
+ }
+ emit_insn (GEN_FCN (icode) (op0, op1));
+ return target;
+ }
+
case IX86_BUILTIN_VEC_INIT_V2SI:
case IX86_BUILTIN_VEC_INIT_V4HI:
case IX86_BUILTIN_VEC_INIT_V8QI:
@@ -15616,6 +15705,7 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
case E_V32HFmode:
case E_V32BFmode:
case E_V64QImode:
+ gcc_assert (TARGET_EVEX512);
if (TARGET_AVX512BW)
return ix86_vector_duplicate_value (mode, target, val);
else
@@ -15667,6 +15757,9 @@ ix86_expand_vector_init_one_nonzero (bool mmx_ok, machine_mode mode,
bool use_vector_set = false;
rtx (*gen_vec_set_0) (rtx, rtx, rtx) = NULL;
+ if (GET_MODE_SIZE (mode) == 64 && !TARGET_EVEX512)
+ return false;
+
switch (mode)
{
case E_V2DImode:
@@ -18287,7 +18380,7 @@ ix86_emit_swsqrtsf (rtx res, rtx a, machine_mode mode, bool recip)
unsigned vector_size = GET_MODE_SIZE (mode);
if (TARGET_FMA
- || (TARGET_AVX512F && vector_size == 64)
+ || (TARGET_AVX512F && TARGET_EVEX512 && vector_size == 64)
|| (TARGET_AVX512VL && (vector_size == 32 || vector_size == 16)))
emit_insn (gen_rtx_SET (e2,
gen_rtx_FMA (mode, e0, x0, mthree)));
@@ -18413,6 +18506,8 @@ ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
vmode = V4SFmode;
else if (mode == DFmode)
vmode = V2DFmode;
+ else if (mode == HFmode)
+ vmode = V8HFmode;
else
vmode = mode;
@@ -18949,6 +19044,10 @@ ix86_expand_round_sse4 (rtx op0, rtx op1)
switch (mode)
{
+ case E_HFmode:
+ gen_copysign = gen_copysignhf3;
+ gen_round = gen_sse4_1_roundhf2;
+ break;
case E_SFmode:
gen_copysign = gen_copysignsf3;
gen_round = gen_sse4_1_roundsf2;
@@ -23004,6 +23103,9 @@ ix86_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
unsigned int i, nelt, which;
bool two_args;
+ if (GET_MODE_SIZE (vmode) == 64 && !TARGET_EVEX512)
+ return false;
+
/* For HF mode vector, convert it to HI using subreg. */
if (GET_MODE_INNER (vmode) == HFmode)
{
@@ -23505,7 +23607,7 @@ ix86_expand_vecop_qihi2 (enum rtx_code code, rtx dest, rtx op1, rtx op2)
bool uns_p = code != ASHIFTRT;
if ((qimode == V16QImode && !TARGET_AVX2)
- || (qimode == V32QImode && !TARGET_AVX512BW)
+ || (qimode == V32QImode && (!TARGET_AVX512BW || !TARGET_EVEX512))
/* There are no V64HImode instructions. */
|| qimode == V64QImode)
return false;
@@ -24001,7 +24103,7 @@ ix86_expand_sse2_mulvxdi3 (rtx op0, rtx op1, rtx op2)
machine_mode mode = GET_MODE (op0);
rtx t1, t2, t3, t4, t5, t6;
- if (TARGET_AVX512DQ && mode == V8DImode)
+ if (TARGET_AVX512DQ && TARGET_EVEX512 && mode == V8DImode)
emit_insn (gen_avx512dq_mulv8di3 (op0, op1, op2));
else if (TARGET_AVX512DQ && TARGET_AVX512VL && mode == V4DImode)
emit_insn (gen_avx512dq_mulv4di3 (op0, op1, op2));
diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def
index aeafcf8..991df5e 100644
--- a/gcc/config/i386/i386-isa.def
+++ b/gcc/config/i386/i386-isa.def
@@ -121,3 +121,5 @@ DEF_PTA(AVXVNNIINT16)
DEF_PTA(SM3)
DEF_PTA(SHA512)
DEF_PTA(SM4)
+DEF_PTA(APX_F)
+DEF_PTA(USER_MSR)
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index e47f9ed..3a03de5 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -141,7 +141,11 @@ along with GCC; see the file COPYING3. If not see
#define m_GRANDRIDGE (HOST_WIDE_INT_1U<<PROCESSOR_GRANDRIDGE)
#define m_ARROWLAKE (HOST_WIDE_INT_1U<<PROCESSOR_ARROWLAKE)
#define m_ARROWLAKE_S (HOST_WIDE_INT_1U<<PROCESSOR_ARROWLAKE_S)
-#define m_CORE_ATOM (m_SIERRAFOREST | m_GRANDRIDGE)
+#define m_CLEARWATERFOREST (HOST_WIDE_INT_1U<<PROCESSOR_CLEARWATERFOREST)
+#define m_PANTHERLAKE (HOST_WIDE_INT_1U<<PROCESSOR_PANTHERLAKE)
+#define m_CORE_ATOM (m_SIERRAFOREST | m_GRANDRIDGE | m_CLEARWATERFOREST)
+#define m_CORE_HYBRID (m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S \
+ | m_PANTHERLAKE)
#define m_INTEL (HOST_WIDE_INT_1U<<PROCESSOR_INTEL)
/* Gather Data Sampling / CVE-2022-40982 / INTEL-SA-00828.
Software mitigation. */
@@ -250,7 +254,9 @@ static struct ix86_target_opts isa2_opts[] =
{ "-mavxvnniint16", OPTION_MASK_ISA2_AVXVNNIINT16 },
{ "-msm3", OPTION_MASK_ISA2_SM3 },
{ "-msha512", OPTION_MASK_ISA2_SHA512 },
- { "-msm4", OPTION_MASK_ISA2_SM4 }
+ { "-msm4", OPTION_MASK_ISA2_SM4 },
+ { "-mevex512", OPTION_MASK_ISA2_EVEX512 },
+ { "-musermsr", OPTION_MASK_ISA2_USER_MSR }
};
static struct ix86_target_opts isa_opts[] =
{
@@ -694,6 +700,7 @@ ix86_function_specific_save (struct cl_target_option *ptr,
ptr->branch_cost = ix86_branch_cost;
ptr->tune_defaulted = ix86_tune_defaulted;
ptr->arch_specified = ix86_arch_specified;
+ ptr->x_ix86_apx_features = opts->x_ix86_apx_features;
ptr->x_ix86_isa_flags_explicit = opts->x_ix86_isa_flags_explicit;
ptr->x_ix86_isa_flags2_explicit = opts->x_ix86_isa_flags2_explicit;
ptr->x_recip_mask_explicit = opts->x_recip_mask_explicit;
@@ -764,6 +771,7 @@ static const struct processor_costs *processor_cost_table[] =
&tremont_cost,
&alderlake_cost,
&alderlake_cost,
+ &alderlake_cost,
&slm_cost,
&slm_cost,
&skylake_cost,
@@ -781,6 +789,7 @@ static const struct processor_costs *processor_cost_table[] =
&icelake_cost,
&alderlake_cost,
&alderlake_cost,
+ &alderlake_cost,
&intel_cost,
&lujiazui_cost,
&geode_cost,
@@ -832,6 +841,7 @@ ix86_function_specific_restore (struct gcc_options *opts,
ix86_prefetch_sse = ptr->prefetch_sse;
ix86_tune_defaulted = ptr->tune_defaulted;
ix86_arch_specified = ptr->arch_specified;
+ opts->x_ix86_apx_features = ptr->x_ix86_apx_features;
opts->x_ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
opts->x_ix86_isa_flags2_explicit = ptr->x_ix86_isa_flags2_explicit;
opts->x_recip_mask_explicit = ptr->x_recip_mask_explicit;
@@ -1109,6 +1119,9 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[],
IX86_ATTR_ISA ("sm3", OPT_msm3),
IX86_ATTR_ISA ("sha512", OPT_msha512),
IX86_ATTR_ISA ("sm4", OPT_msm4),
+ IX86_ATTR_ISA ("apxf", OPT_mapxf),
+ IX86_ATTR_ISA ("evex512", OPT_mevex512),
+ IX86_ATTR_ISA ("usermsr", OPT_musermsr),
/* enum options */
IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
@@ -2080,6 +2093,9 @@ ix86_option_override_internal (bool main_args_p,
opts->x_ix86_stringop_alg = no_stringop;
}
+ if (TARGET_APX_F && !TARGET_64BIT)
+ error ("%<-mapxf%> is not supported for 32-bit code");
+
if (TARGET_UINTR && !TARGET_64BIT)
error ("%<-muintr%> not supported for 32-bit code");
@@ -2293,6 +2309,14 @@ ix86_option_override_internal (bool main_args_p,
SET_TARGET_POPCNT (opts);
}
+ /* Enable all APX features if the processor supports APX_F and neither
+ -mapxf nor -mapx-features was explicitly specified for -march. */
+ if (TARGET_64BIT_P (opts->x_ix86_isa_flags)
+ && ((processor_alias_table[i].flags & PTA_APX_F) != 0)
+ && !TARGET_EXPLICIT_APX_F_P (opts)
+ && !OPTION_SET_P (ix86_apx_features))
+ opts->x_ix86_apx_features = apx_all;
+
if ((processor_alias_table[i].flags
& (PTA_PREFETCH_SSE | PTA_SSE)) != 0)
ix86_prefetch_sse = true;
@@ -2444,6 +2468,10 @@ ix86_option_override_internal (bool main_args_p,
/* Arrange to set up i386_stack_locals for all functions. */
init_machine_status = ix86_init_machine_status;
+ /* Override APX flag here if ISA bit is set. */
+ if (TARGET_APX_F && !OPTION_SET_P (ix86_apx_features))
+ opts->x_ix86_apx_features = apx_all;
+
/* Validate -mregparm= value. */
if (opts_set->x_ix86_regparm)
{
@@ -2559,6 +2587,21 @@ ix86_option_override_internal (bool main_args_p,
&= ~((OPTION_MASK_ISA_BMI | OPTION_MASK_ISA_BMI2 | OPTION_MASK_ISA_TBM)
& ~opts->x_ix86_isa_flags_explicit);
+ /* Set EVEX512 target if it is not explicitly set
+ when AVX512 is enabled. */
+ if (TARGET_AVX512F_P(opts->x_ix86_isa_flags)
+ && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA2_EVEX512))
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_EVEX512;
+
+ /* Disable AVX512{PF,ER,4VNNIW,4FMAPS} for -mno-evex512. */
+ if (!TARGET_EVEX512_P(opts->x_ix86_isa_flags2))
+ {
+ opts->x_ix86_isa_flags
+ &= ~(OPTION_MASK_ISA_AVX512PF | OPTION_MASK_ISA_AVX512ER);
+ opts->x_ix86_isa_flags2
+ &= ~(OPTION_MASK_ISA2_AVX5124FMAPS | OPTION_MASK_ISA2_AVX5124VNNIW);
+ }
+
/* Validate -mpreferred-stack-boundary= value or default it to
PREFERRED_STACK_BOUNDARY_DEFAULT. */
ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
@@ -2828,7 +2871,8 @@ ix86_option_override_internal (bool main_args_p,
opts->x_ix86_move_max = opts->x_prefer_vector_width_type;
if (opts_set->x_ix86_move_max == PVW_NONE)
{
- if (TARGET_AVX512F_P (opts->x_ix86_isa_flags))
+ if (TARGET_AVX512F_P (opts->x_ix86_isa_flags)
+ && TARGET_EVEX512_P (opts->x_ix86_isa_flags2))
opts->x_ix86_move_max = PVW_AVX512;
else
opts->x_ix86_move_max = PVW_AVX128;
@@ -2849,7 +2893,8 @@ ix86_option_override_internal (bool main_args_p,
opts->x_ix86_store_max = opts->x_prefer_vector_width_type;
if (opts_set->x_ix86_store_max == PVW_NONE)
{
- if (TARGET_AVX512F_P (opts->x_ix86_isa_flags))
+ if (TARGET_AVX512F_P (opts->x_ix86_isa_flags)
+ && TARGET_EVEX512_P (opts->x_ix86_isa_flags2))
opts->x_ix86_store_max = PVW_AVX512;
else
opts->x_ix86_store_max = PVW_AVX128;
@@ -3128,13 +3173,13 @@ ix86_simd_clone_adjust (struct cgraph_node *node)
case 'e':
if (TARGET_PREFER_AVX256)
{
- if (!TARGET_AVX512F)
- str = "avx512f,prefer-vector-width=512";
+ if (!TARGET_AVX512F || !TARGET_EVEX512)
+ str = "avx512f,evex512,prefer-vector-width=512";
else
str = "prefer-vector-width=512";
}
- else if (!TARGET_AVX512F)
- str = "avx512f";
+ else if (!TARGET_AVX512F || !TARGET_EVEX512)
+ str = "avx512f,evex512";
break;
default:
gcc_unreachable ();
diff --git a/gcc/config/i386/i386-opts.h b/gcc/config/i386/i386-opts.h
index be359f3..2ec76a1 100644
--- a/gcc/config/i386/i386-opts.h
+++ b/gcc/config/i386/i386-opts.h
@@ -134,4 +134,12 @@ enum lam_type {
lam_u57
};
+enum apx_features {
+ apx_none = 0,
+ apx_egpr = 1 << 0,
+ apx_push2pop2 = 1 << 1,
+ apx_ndd = 1 << 2,
+ apx_all = apx_egpr | apx_push2pop2 | apx_ndd,
+};
+
#endif
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index 9ffb125..28d0eab 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -64,6 +64,8 @@ extern bool symbolic_reference_mentioned_p (rtx);
extern bool extended_reg_mentioned_p (rtx);
extern bool x86_extended_QIreg_mentioned_p (rtx_insn *);
extern bool x86_extended_reg_mentioned_p (rtx);
+extern bool x86_extended_rex2reg_mentioned_p (rtx);
+extern bool x86_evex_reg_mentioned_p (rtx [], int);
extern bool x86_maybe_negate_const_int (rtx *, machine_mode);
extern machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
@@ -78,6 +80,9 @@ extern bool ix86_expand_set_or_cpymem (rtx, rtx, rtx, rtx, rtx, rtx,
rtx, rtx, rtx, rtx, bool);
extern bool ix86_expand_cmpstrn_or_cmpmem (rtx, rtx, rtx, rtx, rtx, bool);
+extern enum reg_class ix86_insn_base_reg_class (rtx_insn *);
+extern bool ix86_regno_ok_for_insn_base_p (int, rtx_insn *);
+extern enum reg_class ix86_insn_index_reg_class (rtx_insn *);
extern bool constant_address_p (rtx);
extern bool legitimate_pic_operand_p (rtx);
extern bool legitimate_pic_address_disp_p (rtx);
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index 477e6ce..641e768 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -17,6 +17,7 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+#define INCLUDE_STRING
#define IN_TARGET_CODE 1
#include "config.h"
@@ -169,7 +170,12 @@ enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
/* Mask registers. */
ALL_MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS,
- MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS
+ MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS,
+ /* REX2 registers */
+ GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
+ GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
+ GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
+ GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
};
/* The "default" register map used in 32bit mode. */
@@ -227,7 +233,10 @@ int const debugger64_register_map[FIRST_PSEUDO_REGISTER] =
/* AVX-512 registers 24-31 */
75, 76, 77, 78, 79, 80, 81, 82,
/* Mask registers */
- 118, 119, 120, 121, 122, 123, 124, 125
+ 118, 119, 120, 121, 122, 123, 124, 125,
+ /* REX2 extended integer registers */
+ 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145
};
/* Define the register numbers to be used in Dwarf debugging information.
@@ -521,6 +530,13 @@ ix86_conditional_register_usage (void)
accessible_reg_set &= ~reg_class_contents[ALL_MASK_REGS];
}
+
+ /* If APX is disabled, disable the registers. */
+ if (! (TARGET_APX_EGPR && TARGET_64BIT))
+ {
+ for (i = FIRST_REX2_INT_REG; i <= LAST_REX2_INT_REG; i++)
+ CLEAR_HARD_REG_BIT (accessible_reg_set, i);
+ }
}
/* Canonicalize a comparison from one we don't have to one we do have. */
@@ -647,7 +663,8 @@ ix86_can_inline_p (tree caller, tree callee)
static bool
ix86_in_large_data_p (tree exp)
{
- if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
+ if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC
+ && ix86_cmodel != CM_LARGE && ix86_cmodel != CM_LARGE_PIC)
return false;
if (exp == NULL_TREE)
@@ -858,7 +875,8 @@ x86_elf_aligned_decl_common (FILE *file, tree decl,
const char *name, unsigned HOST_WIDE_INT size,
unsigned align)
{
- if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
+ if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC
+ || ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
&& size > (unsigned int)ix86_section_threshold)
{
switch_to_section (get_named_section (decl, ".lbss", 0));
@@ -879,7 +897,8 @@ void
x86_output_aligned_bss (FILE *file, tree decl, const char *name,
unsigned HOST_WIDE_INT size, unsigned align)
{
- if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
+ if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC
+ || ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
&& size > (unsigned int)ix86_section_threshold)
switch_to_section (get_named_section (decl, ".lbss", 0));
else
@@ -1924,7 +1943,8 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum,
if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
&& GET_MODE_INNER (mode) == innermode)
{
- if (size == 64 && !TARGET_AVX512F && !TARGET_IAMCU)
+ if (size == 64 && (!TARGET_AVX512F || !TARGET_EVEX512)
+ && !TARGET_IAMCU)
{
static bool warnedavx512f;
static bool warnedavx512f_ret;
@@ -4347,7 +4367,7 @@ ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
/* AVX512F values are returned in ZMM0 if available. */
if (size == 64)
- return !TARGET_AVX512F;
+ return !TARGET_AVX512F || !TARGET_EVEX512;
}
if (mode == XFmode)
@@ -5262,7 +5282,7 @@ standard_sse_constant_p (rtx x, machine_mode pred_mode)
switch (GET_MODE_SIZE (mode))
{
case 64:
- if (TARGET_AVX512F)
+ if (TARGET_AVX512F && TARGET_EVEX512)
return 2;
break;
case 32:
@@ -5312,9 +5332,14 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
case MODE_XI:
case MODE_OI:
if (EXT_REX_SSE_REG_P (operands[0]))
- return (TARGET_AVX512VL
- ? "vpxord\t%x0, %x0, %x0"
- : "vpxord\t%g0, %g0, %g0");
+ {
+ if (TARGET_AVX512VL)
+ return "vpxord\t%x0, %x0, %x0";
+ else if (TARGET_EVEX512)
+ return "vpxord\t%g0, %g0, %g0";
+ else
+ gcc_unreachable ();
+ }
return "vpxor\t%x0, %x0, %x0";
case MODE_V2DF:
@@ -5323,16 +5348,28 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
/* FALLTHRU */
case MODE_V8DF:
case MODE_V4DF:
- if (!EXT_REX_SSE_REG_P (operands[0]))
- return "vxorpd\t%x0, %x0, %x0";
- else if (TARGET_AVX512DQ)
- return (TARGET_AVX512VL
- ? "vxorpd\t%x0, %x0, %x0"
- : "vxorpd\t%g0, %g0, %g0");
- else
- return (TARGET_AVX512VL
- ? "vpxorq\t%x0, %x0, %x0"
- : "vpxorq\t%g0, %g0, %g0");
+ if (EXT_REX_SSE_REG_P (operands[0]))
+ {
+ if (TARGET_AVX512DQ)
+ {
+ if (TARGET_AVX512VL)
+ return "vxorpd\t%x0, %x0, %x0";
+ else if (TARGET_EVEX512)
+ return "vxorpd\t%g0, %g0, %g0";
+ else
+ gcc_unreachable ();
+ }
+ else
+ {
+ if (TARGET_AVX512VL)
+ return "vpxorq\t%x0, %x0, %x0";
+ else if (TARGET_EVEX512)
+ return "vpxorq\t%g0, %g0, %g0";
+ else
+ gcc_unreachable ();
+ }
+ }
+ return "vxorpd\t%x0, %x0, %x0";
case MODE_V4SF:
if (!EXT_REX_SSE_REG_P (operands[0]))
@@ -5340,16 +5377,28 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
/* FALLTHRU */
case MODE_V16SF:
case MODE_V8SF:
- if (!EXT_REX_SSE_REG_P (operands[0]))
- return "vxorps\t%x0, %x0, %x0";
- else if (TARGET_AVX512DQ)
- return (TARGET_AVX512VL
- ? "vxorps\t%x0, %x0, %x0"
- : "vxorps\t%g0, %g0, %g0");
- else
- return (TARGET_AVX512VL
- ? "vpxord\t%x0, %x0, %x0"
- : "vpxord\t%g0, %g0, %g0");
+ if (EXT_REX_SSE_REG_P (operands[0]))
+ {
+ if (TARGET_AVX512DQ)
+ {
+ if (TARGET_AVX512VL)
+ return "vxorps\t%x0, %x0, %x0";
+ else if (TARGET_EVEX512)
+ return "vxorps\t%g0, %g0, %g0";
+ else
+ gcc_unreachable ();
+ }
+ else
+ {
+ if (TARGET_AVX512VL)
+ return "vpxord\t%x0, %x0, %x0";
+ else if (TARGET_EVEX512)
+ return "vpxord\t%g0, %g0, %g0";
+ else
+ gcc_unreachable ();
+ }
+ }
+ return "vxorps\t%x0, %x0, %x0";
default:
gcc_unreachable ();
@@ -5367,7 +5416,7 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
case MODE_XI:
case MODE_V8DF:
case MODE_V16SF:
- gcc_assert (TARGET_AVX512F);
+ gcc_assert (TARGET_AVX512F && TARGET_EVEX512);
return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";
case MODE_OI:
@@ -5379,14 +5428,18 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
case MODE_V2DF:
case MODE_V4SF:
gcc_assert (TARGET_SSE2);
- if (!EXT_REX_SSE_REG_P (operands[0]))
- return (TARGET_AVX
- ? "vpcmpeqd\t%0, %0, %0"
- : "pcmpeqd\t%0, %0");
- else if (TARGET_AVX512VL)
- return "vpternlogd\t{$0xFF, %0, %0, %0|%0, %0, %0, 0xFF}";
- else
- return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";
+ if (EXT_REX_SSE_REG_P (operands[0]))
+ {
+ if (TARGET_AVX512VL)
+ return "vpternlogd\t{$0xFF, %0, %0, %0|%0, %0, %0, 0xFF}";
+ else if (TARGET_EVEX512)
+ return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";
+ else
+ gcc_unreachable ();
+ }
+ return (TARGET_AVX
+ ? "vpcmpeqd\t%0, %0, %0"
+ : "pcmpeqd\t%0, %0");
default:
gcc_unreachable ();
@@ -5396,7 +5449,7 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
{
if (GET_MODE_SIZE (mode) == 64)
{
- gcc_assert (TARGET_AVX512F);
+ gcc_assert (TARGET_AVX512F && TARGET_EVEX512);
return "vpcmpeqd\t%t0, %t0, %t0";
}
else if (GET_MODE_SIZE (mode) == 32)
@@ -5408,7 +5461,7 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
}
else if (vector_all_ones_zero_extend_quarter_operand (x, mode))
{
- gcc_assert (TARGET_AVX512F);
+ gcc_assert (TARGET_AVX512F && TARGET_EVEX512);
return "vpcmpeqd\t%x0, %x0, %x0";
}
@@ -5462,6 +5515,12 @@ ix86_get_ssemov (rtx *operands, unsigned size,
bool evex_reg_p = (size == 64
|| EXT_REX_SSE_REG_P (operands[0])
|| EXT_REX_SSE_REG_P (operands[1]));
+
+ bool egpr_p = (TARGET_APX_EGPR
+ && (x86_extended_rex2reg_mentioned_p (operands[0])
+ || x86_extended_rex2reg_mentioned_p (operands[1])));
+ bool egpr_vl = egpr_p && TARGET_AVX512VL;
+
machine_mode scalar_mode;
const char *opcode = NULL;
@@ -5510,6 +5569,8 @@ ix86_get_ssemov (rtx *operands, unsigned size,
|| memory_operand (operands[1], mode))
gcc_unreachable ();
size = 64;
+ /* We need TARGET_EVEX512 to move into zmm register. */
+ gcc_assert (TARGET_EVEX512);
switch (type)
{
case opcode_int:
@@ -5534,12 +5595,18 @@ ix86_get_ssemov (rtx *operands, unsigned size,
{
case E_HFmode:
case E_BFmode:
- if (evex_reg_p)
+ if (evex_reg_p || egpr_vl)
opcode = (misaligned_p
? (TARGET_AVX512BW
? "vmovdqu16"
: "vmovdqu64")
: "vmovdqa64");
+ else if (egpr_p)
+ opcode = (misaligned_p
+ ? (TARGET_AVX512BW
+ ? "vmovdqu16"
+ : "%vmovups")
+ : "%vmovaps");
else
opcode = (misaligned_p
? (TARGET_AVX512BW
@@ -5554,8 +5621,10 @@ ix86_get_ssemov (rtx *operands, unsigned size,
opcode = misaligned_p ? "%vmovupd" : "%vmovapd";
break;
case E_TFmode:
- if (evex_reg_p)
+ if (evex_reg_p || egpr_vl)
opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+ else if (egpr_p)
+ opcode = misaligned_p ? "%vmovups" : "%vmovaps";
else
opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
break;
@@ -5568,12 +5637,18 @@ ix86_get_ssemov (rtx *operands, unsigned size,
switch (scalar_mode)
{
case E_QImode:
- if (evex_reg_p)
+ if (evex_reg_p || egpr_vl)
opcode = (misaligned_p
? (TARGET_AVX512BW
? "vmovdqu8"
: "vmovdqu64")
: "vmovdqa64");
+ else if (egpr_p)
+ opcode = (misaligned_p
+ ? (TARGET_AVX512BW
+ ? "vmovdqu8"
+ : "%vmovups")
+ : "%vmovaps");
else
opcode = (misaligned_p
? (TARGET_AVX512BW
@@ -5582,12 +5657,18 @@ ix86_get_ssemov (rtx *operands, unsigned size,
: "%vmovdqa");
break;
case E_HImode:
- if (evex_reg_p)
+ if (evex_reg_p || egpr_vl)
opcode = (misaligned_p
? (TARGET_AVX512BW
? "vmovdqu16"
: "vmovdqu64")
: "vmovdqa64");
+ else if (egpr_p)
+ opcode = (misaligned_p
+ ? (TARGET_AVX512BW
+ ? "vmovdqu16"
+ : "%vmovups")
+ : "%vmovaps");
else
opcode = (misaligned_p
? (TARGET_AVX512BW
@@ -5596,16 +5677,20 @@ ix86_get_ssemov (rtx *operands, unsigned size,
: "%vmovdqa");
break;
case E_SImode:
- if (evex_reg_p)
+ if (evex_reg_p || egpr_vl)
opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
+ else if (egpr_p)
+ opcode = misaligned_p ? "%vmovups" : "%vmovaps";
else
opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
break;
case E_DImode:
case E_TImode:
case E_OImode:
- if (evex_reg_p)
+ if (evex_reg_p || egpr_vl)
opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+ else if (egpr_p)
+ opcode = misaligned_p ? "%vmovups" : "%vmovaps";
else
opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
break;
@@ -6188,6 +6273,13 @@ ix86_code_end (void)
regno, false);
}
+ for (regno = FIRST_REX2_INT_REG; regno <= LAST_REX2_INT_REG; regno++)
+ {
+ if (TEST_HARD_REG_BIT (indirect_thunks_used, regno))
+ output_indirect_thunk_function (indirect_thunk_prefix_none,
+ regno, false);
+ }
+
for (regno = FIRST_INT_REG; regno <= LAST_INT_REG; regno++)
{
char name[32];
@@ -6384,6 +6476,26 @@ gen_pop (rtx arg)
stack_pointer_rtx)));
}
+/* Generate a "push2" pattern for input ARG. */
+rtx
+gen_push2 (rtx mem, rtx reg1, rtx reg2)
+{
+ struct machine_function *m = cfun->machine;
+ const int offset = UNITS_PER_WORD * 2;
+
+ if (m->fs.cfa_reg == stack_pointer_rtx)
+ m->fs.cfa_offset += offset;
+ m->fs.sp_offset += offset;
+
+ if (REG_P (reg1) && GET_MODE (reg1) != word_mode)
+ reg1 = gen_rtx_REG (word_mode, REGNO (reg1));
+
+ if (REG_P (reg2) && GET_MODE (reg2) != word_mode)
+ reg2 = gen_rtx_REG (word_mode, REGNO (reg2));
+
+ return gen_push2_di (mem, reg1, reg2);
+}
+
/* Return >= 0 if there is an unused call-clobbered register available
for the entire function. */
@@ -6625,6 +6737,18 @@ get_probe_interval (void)
#define SPLIT_STACK_AVAILABLE 256
+/* Helper function to determine whether push2/pop2 can be used in prologue or
+ epilogue for register save/restore. */
+static bool
+ix86_pro_and_epilogue_can_use_push2pop2 (int nregs)
+{
+ int aligned = cfun->machine->fs.sp_offset % 16 == 0;
+ return TARGET_APX_PUSH2POP2
+ && !cfun->machine->frame.save_regs_using_mov
+ && cfun->machine->func_type == TYPE_NORMAL
+ && (nregs + aligned) >= 3;
+}
+
/* Fill structure ix86_frame about frame of currently computed function. */
static void
@@ -6682,16 +6806,20 @@ ix86_compute_frame_layout (void)
Darwin's ABI specifies 128b alignment for both 32 and 64 bit variants
at call sites, including profile function calls.
- */
- if (((TARGET_64BIT_MS_ABI || TARGET_MACHO)
- && crtl->preferred_stack_boundary < 128)
- && (!crtl->is_leaf || cfun->calls_alloca != 0
- || ix86_current_function_calls_tls_descriptor
- || (TARGET_MACHO && crtl->profile)
- || ix86_incoming_stack_boundary < 128))
+
+ For APX push2/pop2, the stack also requires 128b alignment. */
+ if ((ix86_pro_and_epilogue_can_use_push2pop2 (frame->nregs)
+ && crtl->preferred_stack_boundary < 128)
+ || (((TARGET_64BIT_MS_ABI || TARGET_MACHO)
+ && crtl->preferred_stack_boundary < 128)
+ && (!crtl->is_leaf || cfun->calls_alloca != 0
+ || ix86_current_function_calls_tls_descriptor
+ || (TARGET_MACHO && crtl->profile)
+ || ix86_incoming_stack_boundary < 128)))
{
crtl->preferred_stack_boundary = 128;
- crtl->stack_alignment_needed = 128;
+ if (crtl->stack_alignment_needed < 128)
+ crtl->stack_alignment_needed = 128;
}
stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
@@ -7199,15 +7327,88 @@ choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align,
static void
ix86_emit_save_regs (void)
{
- unsigned int regno;
+ int regno;
rtx_insn *insn;
- for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
- if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
- {
- insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
+ if (!TARGET_APX_PUSH2POP2 || cfun->machine->func_type != TYPE_NORMAL)
+ {
+ for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
+ if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
+ {
+ insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ else
+ {
+ int regno_list[2];
+ regno_list[0] = regno_list[1] = -1;
+ int loaded_regnum = 0;
+ bool aligned = cfun->machine->fs.sp_offset % 16 == 0;
+
+ for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
+ if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
+ {
+ if (aligned)
+ {
+ regno_list[loaded_regnum++] = regno;
+ if (loaded_regnum == 2)
+ {
+ gcc_assert (regno_list[0] != -1
+ && regno_list[1] != -1
+ && regno_list[0] != regno_list[1]);
+ const int offset = UNITS_PER_WORD * 2;
+ rtx mem = gen_rtx_MEM (TImode,
+ gen_rtx_PRE_DEC (Pmode,
+ stack_pointer_rtx));
+ insn = emit_insn (gen_push2 (mem,
+ gen_rtx_REG (word_mode,
+ regno_list[0]),
+ gen_rtx_REG (word_mode,
+ regno_list[1])));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ rtx dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (3));
+
+ for (int i = 0; i < 2; i++)
+ {
+ rtx dwarf_reg = gen_rtx_REG (word_mode,
+ regno_list[i]);
+ rtx sp_offset = plus_constant (Pmode,
+ stack_pointer_rtx,
+ + UNITS_PER_WORD
+ * (1 - i));
+ rtx tmp = gen_rtx_SET (gen_frame_mem (DImode,
+ sp_offset),
+ dwarf_reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, i + 1) = tmp;
+ }
+ rtx sp_tmp = gen_rtx_SET (stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ -offset));
+ RTX_FRAME_RELATED_P (sp_tmp) = 1;
+ XVECEXP (dwarf, 0, 0) = sp_tmp;
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
+
+ loaded_regnum = 0;
+ regno_list[0] = regno_list[1] = -1;
+ }
+ }
+ else
+ {
+ insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ aligned = true;
+ }
+ }
+ if (loaded_regnum == 1)
+ {
+ insn = emit_insn (gen_push (gen_rtx_REG (word_mode,
+ regno_list[0])));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
}
/* Emit a single register save at CFA - CFA_OFFSET. */
@@ -9091,6 +9292,74 @@ ix86_emit_restore_reg_using_pop (rtx reg)
}
}
+/* Emit code to restore REG using a POP2 insn. */
+static void
+ix86_emit_restore_reg_using_pop2 (rtx reg1, rtx reg2)
+{
+ struct machine_function *m = cfun->machine;
+ const int offset = UNITS_PER_WORD * 2;
+
+ rtx mem = gen_rtx_MEM (TImode, gen_rtx_POST_INC (Pmode,
+ stack_pointer_rtx));
+ rtx_insn *insn = emit_insn (gen_pop2_di (reg1, mem, reg2));
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ rtx dwarf = NULL_RTX;
+ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg1, dwarf);
+ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg2, dwarf);
+ REG_NOTES (insn) = dwarf;
+ m->fs.sp_offset -= offset;
+
+ if (m->fs.cfa_reg == crtl->drap_reg
+ && (REGNO (reg1) == REGNO (crtl->drap_reg)
+ || REGNO (reg2) == REGNO (crtl->drap_reg)))
+ {
+ /* Previously we'd represented the CFA as an expression
+ like *(%ebp - 8). We've just popped that value from
+ the stack, which means we need to reset the CFA to
+ the drap register. This will remain until we restore
+ the stack pointer. */
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ REGNO (reg1) == REGNO (crtl->drap_reg) ? reg1 : reg2);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* This means that the DRAP register is valid for addressing too. */
+ m->fs.drap_valid = true;
+ return;
+ }
+
+ if (m->fs.cfa_reg == stack_pointer_rtx)
+ {
+ rtx x = plus_constant (Pmode, stack_pointer_rtx, offset);
+ x = gen_rtx_SET (stack_pointer_rtx, x);
+ add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ m->fs.cfa_offset -= offset;
+ }
+
+ /* When the frame pointer is the CFA, and we pop it, we are
+ swapping back to the stack pointer as the CFA. This happens
+ for stack frames that don't allocate other data, so we assume
+ the stack pointer is now pointing at the return address, i.e.
+ the function entry state, which makes the offset be 1 word. */
+ if (reg1 == hard_frame_pointer_rtx || reg2 == hard_frame_pointer_rtx)
+ {
+ m->fs.fp_valid = false;
+ if (m->fs.cfa_reg == hard_frame_pointer_rtx)
+ {
+ m->fs.cfa_reg = stack_pointer_rtx;
+ m->fs.cfa_offset -= offset;
+
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ plus_constant (Pmode, stack_pointer_rtx,
+ m->fs.cfa_offset));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+}
+
/* Emit code to restore saved registers using POP insns. */
static void
@@ -9103,6 +9372,48 @@ ix86_emit_restore_regs_using_pop (void)
ix86_emit_restore_reg_using_pop (gen_rtx_REG (word_mode, regno));
}
+/* Emit code to restore saved registers using POP2 insns. */
+
+static void
+ix86_emit_restore_regs_using_pop2 (void)
+{
+ int regno;
+ int regno_list[2];
+ regno_list[0] = regno_list[1] = -1;
+ int loaded_regnum = 0;
+ bool aligned = cfun->machine->fs.sp_offset % 16 == 0;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, false, true))
+ {
+ if (aligned)
+ {
+ regno_list[loaded_regnum++] = regno;
+ if (loaded_regnum == 2)
+ {
+ gcc_assert (regno_list[0] != -1
+ && regno_list[1] != -1
+ && regno_list[0] != regno_list[1]);
+
+ ix86_emit_restore_reg_using_pop2 (gen_rtx_REG (word_mode,
+ regno_list[0]),
+ gen_rtx_REG (word_mode,
+ regno_list[1]));
+ loaded_regnum = 0;
+ regno_list[0] = regno_list[1] = -1;
+ }
+ }
+ else
+ {
+ ix86_emit_restore_reg_using_pop (gen_rtx_REG (word_mode, regno));
+ aligned = true;
+ }
+ }
+
+ if (loaded_regnum == 1)
+ ix86_emit_restore_reg_using_pop (gen_rtx_REG (word_mode, regno_list[0]));
+}
+
/* Emit code and notes for the LEAVE instruction. If insn is non-null,
omits the emit and only attaches the notes. */
@@ -9642,7 +9953,10 @@ ix86_expand_epilogue (int style)
m->fs.cfa_reg == stack_pointer_rtx);
}
- ix86_emit_restore_regs_using_pop ();
+ if (TARGET_APX_PUSH2POP2 && m->func_type == TYPE_NORMAL)
+ ix86_emit_restore_regs_using_pop2 ();
+ else
+ ix86_emit_restore_regs_using_pop ();
}
/* If we used a stack pointer and haven't already got rid of it,
@@ -10726,7 +11040,7 @@ ix86_legitimate_constant_p (machine_mode mode, rtx x)
case E_OImode:
case E_XImode:
if (!standard_sse_constant_p (x, mode)
- && GET_MODE_SIZE (TARGET_AVX512F
+ && GET_MODE_SIZE (TARGET_AVX512F && TARGET_EVEX512
? XImode
: (TARGET_AVX
? OImode
@@ -11040,6 +11354,95 @@ ix86_validate_address_register (rtx op)
return NULL_RTX;
}
+/* Return true if insn memory address can use any available reg
+ in BASE_REG_CLASS or INDEX_REG_CLASS, otherwise false.
+ For APX, some instruction can't be encoded with gpr32
+ which is BASE_REG_CLASS or INDEX_REG_CLASS, for that case
+ returns false. */
+static bool
+ix86_memory_address_use_extended_reg_class_p (rtx_insn* insn)
+{
+ /* LRA will do some initialization with insn == NULL,
+ return the maximum reg class for that.
+ For other cases, real insn will be passed and checked. */
+ bool ret = true;
+ if (TARGET_APX_EGPR && insn)
+ {
+ if (asm_noperands (PATTERN (insn)) >= 0
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT)
+ return ix86_apx_inline_asm_use_gpr32;
+
+ if (INSN_CODE (insn) < 0)
+ return false;
+
+ /* Try recog the insn before calling get_attr_gpr32. Save
+ the current recog_data first. */
+ /* Also save which_alternative for current recog. */
+
+ struct recog_data_d recog_data_save = recog_data;
+ int which_alternative_saved = which_alternative;
+
+ /* Update the recog_data for alternative check. */
+ if (recog_data.insn != insn)
+ extract_insn_cached (insn);
+
+ /* If alternative is not set, loop through each alternative
+ of insn and get gpr32 attr for all enabled alternatives.
+ If any enabled alternatives has 0 value for gpr32, disallow
+ gpr32 for addressing. */
+ if (which_alternative_saved == -1)
+ {
+ alternative_mask enabled = get_enabled_alternatives (insn);
+ bool curr_insn_gpr32 = false;
+ for (int i = 0; i < recog_data.n_alternatives; i++)
+ {
+ if (!TEST_BIT (enabled, i))
+ continue;
+ which_alternative = i;
+ curr_insn_gpr32 = get_attr_gpr32 (insn);
+ if (!curr_insn_gpr32)
+ ret = false;
+ }
+ }
+ else
+ {
+ which_alternative = which_alternative_saved;
+ ret = get_attr_gpr32 (insn);
+ }
+
+ recog_data = recog_data_save;
+ which_alternative = which_alternative_saved;
+ }
+
+ return ret;
+}
+
+/* For APX, some instructions can't be encoded with gpr32. */
+enum reg_class
+ix86_insn_base_reg_class (rtx_insn* insn)
+{
+ if (ix86_memory_address_use_extended_reg_class_p (insn))
+ return BASE_REG_CLASS;
+ return GENERAL_GPR16;
+}
+
+bool
+ix86_regno_ok_for_insn_base_p (int regno, rtx_insn* insn)
+{
+
+ if (ix86_memory_address_use_extended_reg_class_p (insn))
+ return GENERAL_REGNO_P (regno);
+ return GENERAL_GPR16_REGNO_P (regno);
+}
+
+enum reg_class
+ix86_insn_index_reg_class (rtx_insn* insn)
+{
+ if (ix86_memory_address_use_extended_reg_class_p (insn))
+ return INDEX_REG_CLASS;
+ return INDEX_GPR16;
+}
+
/* Recognizes RTL expressions that are valid memory addresses for an
instruction. The MODE argument is the machine mode for the MEM
expression that wants to use this address.
@@ -13046,7 +13449,7 @@ print_reg (rtx x, int code, FILE *file)
/* Irritatingly, AMD extended registers use
different naming convention: "r%d[bwd]" */
- if (REX_INT_REGNO_P (regno))
+ if (REX_INT_REGNO_P (regno) || REX2_INT_REGNO_P (regno))
{
gcc_assert (TARGET_64BIT);
switch (msize)
@@ -15543,6 +15946,13 @@ ix86_avoid_lea_for_addr (rtx_insn *insn, rtx operands[])
&& (regno0 == regno1 || regno0 == regno2))
return true;
+ /* Split with -Oz if the encoding requires fewer bytes. */
+ if (optimize_size > 1
+ && parts.scale > 1
+ && !parts.base
+ && (!parts.disp || parts.disp == const0_rtx))
+ return true;
+
/* Check we need to optimize. */
if (!TARGET_AVOID_LEA_FOR_ADDR || optimize_function_for_size_p (cfun))
return false;
@@ -15745,6 +16155,8 @@ ix86_build_const_vector (machine_mode mode, bool vect, rtx value)
case E_V2DImode:
gcc_assert (vect);
/* FALLTHRU */
+ case E_V2HFmode:
+ case E_V4HFmode:
case E_V8HFmode:
case E_V16HFmode:
case E_V32HFmode:
@@ -15786,6 +16198,8 @@ ix86_build_signbit_mask (machine_mode mode, bool vect, bool invert)
switch (mode)
{
+ case E_V2HFmode:
+ case E_V4HFmode:
case E_V8HFmode:
case E_V16HFmode:
case E_V32HFmode:
@@ -16260,7 +16674,7 @@ ix86_output_jmp_thunk_or_indirect (const char *thunk_name, const int regno)
{
if (thunk_name != NULL)
{
- if (REX_INT_REGNO_P (regno)
+ if ((REX_INT_REGNO_P (regno) || REX2_INT_REGNO_P (regno))
&& ix86_indirect_branch_cs_prefix)
fprintf (asm_out_file, "\tcs\n");
fprintf (asm_out_file, "\tjmp\t");
@@ -16312,7 +16726,7 @@ ix86_output_indirect_branch_via_reg (rtx call_op, bool sibcall_p)
{
if (thunk_name != NULL)
{
- if (REX_INT_REGNO_P (regno)
+ if ((REX_INT_REGNO_P (regno) || REX2_INT_REGNO_P (regno))
&& ix86_indirect_branch_cs_prefix)
fprintf (asm_out_file, "\tcs\n");
fprintf (asm_out_file, "\tcall\t");
@@ -17069,19 +17483,26 @@ ix86_attr_length_vex_default (rtx_insn *insn, bool has_0f_opcode,
for (i = recog_data.n_operands - 1; i >= 0; --i)
if (REG_P (recog_data.operand[i]))
{
- /* REX.W bit uses 3 byte VEX prefix. */
+ /* REX.W bit uses 3 byte VEX prefix.
+ REX2 with vex use extended EVEX prefix length is 4-byte. */
if (GET_MODE (recog_data.operand[i]) == DImode
&& GENERAL_REG_P (recog_data.operand[i]))
return 3 + 1;
/* REX.B bit requires 3-byte VEX. Right here we don't know which
- operand will be encoded using VEX.B, so be conservative. */
+ operand will be encoded using VEX.B, so be conservative.
+ REX2 with vex use extended EVEX prefix length is 4-byte. */
if (REX_INT_REGNO_P (recog_data.operand[i])
+ || REX2_INT_REGNO_P (recog_data.operand[i])
|| REX_SSE_REGNO_P (recog_data.operand[i]))
reg_only = 3 + 1;
}
else if (MEM_P (recog_data.operand[i]))
{
+ /* REX2.X or REX2.B bits require the 4-byte extended EVEX prefix. */
+ if (x86_extended_rex2reg_mentioned_p (recog_data.operand[i]))
+ return 4;
+
/* REX.X or REX.B bits use 3 byte VEX prefix. */
if (x86_extended_reg_mentioned_p (recog_data.operand[i]))
return 3 + 1;
@@ -19194,10 +19615,14 @@ ix86_vectorize_builtin_scatter (const_tree vectype,
{
bool si;
enum ix86_builtins code;
+ const machine_mode mode = TYPE_MODE (TREE_TYPE (vectype));
if (!TARGET_AVX512F)
return NULL_TREE;
+ if (!TARGET_EVEX512 && GET_MODE_SIZE (mode) == 64)
+ return NULL_TREE;
+
if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 2u)
? !TARGET_USE_SCATTER_2PARTS
: (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 4u)
@@ -19518,6 +19943,8 @@ ix86_register_priority (int hard_regno)
/* New x86-64 int registers result in bigger code size. Discourage them. */
if (REX_INT_REGNO_P (hard_regno))
return 2;
+ if (REX2_INT_REGNO_P (hard_regno))
+ return 2;
/* New x86-64 SSE registers result in bigger code size. Discourage them. */
if (REX_SSE_REGNO_P (hard_regno))
return 2;
@@ -20268,8 +20695,8 @@ ix86_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
return MASK_PAIR_REGNO_P(regno);
return ((TARGET_AVX512F && VALID_MASK_REG_MODE (mode))
- || (TARGET_AVX512BW
- && VALID_MASK_AVX512BW_MODE (mode)));
+ || (TARGET_AVX512BW && mode == SImode)
+ || (TARGET_AVX512BW && TARGET_EVEX512 && mode == DImode));
}
if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
@@ -20286,7 +20713,7 @@ ix86_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
- any of 512-bit wide vector mode
- any scalar mode. */
if (TARGET_AVX512F
- && (VALID_AVX512F_REG_OR_XI_MODE (mode)
+ && ((VALID_AVX512F_REG_OR_XI_MODE (mode) && TARGET_EVEX512)
|| VALID_AVX512F_SCALAR_MODE (mode)))
return true;
@@ -20538,7 +20965,7 @@ ix86_set_reg_reg_cost (machine_mode mode)
case MODE_VECTOR_INT:
case MODE_VECTOR_FLOAT:
- if ((TARGET_AVX512F && VALID_AVX512F_REG_MODE (mode))
+ if ((TARGET_AVX512F && TARGET_EVEX512 && VALID_AVX512F_REG_MODE (mode))
|| (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
|| (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
|| (TARGET_SSE && VALID_SSE_REG_MODE (mode))
@@ -21267,7 +21694,8 @@ ix86_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
{
/* (ior (not ...) ...) can be a single insn in AVX512. */
if (GET_CODE (XEXP (x, 0)) == NOT && TARGET_AVX512F
- && (GET_MODE_SIZE (mode) == 64
+ && ((TARGET_EVEX512
+ && GET_MODE_SIZE (mode) == 64)
|| (TARGET_AVX512VL
&& (GET_MODE_SIZE (mode) == 32
|| GET_MODE_SIZE (mode) == 16))))
@@ -21315,7 +21743,8 @@ ix86_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
/* (and (not ...) (not ...)) can be a single insn in AVX512. */
if (GET_CODE (right) == NOT && TARGET_AVX512F
- && (GET_MODE_SIZE (mode) == 64
+ && ((TARGET_EVEX512
+ && GET_MODE_SIZE (mode) == 64)
|| (TARGET_AVX512VL
&& (GET_MODE_SIZE (mode) == 32
|| GET_MODE_SIZE (mode) == 16))))
@@ -21385,7 +21814,8 @@ ix86_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
{
/* (not (xor ...)) can be a single insn in AVX512. */
if (GET_CODE (XEXP (x, 0)) == XOR && TARGET_AVX512F
- && (GET_MODE_SIZE (mode) == 64
+ && ((TARGET_EVEX512
+ && GET_MODE_SIZE (mode) == 64)
|| (TARGET_AVX512VL
&& (GET_MODE_SIZE (mode) == 32
|| GET_MODE_SIZE (mode) == 16))))
@@ -22764,12 +23194,41 @@ x86_extended_reg_mentioned_p (rtx insn)
{
const_rtx x = *iter;
if (REG_P (x)
- && (REX_INT_REGNO_P (REGNO (x)) || REX_SSE_REGNO_P (REGNO (x))))
+ && (REX_INT_REGNO_P (REGNO (x)) || REX_SSE_REGNO_P (REGNO (x))
+ || REX2_INT_REGNO_P (REGNO (x))))
return true;
}
return false;
}
+/* Return true when INSN mentions register that must be encoded using REX2
+ prefix. */
+bool
+x86_extended_rex2reg_mentioned_p (rtx insn)
+{
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, INSN_P (insn) ? PATTERN (insn) : insn, NONCONST)
+ {
+ const_rtx x = *iter;
+ if (REG_P (x) && REX2_INT_REGNO_P (REGNO (x)))
+ return true;
+ }
+ return false;
+}
+
+/* Return true when rtx operands mentions register that must be encoded using
+ evex prefix. */
+bool
+x86_evex_reg_mentioned_p (rtx operands[], int nops)
+{
+ int i;
+ for (i = 0; i < nops; i++)
+ if (EXT_REX_SSE_REG_P (operands[i])
+ || x86_extended_rex2reg_mentioned_p (operands[i]))
+ return true;
+ return false;
+}
+
/* If profitable, negate (without causing overflow) integer constant
of mode MODE at location LOC. Return true in this case. */
bool
@@ -23000,7 +23459,7 @@ ix86_vector_mode_supported_p (machine_mode mode)
return true;
if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
return true;
- if (TARGET_AVX512F && VALID_AVX512F_REG_MODE (mode))
+ if (TARGET_AVX512F && TARGET_EVEX512 && VALID_AVX512F_REG_MODE (mode))
return true;
if ((TARGET_MMX || TARGET_MMX_WITH_SSE)
&& VALID_MMX_REG_MODE (mode))
@@ -23025,6 +23484,93 @@ ix86_c_mode_for_suffix (char suffix)
return VOIDmode;
}
+/* Helper function to map common constraints to non-EGPR ones.
+ All related constraints have h prefix, and h plus Upper letter
+ means the constraint is strictly EGPR enabled, while h plus
+ lower letter indicates the constraint is strictly gpr16 only.
+
+ Specially for "g" constraint, split it to rmi as there is
+ no corresponding general constraint define for backend.
+
+ Here is the full list to map constraints that may involve
+ gpr to h prefixed.
+
+ "g" -> "jrjmi"
+ "r" -> "jr"
+ "m" -> "jm"
+ "<" -> "j<"
+ ">" -> "j>"
+ "o" -> "jo"
+ "V" -> "jV"
+ "p" -> "jp"
+ "Bm" -> "ja"
+*/
+
+static void map_egpr_constraints (vec<const char *> &constraints)
+{
+ for (size_t i = 0; i < constraints.length(); i++)
+ {
+ const char *cur = constraints[i];
+
+ if (startswith (cur, "=@cc"))
+ continue;
+
+ int len = strlen (cur);
+ auto_vec<char> buf;
+
+ for (int j = 0; j < len; j++)
+ {
+ switch (cur[j])
+ {
+ case 'g':
+ buf.safe_push ('j');
+ buf.safe_push ('r');
+ buf.safe_push ('j');
+ buf.safe_push ('m');
+ buf.safe_push ('i');
+ break;
+ case 'r':
+ case 'm':
+ case '<':
+ case '>':
+ case 'o':
+ case 'V':
+ case 'p':
+ buf.safe_push ('j');
+ buf.safe_push (cur[j]);
+ break;
+ case 'B':
+ if (cur[j + 1] == 'm')
+ {
+ buf.safe_push ('j');
+ buf.safe_push ('a');
+ j++;
+ }
+ else
+ {
+ buf.safe_push (cur[j]);
+ buf.safe_push (cur[j + 1]);
+ j++;
+ }
+ break;
+ case 'T':
+ case 'Y':
+ case 'W':
+ case 'j':
+ buf.safe_push (cur[j]);
+ buf.safe_push (cur[j + 1]);
+ j++;
+ break;
+ default:
+ buf.safe_push (cur[j]);
+ break;
+ }
+ }
+ buf.safe_push ('\0');
+ constraints[i] = xstrdup (buf.address ());
+ }
+}
+
/* Worker function for TARGET_MD_ASM_ADJUST.
We implement asm flag outputs, and maintain source compatibility
@@ -23039,6 +23585,10 @@ ix86_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
bool saw_asm_flag = false;
start_sequence ();
+
+ if (TARGET_APX_EGPR && !ix86_apx_inline_asm_use_gpr32)
+ map_egpr_constraints (constraints);
+
for (unsigned i = 0, n = outputs.length (); i < n; ++i)
{
const char *con = constraints[i];
@@ -23690,7 +24240,7 @@ ix86_preferred_simd_mode (scalar_mode mode)
switch (mode)
{
case E_QImode:
- if (TARGET_AVX512BW && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512BW && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
return V64QImode;
else if (TARGET_AVX && !TARGET_PREFER_AVX128)
return V32QImode;
@@ -23698,7 +24248,7 @@ ix86_preferred_simd_mode (scalar_mode mode)
return V16QImode;
case E_HImode:
- if (TARGET_AVX512BW && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512BW && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
return V32HImode;
else if (TARGET_AVX && !TARGET_PREFER_AVX128)
return V16HImode;
@@ -23706,7 +24256,7 @@ ix86_preferred_simd_mode (scalar_mode mode)
return V8HImode;
case E_SImode:
- if (TARGET_AVX512F && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
return V16SImode;
else if (TARGET_AVX && !TARGET_PREFER_AVX128)
return V8SImode;
@@ -23714,7 +24264,7 @@ ix86_preferred_simd_mode (scalar_mode mode)
return V4SImode;
case E_DImode:
- if (TARGET_AVX512F && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
return V8DImode;
else if (TARGET_AVX && !TARGET_PREFER_AVX128)
return V4DImode;
@@ -23728,15 +24278,16 @@ ix86_preferred_simd_mode (scalar_mode mode)
{
if (TARGET_PREFER_AVX128)
return V8HFmode;
- else if (TARGET_PREFER_AVX256)
+ else if (TARGET_PREFER_AVX256 || !TARGET_EVEX512)
return V16HFmode;
}
- return V32HFmode;
+ if (TARGET_EVEX512)
+ return V32HFmode;
}
return word_mode;
case E_SFmode:
- if (TARGET_AVX512F && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
return V16SFmode;
else if (TARGET_AVX && !TARGET_PREFER_AVX128)
return V8SFmode;
@@ -23744,7 +24295,7 @@ ix86_preferred_simd_mode (scalar_mode mode)
return V4SFmode;
case E_DFmode:
- if (TARGET_AVX512F && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
return V8DFmode;
else if (TARGET_AVX && !TARGET_PREFER_AVX128)
return V4DFmode;
@@ -23764,13 +24315,13 @@ ix86_preferred_simd_mode (scalar_mode mode)
static unsigned int
ix86_autovectorize_vector_modes (vector_modes *modes, bool all)
{
- if (TARGET_AVX512F && !TARGET_PREFER_AVX256)
+ if (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)
{
modes->safe_push (V64QImode);
modes->safe_push (V32QImode);
modes->safe_push (V16QImode);
}
- else if (TARGET_AVX512F && all)
+ else if (TARGET_AVX512F && TARGET_EVEX512 && all)
{
modes->safe_push (V32QImode);
modes->safe_push (V16QImode);
@@ -23808,7 +24359,7 @@ ix86_get_mask_mode (machine_mode data_mode)
unsigned elem_size = vector_size / nunits;
/* Scalar mask case. */
- if ((TARGET_AVX512F && vector_size == 64)
+ if ((TARGET_AVX512F && TARGET_EVEX512 && vector_size == 64)
|| (TARGET_AVX512VL && (vector_size == 32 || vector_size == 16)))
{
if (elem_size == 4
@@ -24306,7 +24857,7 @@ ix86_simd_clone_compute_vecsize_and_simdlen (struct cgraph_node *node,
{
/* If the function isn't exported, we can pick up just one ISA
for the clones. */
- if (TARGET_AVX512F)
+ if (TARGET_AVX512F && TARGET_EVEX512)
clonei->vecsize_mangle = 'e';
else if (TARGET_AVX2)
clonei->vecsize_mangle = 'd';
@@ -24398,17 +24949,17 @@ ix86_simd_clone_usable (struct cgraph_node *node)
return -1;
if (!TARGET_AVX)
return 0;
- return TARGET_AVX512F ? 3 : TARGET_AVX2 ? 2 : 1;
+ return (TARGET_AVX512F && TARGET_EVEX512) ? 3 : TARGET_AVX2 ? 2 : 1;
case 'c':
if (!TARGET_AVX)
return -1;
- return TARGET_AVX512F ? 2 : TARGET_AVX2 ? 1 : 0;
+ return (TARGET_AVX512F && TARGET_EVEX512) ? 2 : TARGET_AVX2 ? 1 : 0;
case 'd':
if (!TARGET_AVX2)
return -1;
- return TARGET_AVX512F ? 1 : 0;
+ return (TARGET_AVX512F && TARGET_EVEX512) ? 1 : 0;
case 'e':
- if (!TARGET_AVX512F)
+ if (!TARGET_AVX512F || !TARGET_EVEX512)
return -1;
return 0;
default:
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 3e8488f..abfe167 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -51,6 +51,10 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define TARGET_MMX_WITH_SSE (TARGET_64BIT && TARGET_SSE2)
+#define TARGET_APX_EGPR (ix86_apx_features & apx_egpr)
+#define TARGET_APX_PUSH2POP2 (ix86_apx_features & apx_push2pop2)
+#define TARGET_APX_NDD (ix86_apx_features & apx_ndd)
+
#include "config/vxworks-dummy.h"
#include "config/i386/i386-opts.h"
@@ -449,6 +453,7 @@ extern unsigned char ix86_tune_features[X86_TUNE_LAST];
#define TARGET_DEST_FALSE_DEP_FOR_GLC \
ix86_tune_features[X86_TUNE_DEST_FALSE_DEP_FOR_GLC]
#define TARGET_SLOW_STC ix86_tune_features[X86_TUNE_SLOW_STC]
+#define TARGET_USE_RCR ix86_tune_features[X86_TUNE_USE_RCR]
/* Feature tests against the various architecture variations. */
enum ix86_arch_indices {
@@ -770,7 +775,8 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
TARGET_ABSOLUTE_BIGGEST_ALIGNMENT. */
#define BIGGEST_ALIGNMENT \
- (TARGET_IAMCU ? 32 : (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : 128)))
+ (TARGET_IAMCU ? 32 : ((TARGET_AVX512F && TARGET_EVEX512) \
+ ? 512 : (TARGET_AVX ? 256 : 128)))
/* Maximum stack alignment. */
#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT
@@ -944,7 +950,11 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
0, 0, 0, 0, 0, 0, 0, 0, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
- 0, 0, 0, 0, 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+/* r16, r17, r18, r19, r20, r21, r22, r23*/ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+/* r24, r25, r26, r27, r28, r29, r30, r31*/ \
+ 0, 0, 0, 0, 0, 0, 0, 0} \
/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
@@ -981,7 +991,11 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
1, 1, 1, 1, 1, 1, 1, 1, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
- 1, 1, 1, 1, 1, 1, 1, 1 }
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+/* r16, r17, r18, r19, r20, r21, r22, r23*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+/* r24, r25, r26, r27, r28, r29, r30, r31*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1} \
/* Order in which to allocate registers. Each register must be
listed once, even those in FIXED_REGISTERS. List frame pointer
@@ -997,7 +1011,8 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, \
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75 }
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, \
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91}
/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
to be rearranged based on a particular function. When using sse math,
@@ -1005,6 +1020,14 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc ()
+#define INSN_BASE_REG_CLASS(INSN) \
+ ix86_insn_base_reg_class (INSN)
+
+#define REGNO_OK_FOR_INSN_BASE_P(NUM, INSN) \
+ ix86_regno_ok_for_insn_base_p (NUM, INSN)
+
+#define INSN_INDEX_REG_CLASS(INSN) \
+ ix86_insn_index_reg_class (INSN)
#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)
@@ -1199,6 +1222,9 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
#define FIRST_MASK_REG MASK0_REG
#define LAST_MASK_REG MASK7_REG
+#define FIRST_REX2_INT_REG R16_REG
+#define LAST_REX2_INT_REG R31_REG
+
/* Override this in other tm.h files to cope with various OS lossage
requiring a frame pointer. */
#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
@@ -1276,6 +1302,12 @@ enum reg_class
INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */
LEGACY_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
GENERAL_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp
+ %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15
+ %r16 %r17 %r18 %r19 %r20 %r21 %r22 %r23
+ %r24 %r25 %r26 %r27 %r28 %r29 %r30 %r31 */
+ GENERAL_GPR16, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp
+ %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
+ INDEX_GPR16, /* %eax %ebx %ecx %edx %esi %edi %ebp
%r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
FP_TOP_REG, FP_SECOND_REG, /* %st(0) %st(1) */
FLOAT_REGS,
@@ -1339,6 +1371,8 @@ enum reg_class
"INDEX_REGS", \
"LEGACY_REGS", \
"GENERAL_REGS", \
+ "GENERAL_GPR16", \
+ "INDEX_GPR16", \
"FP_TOP_REG", "FP_SECOND_REG", \
"FLOAT_REGS", \
"SSE_FIRST_REG", \
@@ -1374,9 +1408,11 @@ enum reg_class
{ 0x0f, 0x0, 0x0 }, /* Q_REGS */ \
{ 0x900f0, 0x0, 0x0 }, /* NON_Q_REGS */ \
{ 0x7e, 0xff0, 0x0 }, /* TLS_GOTBASE_REGS */ \
- { 0x7f, 0xff0, 0x0 }, /* INDEX_REGS */ \
+ { 0x7f, 0xff0, 0xffff000 }, /* INDEX_REGS */ \
{ 0x900ff, 0x0, 0x0 }, /* LEGACY_REGS */ \
- { 0x900ff, 0xff0, 0x0 }, /* GENERAL_REGS */ \
+ { 0x900ff, 0xff0, 0xffff000 }, /* GENERAL_REGS */ \
+ { 0x900ff, 0xff0, 0x0 }, /* GENERAL_GPR16 */ \
+ { 0x0007f, 0xff0, 0x0 }, /* INDEX_GPR16 */ \
{ 0x100, 0x0, 0x0 }, /* FP_TOP_REG */ \
{ 0x200, 0x0, 0x0 }, /* FP_SECOND_REG */ \
{ 0xff00, 0x0, 0x0 }, /* FLOAT_REGS */ \
@@ -1386,13 +1422,13 @@ enum reg_class
{ 0xff00000, 0xfffff000, 0xf }, /* ALL_SSE_REGS */ \
{ 0xf0000000, 0xf, 0x0 }, /* MMX_REGS */ \
{ 0xff0ff00, 0xfffff000, 0xf }, /* FLOAT_SSE_REGS */ \
- { 0x9ffff, 0xff0, 0x0 }, /* FLOAT_INT_REGS */ \
- { 0xff900ff, 0xfffffff0, 0xf }, /* INT_SSE_REGS */ \
- { 0xff9ffff, 0xfffffff0, 0xf }, /* FLOAT_INT_SSE_REGS */ \
+ { 0x9ffff, 0xff0, 0xffff000 }, /* FLOAT_INT_REGS */ \
+ { 0xff900ff, 0xfffffff0, 0xffff00f }, /* INT_SSE_REGS */ \
+ { 0xff9ffff, 0xfffffff0, 0xffff00f }, /* FLOAT_INT_SSE_REGS */ \
{ 0x0, 0x0, 0xfe0 }, /* MASK_REGS */ \
{ 0x0, 0x0, 0xff0 }, /* ALL_MASK_REGS */ \
- { 0x900ff, 0xff0, 0xff0 }, /* INT_MASK_REGS */ \
-{ 0xffffffff, 0xffffffff, 0xfff } /* ALL_REGS */ \
+ { 0x900ff, 0xff0, 0xffffff0 }, /* INT_MASK_REGS */ \
+{ 0xffffffff, 0xffffffff, 0xfffffff } /* ALL_REGS */ \
}
/* The same information, inverted:
@@ -1422,13 +1458,20 @@ enum reg_class
#define REX_INT_REGNO_P(N) \
IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG)
+#define REX2_INT_REG_P(X) (REG_P (X) && REX2_INT_REGNO_P (REGNO (X)))
+#define REX2_INT_REGNO_P(N) \
+ IN_RANGE ((N), FIRST_REX2_INT_REG, LAST_REX2_INT_REG)
+
#define GENERAL_REG_P(X) (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
#define GENERAL_REGNO_P(N) \
- (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N))
+ (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N) || REX2_INT_REGNO_P (N))
#define INDEX_REG_P(X) (REG_P (X) && INDEX_REGNO_P (REGNO (X)))
#define INDEX_REGNO_P(N) \
- (LEGACY_INDEX_REGNO_P (N) || REX_INT_REGNO_P (N))
+ (LEGACY_INDEX_REGNO_P (N) || REX_INT_REGNO_P (N) || REX2_INT_REGNO_P (N))
+
+#define GENERAL_GPR16_REGNO_P(N) \
+ (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N))
#define ANY_QI_REG_P(X) (REG_P (X) && ANY_QI_REGNO_P (REGNO (X)))
#define ANY_QI_REGNO_P(N) \
@@ -1807,7 +1850,7 @@ typedef struct ix86_args {
MOVE_MAX_PIECES defaults to MOVE_MAX. */
#define MOVE_MAX \
- ((TARGET_AVX512F \
+ ((TARGET_AVX512F && TARGET_EVEX512 \
&& (ix86_move_max == PVW_AVX512 \
|| ix86_store_max == PVW_AVX512)) \
? 64 \
@@ -1826,7 +1869,7 @@ typedef struct ix86_args {
store_by_pieces of 16/32/64 bytes. */
#define STORE_MAX_PIECES \
(TARGET_INTER_UNIT_MOVES_TO_VEC \
- ? ((TARGET_AVX512F && ix86_store_max == PVW_AVX512) \
+ ? ((TARGET_AVX512F && TARGET_EVEX512 && ix86_store_max == PVW_AVX512) \
? 64 \
: ((TARGET_AVX \
&& ix86_store_max >= PVW_AVX256) \
@@ -1986,7 +2029,9 @@ do { \
"xmm20", "xmm21", "xmm22", "xmm23", \
"xmm24", "xmm25", "xmm26", "xmm27", \
"xmm28", "xmm29", "xmm30", "xmm31", \
- "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" }
+ "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" }
#define REGISTER_NAMES HI_REGISTER_NAMES
@@ -2223,6 +2268,7 @@ enum processor_type
PROCESSOR_TREMONT,
PROCESSOR_SIERRAFOREST,
PROCESSOR_GRANDRIDGE,
+ PROCESSOR_CLEARWATERFOREST,
PROCESSOR_KNL,
PROCESSOR_KNM,
PROCESSOR_SKYLAKE,
@@ -2240,6 +2286,7 @@ enum processor_type
PROCESSOR_GRANITERAPIDS_D,
PROCESSOR_ARROWLAKE,
PROCESSOR_ARROWLAKE_S,
+ PROCESSOR_PANTHERLAKE,
PROCESSOR_INTEL,
PROCESSOR_LUJIAZUI,
PROCESSOR_GEODE,
@@ -2357,6 +2404,9 @@ constexpr wide_int_bitmask PTA_GRANDRIDGE = PTA_SIERRAFOREST | PTA_RAOINT;
constexpr wide_int_bitmask PTA_ARROWLAKE = PTA_SIERRAFOREST;
constexpr wide_int_bitmask PTA_ARROWLAKE_S = PTA_ARROWLAKE | PTA_AVXVNNIINT16
| PTA_SHA512 | PTA_SM3 | PTA_SM4;
+constexpr wide_int_bitmask PTA_CLEARWATERFOREST = PTA_ARROWLAKE_S | PTA_PREFETCHI
+ | PTA_USER_MSR;
+constexpr wide_int_bitmask PTA_PANTHERLAKE = PTA_ARROWLAKE_S | PTA_PREFETCHI;
constexpr wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
| PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
constexpr wide_int_bitmask PTA_ZNVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index eef8a0e..f90cf1c 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -208,6 +208,10 @@
;; For insn_callee_abi:
UNSPEC_CALLEE_ABI
+ ;; For PUSH2/POP2 support
+ UNSPEC_APXPUSH2
+ UNSPEC_APXPOP2_LOW
+ UNSPEC_APXPOP2_HIGH
])
(define_c_enum "unspecv" [
@@ -340,6 +344,10 @@
;; For PREFETCHI support
UNSPECV_PREFETCHI
+
+ ;; For USER_MSR support
+ UNSPECV_URDMSR
+ UNSPECV_UWRMSR
])
;; Constants to represent rounding modes in the ROUND instruction
@@ -464,7 +472,23 @@
(MASK5_REG 73)
(MASK6_REG 74)
(MASK7_REG 75)
- (FIRST_PSEUDO_REG 76)
+ (R16_REG 76)
+ (R17_REG 77)
+ (R18_REG 78)
+ (R19_REG 79)
+ (R20_REG 80)
+ (R21_REG 81)
+ (R22_REG 82)
+ (R23_REG 83)
+ (R24_REG 84)
+ (R25_REG 85)
+ (R26_REG 86)
+ (R27_REG 87)
+ (R28_REG 88)
+ (R29_REG 89)
+ (R30_REG 90)
+ (R31_REG 91)
+ (FIRST_PSEUDO_REG 92)
])
;; Insn callee abi index.
@@ -535,10 +559,11 @@
(define_attr "isa" "base,x64,nox64,x64_sse2,x64_sse4,x64_sse4_noavx,
x64_avx,x64_avx512bw,x64_avx512dq,aes,
sse_noavx,sse2,sse2_noavx,sse3,sse3_noavx,sse4,sse4_noavx,
- avx,noavx,avx2,noavx2,bmi,bmi2,fma4,fma,avx512f,noavx512f,
- avx512bw,noavx512bw,avx512dq,noavx512dq,fma_or_avx512vl,
- avx512vl,noavx512vl,avxvnni,avx512vnnivl,avx512fp16,avxifma,
- avx512ifmavl,avxneconvert,avx512bf16vl,vpclmulqdqvl"
+ avx,noavx,avx2,noavx2,bmi,bmi2,fma4,fma,avx512f,avx512f_512,
+ noavx512f,avx512bw,avx512bw_512,noavx512bw,avx512dq,
+ noavx512dq,fma_or_avx512vl,avx512vl,noavx512vl,avxvnni,
+ avx512vnnivl,avx512fp16,avxifma,avx512ifmavl,avxneconvert,
+ avx512bf16vl,vpclmulqdqvl,avx_noavx512f,avx_noavx512vl"
(const_string "base"))
;; The (bounding maximum) length of an instruction immediate.
@@ -857,6 +882,9 @@
;; Define attribute to indicate unaligned ssemov insns
(define_attr "movu" "0,1" (const_string "0"))
+;; Define attribute to indicate gpr32 insns.
+(define_attr "gpr32" "0, 1" (const_string "1"))
+
;; Define instruction set of MMX instructions
(define_attr "mmx_isa" "base,native,sse,sse_noavx,avx"
(const_string "base"))
@@ -889,6 +917,8 @@
(eq_attr "isa" "sse4_noavx")
(symbol_ref "TARGET_SSE4_1 && !TARGET_AVX")
(eq_attr "isa" "avx") (symbol_ref "TARGET_AVX")
+ (eq_attr "isa" "avx_noavx512f")
+ (symbol_ref "TARGET_AVX && !TARGET_AVX512F")
(eq_attr "isa" "noavx") (symbol_ref "!TARGET_AVX")
(eq_attr "isa" "avx2") (symbol_ref "TARGET_AVX2")
(eq_attr "isa" "noavx2") (symbol_ref "!TARGET_AVX2")
@@ -899,8 +929,12 @@
(eq_attr "isa" "fma_or_avx512vl")
(symbol_ref "TARGET_FMA || TARGET_AVX512VL")
(eq_attr "isa" "avx512f") (symbol_ref "TARGET_AVX512F")
+ (eq_attr "isa" "avx512f_512")
+ (symbol_ref "TARGET_AVX512F && TARGET_EVEX512")
(eq_attr "isa" "noavx512f") (symbol_ref "!TARGET_AVX512F")
(eq_attr "isa" "avx512bw") (symbol_ref "TARGET_AVX512BW")
+ (eq_attr "isa" "avx512bw_512")
+ (symbol_ref "TARGET_AVX512BW && TARGET_EVEX512")
(eq_attr "isa" "noavx512bw") (symbol_ref "!TARGET_AVX512BW")
(eq_attr "isa" "avx512dq") (symbol_ref "TARGET_AVX512DQ")
(eq_attr "isa" "noavx512dq") (symbol_ref "!TARGET_AVX512DQ")
@@ -1437,7 +1471,8 @@
(define_mode_iterator SWI1248_AVX512BWDQ_64
[(QI "TARGET_AVX512DQ") HI
- (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW && TARGET_64BIT")])
+ (SI "TARGET_AVX512BW")
+ (DI "TARGET_AVX512BW && TARGET_EVEX512 && TARGET_64BIT")])
(define_insn "*cmp<mode>_ccz_1"
[(set (reg FLAGS_REG)
@@ -2281,7 +2316,7 @@
(define_expand "movxi"
[(set (match_operand:XI 0 "nonimmediate_operand")
(match_operand:XI 1 "general_operand"))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"ix86_expand_vector_move (XImode, operands); DONE;")
(define_expand "movoi"
@@ -2357,7 +2392,7 @@
(define_insn "*movxi_internal_avx512f"
[(set (match_operand:XI 0 "nonimmediate_operand" "=v,v ,v ,m")
(match_operand:XI 1 "nonimmediate_or_sse_const_operand" " C,BC,vm,v"))]
- "TARGET_AVX512F
+ "TARGET_AVX512F && TARGET_EVEX512
&& (register_operand (operands[0], XImode)
|| register_operand (operands[1], XImode))"
{
@@ -2485,9 +2520,9 @@
(define_insn "*movdi_internal"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=r ,o ,r,r ,r,m ,*y,*y,?*y,?m,?r,?*y,?v,?v,?v,m ,m,?r ,?*Yd,?r,?v,?*y,?*x,*k,*k ,*r,*m,*k")
+ "=r ,o ,r,r ,r,m ,*y,*y,?*y,?m,?r,?*y,?Yv,?v,?v,m ,m,?r ,?*Yd,?r,?v,?*y,?*x,*k,*k ,*r,*m,*k")
(match_operand:DI 1 "general_operand"
- "riFo,riF,Z,rem,i,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,v,*Yd,r ,?v,r ,*x ,*y ,*r,*kBk,*k,*k,CBC"))]
+ "riFo,riF,Z,rem,i,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,v,*Yd,r ,?v,r ,*x ,*y ,*r,*kBk,*k,*k,CBC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
@@ -2605,7 +2640,7 @@
(set (attr "mode")
(cond [(eq_attr "alternative" "2")
(const_string "SI")
- (eq_attr "alternative" "12,13")
+ (eq_attr "alternative" "12")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
@@ -2613,6 +2648,18 @@
(const_string "V4SF")
]
(const_string "TI"))
+ (eq_attr "alternative" "13")
+ (cond [(match_test "TARGET_AVX512VL")
+ (const_string "TI")
+ (match_test "TARGET_AVX512F")
+ (const_string "DF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
(and (eq_attr "alternative" "14,15,16")
(not (match_test "TARGET_SSE2")))
@@ -2706,9 +2753,9 @@
(define_insn "*movsi_internal"
[(set (match_operand:SI 0 "nonimmediate_operand"
- "=r,m ,*y,*y,?*y,?m,?r,?*y,?v,?v,?v,m ,?r,?v,*k,*k ,*rm,*k")
+ "=r,m ,*y,*y,?*y,?m,?r,?*y,?Yv,?v,?v,m ,?r,?v,*k,*k ,*rm,*k")
(match_operand:SI 1 "general_operand"
- "g ,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,?v,r ,*r,*kBk,*k ,CBC"))]
+ "g ,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,?v,r ,*r,*kBk,*k ,CBC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
@@ -2793,7 +2840,7 @@
(set (attr "mode")
(cond [(eq_attr "alternative" "2,3")
(const_string "DI")
- (eq_attr "alternative" "8,9")
+ (eq_attr "alternative" "8")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
@@ -2801,6 +2848,18 @@
(const_string "V4SF")
]
(const_string "TI"))
+ (eq_attr "alternative" "9")
+ (cond [(match_test "TARGET_AVX512VL")
+ (const_string "TI")
+ (match_test "TARGET_AVX512F")
+ (const_string "SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
(and (eq_attr "alternative" "10,11")
(not (match_test "TARGET_SSE2")))
@@ -2849,9 +2908,9 @@
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "nonimmediate_operand"
- "=r,r,r,m ,*k,*k ,r ,m ,*k ,?r,?*v,*v,*v,*v,m")
+ "=r,r,r,m ,*k,*k ,r ,m ,*k ,?r,?*v,*Yv,*v,*v,jm,m")
(match_operand:HI 1 "general_operand"
- "r ,n,m,rn,r ,*km,*k,*k,CBC,*v,r ,C ,*v,m ,*v"))]
+ "r ,n,m,rn,r ,*km,*k,*k,CBC,*v,r ,C ,*v,m ,*x,*v"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
@@ -2906,15 +2965,21 @@
(cond [(eq_attr "alternative" "9,10,11,12,13")
(const_string "sse2")
(eq_attr "alternative" "14")
- (const_string "sse4")
+ (const_string "sse4_noavx")
+ (eq_attr "alternative" "15")
+ (const_string "avx")
]
(const_string "*")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "14")
+ (const_string "0")
+ (const_string "1")))
(set (attr "type")
(cond [(eq_attr "alternative" "4,5,6,7")
(const_string "mskmov")
(eq_attr "alternative" "8")
(const_string "msklog")
- (eq_attr "alternative" "13,14")
+ (eq_attr "alternative" "13,14,15")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "ssemov")
(const_string "sselog1"))
@@ -2939,7 +3004,7 @@
(set (attr "prefix")
(cond [(eq_attr "alternative" "4,5,6,7,8")
(const_string "vex")
- (eq_attr "alternative" "9,10,11,12,13,14")
+ (eq_attr "alternative" "9,10,11,12,13,14,15")
(const_string "maybe_evex")
]
(const_string "orig")))
@@ -2948,7 +3013,7 @@
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
- (eq_attr "alternative" "13,14")
+ (eq_attr "alternative" "13,14,15")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "TI"))
@@ -3776,6 +3841,28 @@
(set_attr "type" "push,multi")
(set_attr "mode" "SI,TI")])
+(define_insn "push2_di"
+ [(set (match_operand:TI 0 "push_operand" "=<")
+ (unspec:TI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")]
+ UNSPEC_APXPUSH2))]
+ "TARGET_APX_PUSH2POP2"
+ "push2\t%1, %2"
+ [(set_attr "mode" "TI")
+ (set_attr "type" "multi")
+ (set_attr "prefix" "evex")])
+
+(define_insn "pop2_di"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:TI 1 "pop_operand" ">")]
+ UNSPEC_APXPOP2_LOW))
+ (set (match_operand:DI 2 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_APXPOP2_HIGH))])]
+ "TARGET_APX_PUSH2POP2"
+ "pop2\t%0, %2"
+ [(set_attr "mode" "TI")
+ (set_attr "prefix" "evex")])
+
(define_insn "*pushsf_rex64"
[(set (match_operand:SF 0 "push_operand" "=X,X,X")
(match_operand:SF 1 "nonmemory_no_elim_operand" "f,rF,v"))]
@@ -3993,9 +4080,9 @@
;; Possible store forwarding (partial memory) stall in alternatives 4, 6 and 7.
(define_insn "*movdf_internal"
[(set (match_operand:DF 0 "nonimmediate_operand"
- "=Yf*f,m ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,v,v,v,m,*x,*x,*x,m ,?r,?v,r ,o ,r ,m")
+ "=Yf*f,m ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,Yv,v,v,m,*x,*x,*x,m ,?r,?v,r ,o ,r ,m")
(match_operand:DF 1 "general_operand"
- "Yf*fm,Yf*f,G ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C,v,m,v,C ,*x,m ,*x, v, r,roF,rF,rmF,rC"))]
+ "Yf*fm,Yf*f,G ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C ,v,m,v,C ,*x,m ,*x, v, r,roF,rF,rmF,rC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
@@ -4170,9 +4257,9 @@
(define_insn "*movsf_internal"
[(set (match_operand:SF 0 "nonimmediate_operand"
- "=Yf*f,m ,Yf*f,?r ,?m,v,v,v,m,?r,?v,!*y,!*y,!m,!r,!*y,r ,m")
+ "=Yf*f,m ,Yf*f,?r ,?m,Yv,v,v,m,?r,?v,!*y,!*y,!m,!r,!*y,r ,m")
(match_operand:SF 1 "general_operand"
- "Yf*fm,Yf*f,G ,rmF,rF,C,v,m,v,v ,r ,*y ,m ,*y,*y,r ,rmF,rF"))]
+ "Yf*fm,Yf*f,G ,rmF,rF,C ,v,m,v,v ,r ,*y ,m ,*y,*y,r ,rmF,rF"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
@@ -4247,7 +4334,7 @@
(eq_attr "alternative" "11")
(const_string "DI")
(eq_attr "alternative" "5")
- (cond [(and (match_test "TARGET_AVX512F")
+ (cond [(and (match_test "TARGET_AVX512F && TARGET_EVEX512")
(not (match_test "TARGET_PREFER_AVX256")))
(const_string "V16SF")
(match_test "TARGET_AVX")
@@ -4271,7 +4358,11 @@
better to maintain the whole registers in single format
to avoid problems on using packed logical operations. */
(eq_attr "alternative" "6")
- (cond [(ior (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (cond [(match_test "TARGET_AVX512VL")
+ (const_string "V4SF")
+ (match_test "TARGET_AVX512F")
+ (const_string "SF")
+ (ior (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
(match_test "TARGET_SSE_SPLIT_REGS"))
(const_string "V4SF")
]
@@ -4301,9 +4392,9 @@
(define_insn "*mov<mode>_internal"
[(set (match_operand:HFBF 0 "nonimmediate_operand"
- "=?r,?r,?r,?m,v,v,?r,m,?v,v")
+ "=?r,?r,?r,?m ,Yv,v,?r,jm,m,?v,v")
(match_operand:HFBF 1 "general_operand"
- "r ,F ,m ,r<hfbfconstf>,C,v, v,v,r ,m"))]
+ "r ,F ,m ,r<hfbfconstf>,C ,v, v,v ,v,r ,m"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress
|| reload_completed
@@ -4339,18 +4430,24 @@
}
}
[(set (attr "isa")
- (cond [(eq_attr "alternative" "4,5,6,8,9")
+ (cond [(eq_attr "alternative" "4,5,6,9,10")
(const_string "sse2")
(eq_attr "alternative" "7")
- (const_string "sse4")
+ (const_string "sse4_noavx")
+ (eq_attr "alternative" "8")
+ (const_string "avx")
]
(const_string "*")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "8")
+ (const_string "0")
+ (const_string "1")))
(set (attr "type")
(cond [(eq_attr "alternative" "4")
(const_string "sselog1")
- (eq_attr "alternative" "5,6,8")
+ (eq_attr "alternative" "5,6,9")
(const_string "ssemov")
- (eq_attr "alternative" "7,9")
+ (eq_attr "alternative" "7,8,10")
(if_then_else
(match_test ("TARGET_AVX512FP16"))
(const_string "ssemov")
@@ -4370,19 +4467,19 @@
]
(const_string "imov")))
(set (attr "prefix")
- (cond [(eq_attr "alternative" "4,5,6,7,8,9")
+ (cond [(eq_attr "alternative" "4,5,6,7,8,9,10")
(const_string "maybe_vex")
]
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "4")
(const_string "V4SF")
- (eq_attr "alternative" "6,8")
+ (eq_attr "alternative" "6,9")
(if_then_else
(match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
- (eq_attr "alternative" "7,9")
+ (eq_attr "alternative" "7,8,10")
(if_then_else
(match_test "TARGET_AVX512FP16")
(const_string "HI")
@@ -4549,7 +4646,7 @@
(eq_attr "alternative" "12")
(const_string "x64_avx512bw")
(eq_attr "alternative" "13")
- (const_string "avx512bw")
+ (const_string "avx512bw_512")
]
(const_string "*")))
(set (attr "mmx_isa")
@@ -4626,7 +4723,7 @@
"split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);")
(define_mode_attr kmov_isa
- [(QI "avx512dq") (HI "avx512f") (SI "avx512bw") (DI "avx512bw")])
+ [(QI "avx512dq") (HI "avx512f") (SI "avx512bw") (DI "avx512bw_512")])
(define_insn "zero_extend<mode>di2"
[(set (match_operand:DI 0 "register_operand" "=r,*r,*k")
@@ -5144,7 +5241,7 @@
&& optimize_insn_for_speed_p ()
&& reload_completed
&& (!EXT_REX_SSE_REG_P (operands[0])
- || TARGET_AVX512VL)"
+ || TARGET_AVX512VL || TARGET_EVEX512)"
[(set (match_dup 2)
(float_extend:V2DF
(vec_select:V2SF
@@ -5287,8 +5384,8 @@
(set_attr "memory" "none")
(set (attr "enabled")
(if_then_else (eq_attr "alternative" "2")
- (symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && !TARGET_PREFER_AVX256")
+ (symbol_ref "TARGET_AVX512F && TARGET_EVEX512
+ && !TARGET_AVX512VL && !TARGET_PREFER_AVX256")
(const_string "*")))])
(define_expand "extend<mode>xf2"
@@ -8864,7 +8961,7 @@
[(set_attr "type" "alu")
(set_attr "mode" "<MODE>")])
-(define_insn "*add<mode>3_cc_overflow_1"
+(define_insn "@add<mode>3_cc_overflow_1"
[(set (reg:CCC FLAGS_REG)
(compare:CCC
(plus:SWI
@@ -11093,7 +11190,7 @@
and{q}\t{%2, %0|%0, %2}
#
#"
- [(set_attr "isa" "x64,x64,x64,x64,avx512bw")
+ [(set_attr "isa" "x64,x64,x64,x64,avx512bw_512")
(set_attr "type" "alu,alu,alu,imovx,msklog")
(set_attr "length_immediate" "*,*,*,0,*")
(set (attr "prefix_rex")
@@ -11616,12 +11713,13 @@
(not:SWI48 (match_operand:SWI48 1 "register_operand" "r,r,k"))
(match_operand:SWI48 2 "nonimmediate_operand" "r,m,k")))
(clobber (reg:CC FLAGS_REG))]
- "TARGET_BMI || TARGET_AVX512BW"
+ "TARGET_BMI
+ || (TARGET_AVX512BW && (<MODE>mode == SImode || TARGET_EVEX512))"
"@
andn\t{%2, %1, %0|%0, %1, %2}
andn\t{%2, %1, %0|%0, %1, %2}
#"
- [(set_attr "isa" "bmi,bmi,avx512bw")
+ [(set_attr "isa" "bmi,bmi,<kmov_isa>")
(set_attr "type" "bitmanip,bitmanip,msklog")
(set_attr "btver2_decode" "direct, double,*")
(set_attr "mode" "<MODE>")])
@@ -11849,13 +11947,7 @@
<logic>{<imodesuffix>}\t{%2, %0|%0, %2}
<logic>{<imodesuffix>}\t{%2, %0|%0, %2}
#"
- [(set (attr "isa")
- (cond [(eq_attr "alternative" "2")
- (if_then_else (eq_attr "mode" "SI,DI")
- (const_string "avx512bw")
- (const_string "avx512f"))
- ]
- (const_string "*")))
+ [(set_attr "isa" "*,*,<kmov_isa>")
(set_attr "type" "alu, alu, msklog")
(set_attr "mode" "<MODE>")])
@@ -11882,13 +11974,7 @@
DONE;
}
}
- [(set (attr "isa")
- (cond [(eq_attr "alternative" "2")
- (if_then_else (eq_attr "mode" "SI,DI")
- (const_string "avx512bw")
- (const_string "avx512f"))
- ]
- (const_string "*")))
+ [(set_attr "isa" "*,*,<kmov_isa>")
(set_attr "type" "alu, alu, msklog")
(set_attr "mode" "<MODE>")])
@@ -13269,13 +13355,7 @@
"@
not{<imodesuffix>}\t%0
#"
- [(set (attr "isa")
- (cond [(eq_attr "alternative" "1")
- (if_then_else (eq_attr "mode" "SI,DI")
- (const_string "avx512bw")
- (const_string "avx512f"))
- ]
- (const_string "*")))
+ [(set_attr "isa" "*,<kmov_isa>")
(set_attr "type" "negnot,msklog")
(set_attr "mode" "<MODE>")])
@@ -13287,7 +13367,7 @@
"@
not{l}\t%k0
#"
- [(set_attr "isa" "x64,avx512bw")
+ [(set_attr "isa" "x64,avx512bw_512")
(set_attr "type" "negnot,msklog")
(set_attr "mode" "SI,SI")])
@@ -13912,7 +13992,7 @@
return "sal{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- [(set_attr "isa" "*,*,bmi2,avx512bw")
+ [(set_attr "isa" "*,*,bmi2,<kmov_isa>")
(set (attr "type")
(cond [(eq_attr "alternative" "1")
(const_string "lea")
@@ -14964,7 +15044,7 @@
return "shr{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- [(set_attr "isa" "*,bmi2,avx512bw")
+ [(set_attr "isa" "*,bmi2,<kmov_isa>")
(set_attr "type" "ishift,ishiftx,msklog")
(set (attr "length_immediate")
(if_then_else
@@ -15804,6 +15884,59 @@
[(parallel [(set (strict_low_part (match_dup 0))
(bswap:HI (match_dup 0)))
(clobber (reg:CC FLAGS_REG))])])
+
+;; Rotations through carry flag
+(define_insn "rcrsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 1))
+ (ashift:SI (ltu:SI (reg:CCC FLAGS_REG) (const_int 0))
+ (const_int 31))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "rcr{l}\t%0"
+ [(set_attr "type" "ishift1")
+ (set_attr "memory" "none")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "SI")])
+
+(define_insn "rcrdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 1))
+ (ashift:DI (ltu:DI (reg:CCC FLAGS_REG) (const_int 0))
+ (const_int 63))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "rcr{q}\t%0"
+ [(set_attr "type" "ishift1")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "DI")])
+
+;; Versions of sar and shr that set the carry flag.
+(define_insn "<insn><mode>3_carry"
+ [(set (reg:CCC FLAGS_REG)
+ (unspec:CCC [(and:SWI48 (match_operand:SWI48 1 "register_operand" "0")
+ (const_int 1))
+ (const_int 0)] UNSPEC_CC_NE))
+ (set (match_operand:SWI48 0 "register_operand" "=r")
+ (any_shiftrt:SWI48 (match_dup 1) (const_int 1)))]
+ ""
+{
+ if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
+ return "<shift>{<imodesuffix>}\t%0";
+ return "<shift>{<imodesuffix>}\t{1, %0|%0, 1}";
+}
+ [(set_attr "type" "ishift1")
+ (set (attr "length_immediate")
+ (if_then_else
+ (ior (match_test "TARGET_SHIFT1")
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "0")
+ (const_string "*")))
+ (set_attr "mode" "<MODE>")])
;; Bit set / bit test instructions
@@ -16630,12 +16763,13 @@
[(set (match_operand:MODEF 0 "register_operand" "=x,x")
(match_operator:MODEF 3 "sse_comparison_operator"
[(match_operand:MODEF 1 "register_operand" "0,x")
- (match_operand:MODEF 2 "nonimmediate_operand" "xm,xm")]))]
+ (match_operand:MODEF 2 "nonimmediate_operand" "xm,xjm")]))]
"SSE_FLOAT_MODE_P (<MODE>mode)"
"@
cmp%D3<ssemodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
@@ -20091,24 +20225,27 @@
(set_attr "mode" "HF")])
(define_insn "*rcpsf2_sse"
- [(set (match_operand:SF 0 "register_operand" "=x,x,x")
- (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m")]
+ [(set (match_operand:SF 0 "register_operand" "=x,x,x,x")
+ (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m,ja")]
UNSPEC_RCP))]
"TARGET_SSE && TARGET_SSE_MATH"
"@
%vrcpss\t{%d1, %0|%0, %d1}
%vrcpss\t{%d1, %0|%0, %d1}
- %vrcpss\t{%1, %d0|%d0, %1}"
- [(set_attr "type" "sse")
+ rcpss\t{%1, %d0|%d0, %1}
+ vrcpss\t{%1, %d0|%d0, %1}"
+ [(set_attr "isa" "*,*,noavx,avx")
+ (set_attr "gpr32" "1,1,1,0")
+ (set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "SF")
- (set_attr "avx_partial_xmm_update" "false,false,true")
+ (set_attr "avx_partial_xmm_update" "false,false,true,true")
(set (attr "preferred_for_speed")
(cond [(match_test "TARGET_AVX")
(symbol_ref "true")
- (eq_attr "alternative" "1,2")
+ (eq_attr "alternative" "1,2,3")
(symbol_ref "!TARGET_SSE_PARTIAL_REG_DEPENDENCY")
]
(symbol_ref "true")))])
@@ -20351,24 +20488,27 @@
(set_attr "bdver1_decode" "direct")])
(define_insn "*rsqrtsf2_sse"
- [(set (match_operand:SF 0 "register_operand" "=x,x,x")
- (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m")]
+ [(set (match_operand:SF 0 "register_operand" "=x,x,x,x")
+ (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m,ja")]
UNSPEC_RSQRT))]
"TARGET_SSE && TARGET_SSE_MATH"
"@
%vrsqrtss\t{%d1, %0|%0, %d1}
%vrsqrtss\t{%d1, %0|%0, %d1}
- %vrsqrtss\t{%1, %d0|%d0, %1}"
- [(set_attr "type" "sse")
+ rsqrtss\t{%1, %d0|%d0, %1}
+ vrsqrtss\t{%1, %d0|%d0, %1}"
+ [(set_attr "isa" "*,*,noavx,avx")
+ (set_attr "gpr32" "1,1,1,0")
+ (set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "SF")
- (set_attr "avx_partial_xmm_update" "false,false,true")
+ (set_attr "avx_partial_xmm_update" "false,false,true,true")
(set (attr "preferred_for_speed")
(cond [(match_test "TARGET_AVX")
(symbol_ref "true")
- (eq_attr "alternative" "1,2")
+ (eq_attr "alternative" "1,2,3")
(symbol_ref "!TARGET_SSE_PARTIAL_REG_DEPENDENCY")
]
(symbol_ref "true")))])
@@ -21584,7 +21724,7 @@
(define_insn "sse4_1_round<mode>2"
[(set (match_operand:MODEFH 0 "register_operand" "=x,x,x,v,v")
(unspec:MODEFH
- [(match_operand:MODEFH 1 "nonimmediate_operand" "0,x,m,v,m")
+ [(match_operand:MODEFH 1 "nonimmediate_operand" "0,x,jm,v,m")
(match_operand:SI 2 "const_0_to_15_operand")]
UNSPEC_ROUND))]
"TARGET_SSE4_1"
@@ -21597,6 +21737,7 @@
[(set_attr "type" "ssecvt")
(set_attr "prefix_extra" "1,1,1,*,*")
(set_attr "length_immediate" "1")
+ (set_attr "gpr32" "1,1,0,1,1")
(set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,evex,evex")
(set_attr "isa" "noavx512f,noavx512f,noavx512f,avx512f,avx512f")
(set_attr "avx_partial_xmm_update" "false,false,true,false,true")
@@ -21699,6 +21840,15 @@
DONE;
})
+(define_expand "roundhf2"
+ [(match_operand:HF 0 "register_operand")
+ (match_operand:HF 1 "register_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math && !flag_rounding_math"
+{
+ ix86_expand_round_sse4 (operands[0], operands[1]);
+ DONE;
+})
+
(define_expand "round<mode>2"
[(match_operand:X87MODEF 0 "register_operand")
(match_operand:X87MODEF 1 "nonimmediate_operand")]
@@ -21750,6 +21900,22 @@
[(set_attr "type" "fpspc")
(set_attr "mode" "<MODE>")])
+(define_expand "lroundhf<mode>2"
+ [(set (match_operand:SWI248 0 "register_operand")
+ (unspec:SWI248 [(match_operand:HF 1 "nonimmediate_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_AVX512FP16 && !flag_trapping_math && !flag_rounding_math"
+{
+ ix86_expand_lround (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "lrinthf<mode>2"
+ [(set (match_operand:SWI48 0 "register_operand")
+ (unspec:SWI48 [(match_operand:HF 1 "nonimmediate_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_AVX512FP16")
+
(define_expand "lrint<MODEF:mode><SWI48:mode>2"
[(set (match_operand:SWI48 0 "register_operand")
(unspec:SWI48 [(match_operand:MODEF 1 "nonimmediate_operand")]
@@ -21992,6 +22158,19 @@
&& (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
&& flag_unsafe_math_optimizations")
+(define_expand "l<rounding_insn>hf<mode>2"
+ [(set (match_operand:SWI48 0 "nonimmediate_operand")
+ (unspec:SWI48 [(match_operand:HF 1 "register_operand")]
+ FIST_ROUNDING))]
+ "TARGET_AVX512FP16"
+{
+ rtx tmp = gen_reg_rtx (HFmode);
+ emit_insn (gen_sse4_1_roundhf2 (tmp, operands[1],
+ GEN_INT (ROUND_<ROUNDING> | ROUND_NO_EXC)));
+ emit_insn (gen_fix_trunchf<mode>2 (operands[0], tmp));
+ DONE;
+})
+
(define_expand "l<rounding_insn><MODEF:mode><SWI48:mode>2"
[(parallel [(set (match_operand:SWI48 0 "nonimmediate_operand")
(unspec:SWI48 [(match_operand:MODEF 1 "register_operand")]
@@ -22071,14 +22250,15 @@
})
(define_insn "movmsk_df"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
- [(match_operand:DF 1 "register_operand" "x")]
+ [(match_operand:DF 1 "register_operand" "x,x")]
UNSPEC_MOVMSK))]
"SSE_FLOAT_MODE_P (DFmode) && TARGET_SSE_MATH"
"%vmovmskpd\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "maybe_vex")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_evex")
(set_attr "mode" "DF")])
;; Use movmskpd in SSE mode to avoid store forwarding stall
@@ -25607,11 +25787,12 @@
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "fxsave64"
- [(set (match_operand:BLK 0 "memory_operand" "=m")
+ [(set (match_operand:BLK 0 "memory_operand" "=jm")
(unspec_volatile:BLK [(const_int 0)] UNSPECV_FXSAVE64))]
"TARGET_64BIT && TARGET_FXSR"
"fxsave64\t%0"
[(set_attr "type" "other")
+ (set_attr "gpr32" "0")
(set_attr "memory" "store")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
@@ -25627,11 +25808,12 @@
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "fxrstor64"
- [(unspec_volatile [(match_operand:BLK 0 "memory_operand" "m")]
+ [(unspec_volatile [(match_operand:BLK 0 "memory_operand" "jm")]
UNSPECV_FXRSTOR64)]
"TARGET_64BIT && TARGET_FXSR"
"fxrstor64\t%0"
[(set_attr "type" "other")
+ (set_attr "gpr32" "0")
(set_attr "memory" "load")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
@@ -25685,7 +25867,7 @@
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "<xsave>_rex64"
- [(set (match_operand:BLK 0 "memory_operand" "=m")
+ [(set (match_operand:BLK 0 "memory_operand" "=jm")
(unspec_volatile:BLK
[(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
@@ -25694,11 +25876,12 @@
"<xsave>\t%0"
[(set_attr "type" "other")
(set_attr "memory" "store")
+ (set_attr "gpr32" "0")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "<xsave>"
- [(set (match_operand:BLK 0 "memory_operand" "=m")
+ [(set (match_operand:BLK 0 "memory_operand" "=jm")
(unspec_volatile:BLK
[(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
@@ -25707,6 +25890,7 @@
"<xsave>\t%0"
[(set_attr "type" "other")
(set_attr "memory" "store")
+ (set_attr "gpr32" "0")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
@@ -25724,7 +25908,7 @@
(define_insn "<xrstor>_rex64"
[(unspec_volatile:BLK
- [(match_operand:BLK 0 "memory_operand" "m")
+ [(match_operand:BLK 0 "memory_operand" "jm")
(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
ANY_XRSTOR)]
@@ -25732,12 +25916,13 @@
"<xrstor>\t%0"
[(set_attr "type" "other")
(set_attr "memory" "load")
+ (set_attr "gpr32" "0")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "<xrstor>64"
[(unspec_volatile:BLK
- [(match_operand:BLK 0 "memory_operand" "m")
+ [(match_operand:BLK 0 "memory_operand" "jm")
(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
ANY_XRSTOR64)]
@@ -25745,6 +25930,7 @@
"<xrstor>64\t%0"
[(set_attr "type" "other")
(set_attr "memory" "load")
+ (set_attr "gpr32" "0")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
@@ -26483,6 +26669,26 @@
DONE;
})
+(define_insn "urdmsr"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI
+ [(match_operand:DI 1 "x86_64_szext_nonmemory_operand" "reZ")]
+ UNSPECV_URDMSR))]
+ "TARGET_USER_MSR && TARGET_64BIT"
+ "urdmsr\t{%1, %0|%0, %1}"
+ [(set_attr "prefix" "vex")
+ (set_attr "type" "other")])
+
+(define_insn "uwrmsr"
+ [(unspec_volatile
+ [(match_operand:DI 0 "x86_64_szext_nonmemory_operand" "reZ")
+ (match_operand:DI 1 "register_operand" "r")]
+ UNSPECV_UWRMSR)]
+ "TARGET_USER_MSR && TARGET_64BIT"
+ "uwrmsr\t{%1, %0|%0, %1}"
+ [(set_attr "prefix" "vex")
+ (set_attr "type" "other")])
+
(include "mmx.md")
(include "sse.md")
(include "sync.md")
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index 78b4993..0c3b8f4 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -282,7 +282,7 @@ Branches are this expensive (arbitrary units).
mlarge-data-threshold=
Target RejectNegative Joined UInteger Var(ix86_section_threshold) Init(DEFAULT_LARGE_SECTION_THRESHOLD)
--mlarge-data-threshold=<number> Data greater than given threshold will go into .ldata section in x86-64 medium model.
+-mlarge-data-threshold=<number> Data greater than given threshold will go into a large data section in x86-64 medium and large code models.
mcmodel=
Target RejectNegative Joined Enum(cmodel) Var(ix86_cmodel) Init(CM_32)
@@ -1310,3 +1310,41 @@ Enable vectorization for gather instruction.
mscatter
Target Alias(mtune-ctrl=, use_scatter, ^use_scatter)
Enable vectorization for scatter instruction.
+
+mapxf
+Target Mask(ISA2_APX_F) Var(ix86_isa_flags2) Save
+Support APX code generation.
+
+mapx-features=
+Target Undocumented Joined Enum(apx_features) EnumSet Var(ix86_apx_features) Init(apx_none) Save
+
+Enum
+Name(apx_features) Type(int)
+
+EnumValue
+Enum(apx_features) String(none) Value(apx_none) Set(1)
+
+EnumValue
+Enum(apx_features) String(egpr) Value(apx_egpr) Set(2)
+
+EnumValue
+Enum(apx_features) String(push2pop2) Value(apx_push2pop2) Set(3)
+
+EnumValue
+Enum(apx_features) String(ndd) Value(apx_ndd) Set(4)
+
+EnumValue
+Enum(apx_features) String(all) Value(apx_all) Set(1)
+
+mapx-inline-asm-use-gpr32
+Target Var(ix86_apx_inline_asm_use_gpr32) Init(0)
+Enable GPR32 in inline asm when APX_EGPR enabled, do not
+hook reg or mem constraint in inline asm to GPR16.
+
+mevex512
+Target Mask(ISA2_EVEX512) Var(ix86_isa_flags2) Save
+Support 512 bit vector built-in functions and code generation.
+
+musermsr
+Target Mask(ISA2_USER_MSR) Var(ix86_isa_flags2) Save
+Support USER_MSR built-in functions and code generation.
diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
index 29b4dbb..4e17901 100644
--- a/gcc/config/i386/immintrin.h
+++ b/gcc/config/i386/immintrin.h
@@ -96,6 +96,8 @@
#include <avx512bitalgintrin.h>
+#include <avx512bitalgvlintrin.h>
+
#include <avx512vp2intersectintrin.h>
#include <avx512vp2intersectvlintrin.h>
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index ef57822..491a0a5 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -60,6 +60,7 @@
;; All 4-byte integer/float16 vector modes
(define_mode_iterator V_32 [V4QI V2HI V1SI V2HF V2BF])
+(define_mode_iterator V2FI_32 [V2HF V2BF V2HI])
;; 4-byte integer vector modes
(define_mode_iterator VI_32 [V4QI V2HI])
@@ -79,7 +80,7 @@
;; V2S* modes
(define_mode_iterator V2FI [V2SF V2SI])
-(define_mode_iterator V2FI_V4HF [V2SF V2SI V4HF])
+(define_mode_iterator V24FI [V2SF V2SI V4HF V4HI])
;; Mapping from integer vector mode to mnemonic suffix
(define_mode_attr mmxvecsize
[(V8QI "b") (V4QI "b") (V2QI "b")
@@ -99,14 +100,16 @@
;; Mapping of vector float modes to an integer mode of the same size
(define_mode_attr mmxintvecmode
- [(V2SF "V2SI") (V2SI "V2SI") (V4HI "V4HI") (V8QI "V8QI")])
+ [(V2SF "V2SI") (V2SI "V2SI") (V4HI "V4HI") (V8QI "V8QI")
+ (V4HF "V4HI") (V2HF "V2HI")])
(define_mode_attr mmxintvecmodelower
- [(V2SF "v2si") (V2SI "v2si") (V4HI "v4hi") (V8QI "v8qi")])
+ [(V2SF "v2si") (V2SI "v2si") (V4HI "v4hi") (V8QI "v8qi")
+ (V4HF "v4hi") (V2HF "v2hi")])
;; Mapping of vector modes to a vector mode of double size
(define_mode_attr mmxdoublevecmode
- [(V2SF "V4SF") (V2SI "V4SI") (V4HF "V8HF")])
+ [(V2SF "V4SF") (V2SI "V4SI") (V4HF "V8HF") (V4HI "V8HI")])
;; Mapping of vector modes back to the scalar modes
(define_mode_attr mmxscalarmode
@@ -418,9 +421,9 @@
(define_insn "*movv2qi_internal"
[(set (match_operand:V2QI 0 "nonimmediate_operand"
- "=r,r,r,m ,v,v,v,m,r,v")
+ "=r,r,r,m ,v,v,v,jm,m,r,v")
(match_operand:V2QI 1 "general_operand"
- "r ,C,m,rC,C,v,m,v,v,r"))]
+ "r ,C,m,rC,C,v,m,x,v,v,r"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (get_attr_type (insn))
@@ -453,20 +456,26 @@
}
}
[(set (attr "isa")
- (cond [(eq_attr "alternative" "6,8,9")
+ (cond [(eq_attr "alternative" "6,9,10")
(const_string "sse2")
(eq_attr "alternative" "7")
- (const_string "sse4")
+ (const_string "sse4_noavx")
+ (eq_attr "alternative" "8")
+ (const_string "avx")
]
(const_string "*")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "7")
+ (const_string "0")
+ (const_string "1")))
(set (attr "type")
- (cond [(eq_attr "alternative" "6,7")
+ (cond [(eq_attr "alternative" "6,7,8")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "ssemov")
(const_string "sselog1"))
(eq_attr "alternative" "4")
(const_string "sselog1")
- (eq_attr "alternative" "5,8,9")
+ (eq_attr "alternative" "5,9,10")
(const_string "ssemov")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "imov")
@@ -483,16 +492,16 @@
]
(const_string "imov")))
(set (attr "prefix")
- (cond [(eq_attr "alternative" "4,5,6,7,8,9")
+ (cond [(eq_attr "alternative" "4,5,6,7,8,9,10")
(const_string "maybe_evex")
]
(const_string "orig")))
(set (attr "mode")
- (cond [(eq_attr "alternative" "6,7")
+ (cond [(eq_attr "alternative" "6,7,8")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "TI"))
- (eq_attr "alternative" "8,9")
+ (eq_attr "alternative" "9,10")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
@@ -526,9 +535,9 @@
]
(const_string "HI")))
(set (attr "preferred_for_speed")
- (cond [(eq_attr "alternative" "8")
+ (cond [(eq_attr "alternative" "9")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
- (eq_attr "alternative" "9")
+ (eq_attr "alternative" "10")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))])
@@ -592,7 +601,7 @@
(define_expand "movq_<mode>_to_sse"
[(set (match_operand:<mmxdoublevecmode> 0 "register_operand")
(vec_concat:<mmxdoublevecmode>
- (match_operand:V2FI_V4HF 1 "nonimmediate_operand")
+ (match_operand:V24FI 1 "nonimmediate_operand")
(match_dup 2)))]
"TARGET_SSE2"
{
@@ -1167,7 +1176,7 @@
(define_insn "@sse4_1_insertps_<mode>"
[(set (match_operand:V2FI 0 "register_operand" "=Yr,*x,v")
(unspec:V2FI
- [(match_operand:V2FI 2 "nonimmediate_operand" "Yrm,*xm,vm")
+ [(match_operand:V2FI 2 "nonimmediate_operand" "Yrjm,*xjm,vm")
(match_operand:V2FI 1 "register_operand" "0,0,v")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_INSERTPS))]
@@ -1193,6 +1202,7 @@
}
}
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "sselog")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -1936,31 +1946,13 @@
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-(define_expand "<insn>v4hf3"
- [(set (match_operand:V4HF 0 "register_operand")
- (plusminusmult:V4HF
- (match_operand:V4HF 1 "nonimmediate_operand")
- (match_operand:V4HF 2 "nonimmediate_operand")))]
- "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
-{
- rtx op2 = gen_reg_rtx (V8HFmode);
- rtx op1 = gen_reg_rtx (V8HFmode);
- rtx op0 = gen_reg_rtx (V8HFmode);
-
- emit_insn (gen_movq_v4hf_to_sse (op2, operands[2]));
- emit_insn (gen_movq_v4hf_to_sse (op1, operands[1]));
-
- emit_insn (gen_<insn>v8hf3 (op0, op1, op2));
-
- emit_move_insn (operands[0], lowpart_subreg (V4HFmode, op0, V8HFmode));
- DONE;
-})
+(define_mode_iterator VHF_32_64 [V2HF (V4HF "TARGET_MMX_WITH_SSE")])
(define_expand "divv4hf3"
[(set (match_operand:V4HF 0 "register_operand")
(div:V4HF
(match_operand:V4HF 1 "nonimmediate_operand")
- (match_operand:V4HF 2 "nonimmediate_operand")))]
+ (match_operand:V4HF 2 "register_operand")))]
"TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
{
rtx op2 = gen_reg_rtx (V8HFmode);
@@ -1976,39 +1968,59 @@
DONE;
})
-(define_expand "movd_v2hf_to_sse"
- [(set (match_operand:V8HF 0 "register_operand")
- (vec_merge:V8HF
- (vec_duplicate:V8HF
- (match_operand:V2HF 1 "nonimmediate_operand"))
- (match_operand:V8HF 2 "reg_or_0_operand")
+(define_mode_attr mov_to_sse_suffix
+ [(V2HF "d") (V4HF "q") (V2HI "d") (V4HI "q")])
+
+(define_mode_attr mmxxmmmode
+ [(V2HF "V8HF") (V2HI "V8HI") (V2BF "V8BF")])
+
+(define_mode_attr mmxxmmmodelower
+ [(V2HF "v8hf") (V2HI "v8hi") (V2BF "v8bf")])
+
+(define_expand "movd_<mode>_to_sse"
+ [(set (match_operand:<mmxxmmmode> 0 "register_operand")
+ (vec_merge:<mmxxmmmode>
+ (vec_duplicate:<mmxxmmmode>
+ (match_operand:V2FI_32 1 "nonimmediate_operand"))
+ (match_dup 2)
(const_int 3)))]
"TARGET_SSE"
{
- if (!flag_trapping_math && operands[2] == CONST0_RTX (V8HFmode))
+ if (!flag_trapping_math)
{
- rtx op1 = force_reg (V2HFmode, operands[1]);
- emit_move_insn (operands[0], lowpart_subreg (V8HFmode, op1, V2HFmode));
+ rtx op1 = force_reg (<MODE>mode, operands[1]);
+ emit_move_insn (operands[0],
+ lowpart_subreg (<mmxxmmmode>mode, op1, <MODE>mode));
DONE;
}
+ operands[2] = CONST0_RTX (<mmxxmmmode>mode);
})
-(define_expand "<insn>v2hf3"
- [(set (match_operand:V2HF 0 "register_operand")
- (plusminusmult:V2HF
- (match_operand:V2HF 1 "nonimmediate_operand")
- (match_operand:V2HF 2 "nonimmediate_operand")))]
+(define_expand "movd_<mode>_to_sse_reg"
+ [(set (match_operand:<mmxxmmmode> 0 "register_operand")
+ (vec_merge:<mmxxmmmode>
+ (vec_duplicate:<mmxxmmmode>
+ (match_operand:V2FI_32 1 "nonimmediate_operand"))
+ (match_operand:<mmxxmmmode> 2 "register_operand")
+ (const_int 3)))]
+ "TARGET_SSE")
+
+(define_expand "<insn><mode>3"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (plusminusmult:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")
+ (match_operand:VHF_32_64 2 "nonimmediate_operand")))]
"TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
{
rtx op2 = gen_reg_rtx (V8HFmode);
rtx op1 = gen_reg_rtx (V8HFmode);
rtx op0 = gen_reg_rtx (V8HFmode);
- emit_insn (gen_movd_v2hf_to_sse (op2, operands[2], CONST0_RTX (V8HFmode)));
- emit_insn (gen_movd_v2hf_to_sse (op1, operands[1], CONST0_RTX (V8HFmode)));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op2, operands[2]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
emit_insn (gen_<insn>v8hf3 (op0, op1, op2));
- emit_move_insn (operands[0], lowpart_subreg (V2HFmode, op0, V8HFmode));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
DONE;
})
@@ -2023,15 +2035,599 @@
rtx op1 = gen_reg_rtx (V8HFmode);
rtx op0 = gen_reg_rtx (V8HFmode);
- emit_insn (gen_movd_v2hf_to_sse (op2, operands[2],
+ emit_insn (gen_movd_v2hf_to_sse_reg (op2, operands[2],
force_reg (V8HFmode, CONST1_RTX (V8HFmode))));
- emit_insn (gen_movd_v2hf_to_sse (op1, operands[1], CONST0_RTX (V8HFmode)));
+ emit_insn (gen_movd_v2hf_to_sse (op1, operands[1]));
emit_insn (gen_divv8hf3 (op0, op1, op2));
emit_move_insn (operands[0], lowpart_subreg (V2HFmode, op0, V8HFmode));
DONE;
})
+(define_expand "<code><mode>3"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (smaxmin:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")
+ (match_operand:VHF_32_64 2 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op2, operands[2]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+
+ emit_insn (gen_<code>v8hf3 (op0, op1, op2));
+
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "sqrt<mode>2"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (sqrt:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_sqrtv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "<code><mode>2"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (absneg:VHF_32_64
+ (match_operand:VHF_32_64 1 "register_operand")))]
+ "TARGET_SSE"
+ "ix86_expand_fp_absneg_operator (<CODE>, <MODE>mode, operands); DONE;")
+
+(define_insn_and_split "*mmx_<code><mode>"
+ [(set (match_operand:VHF_32_64 0 "register_operand" "=x,x,x")
+ (absneg:VHF_32_64
+ (match_operand:VHF_32_64 1 "register_operand" "0,x,x")))
+ (use (match_operand:VHF_32_64 2 "register_operand" "x,0,x"))]
+ "TARGET_SSE"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (<absneg_op>:<MODE> (match_dup 1) (match_dup 2)))]
+{
+ if (!TARGET_AVX && operands_match_p (operands[0], operands[2]))
+ std::swap (operands[1], operands[2]);
+}
+ [(set_attr "isa" "noavx,noavx,avx")])
+
+(define_insn_and_split "*mmx_nabs<mode>2"
+ [(set (match_operand:VHF_32_64 0 "register_operand" "=x,x,x")
+ (neg:VHF_32_64
+ (abs:VHF_32_64
+ (match_operand:VHF_32_64 1 "register_operand" "0,x,x"))))
+ (use (match_operand:VHF_32_64 2 "register_operand" "x,0,x"))]
+ "TARGET_SSE"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (ior:<MODE> (match_dup 1) (match_dup 2)))])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision floating point rounding operations.
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "btrunc<mode>2"
+ [(match_operand:VHF_32_64 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_btruncv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "nearbyint<mode>2"
+ [(match_operand:VHF_32_64 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_nearbyintv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "rint<mode>2"
+ [(match_operand:VHF_32_64 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_rintv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "lrint<mode><mmxintvecmodelower>2"
+ [(match_operand:<mmxintvecmode> 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_lrintv8hfv8hi2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "floor<mode>2"
+ [(match_operand:VHF_32_64 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_floorv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "lfloor<mode><mmxintvecmodelower>2"
+ [(match_operand:<mmxintvecmode> 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_lfloorv8hfv8hi2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "ceil<mode>2"
+ [(match_operand:VHF_32_64 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_ceilv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "lceil<mode><mmxintvecmodelower>2"
+ [(match_operand:<mmxintvecmode> 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_lceilv8hfv8hi2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "round<mode>2"
+ [(match_operand:VHF_32_64 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_roundv8hf2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+(define_expand "lround<mode><mmxintvecmodelower>2"
+ [(match_operand:<mmxintvecmode> 0 "register_operand")
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && !flag_trapping_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+ emit_insn (gen_lroundv8hfv8hi2 (op0, op1));
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision floating point logical operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "*mmx_andnot<mode>3"
+ [(set (match_operand:VHF_32_64 0 "register_operand" "=x,x")
+ (and:VHF_32_64
+ (not:VHF_32_64
+ (match_operand:VHF_32_64 1 "register_operand" "0,x"))
+ (match_operand:VHF_32_64 2 "register_operand" "x,x")))]
+ "TARGET_SSE"
+ "@
+ andnps\t{%2, %0|%0, %2}
+ vandnps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog")
+ (set_attr "prefix" "orig,vex")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "<code><mode>3"
+ [(set (match_operand:VHF_32_64 0 "register_operand" "=x,x")
+ (any_logic:VHF_32_64
+ (match_operand:VHF_32_64 1 "register_operand" "%0,x")
+ (match_operand:VHF_32_64 2 "register_operand" " x,x")))]
+ "TARGET_SSE"
+ "@
+ <logic>ps\t{%2, %0|%0, %2}
+ v<logic>ps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog,sselog")
+ (set_attr "prefix" "orig,vex")
+ (set_attr "mode" "V4SF")])
+
+(define_expand "copysign<mode>3"
+ [(set (match_dup 4)
+ (and:VHF_32_64
+ (not:VHF_32_64 (match_dup 3))
+ (match_operand:VHF_32_64 1 "register_operand")))
+ (set (match_dup 5)
+ (and:VHF_32_64 (match_dup 3)
+ (match_operand:VHF_32_64 2 "register_operand")))
+ (set (match_operand:VHF_32_64 0 "register_operand")
+ (ior:VHF_32_64 (match_dup 4) (match_dup 5)))]
+ "TARGET_SSE"
+{
+ operands[3] = ix86_build_signbit_mask (<MODE>mode, true, false);
+
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = gen_reg_rtx (<MODE>mode);
+})
+
+(define_expand "xorsign<mode>3"
+ [(set (match_dup 4)
+ (and:VHF_32_64 (match_dup 3)
+ (match_operand:VHF_32_64 2 "register_operand")))
+ (set (match_operand:VHF_32_64 0 "register_operand")
+ (xor:VHF_32_64 (match_dup 4)
+ (match_operand:VHF_32_64 1 "register_operand")))]
+ "TARGET_SSE"
+{
+ operands[3] = ix86_build_signbit_mask (<MODE>mode, true, false);
+
+ operands[4] = gen_reg_rtx (<MODE>mode);
+})
+
+(define_expand "signbit<mode>2"
+ [(set (match_operand:<mmxintvecmode> 0 "register_operand")
+ (lshiftrt:<mmxintvecmode>
+ (subreg:<mmxintvecmode>
+ (match_operand:VHF_32_64 1 "register_operand") 0)
+ (match_dup 2)))]
+ "TARGET_SSE2"
+ "operands[2] = GEN_INT (GET_MODE_UNIT_BITSIZE (<MODE>mode)-1);")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision FMA multiply/accumulate instructions.
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "fma<mode>4"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (fma:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")
+ (match_operand:VHF_32_64 2 "nonimmediate_operand")
+ (match_operand:VHF_32_64 3 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op3 = gen_reg_rtx (V8HFmode);
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op3, operands[3]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op2, operands[2]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+
+ emit_insn (gen_fmav8hf4 (op0, op1, op2, op3));
+
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "fms<mode>4"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (fma:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")
+ (match_operand:VHF_32_64 2 "nonimmediate_operand")
+ (neg:VHF_32_64
+ (match_operand:VHF_32_64 3 "nonimmediate_operand"))))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op3 = gen_reg_rtx (V8HFmode);
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op3, operands[3]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op2, operands[2]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+
+ emit_insn (gen_fmsv8hf4 (op0, op1, op2, op3));
+
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "fnma<mode>4"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (fma:VHF_32_64
+ (neg:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand"))
+ (match_operand:VHF_32_64 2 "nonimmediate_operand")
+ (match_operand:VHF_32_64 3 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op3 = gen_reg_rtx (V8HFmode);
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op3, operands[3]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op2, operands[2]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+
+ emit_insn (gen_fnmav8hf4 (op0, op1, op2, op3));
+
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "fnms<mode>4"
+ [(set (match_operand:VHF_32_64 0 "register_operand" "=v,v,x")
+ (fma:VHF_32_64
+ (neg:VHF_32_64
+ (match_operand:VHF_32_64 1 "nonimmediate_operand"))
+ (match_operand:VHF_32_64 2 "nonimmediate_operand")
+ (neg:VHF_32_64
+ (match_operand:VHF_32_64 3 "nonimmediate_operand"))))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op3 = gen_reg_rtx (V8HFmode);
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op3, operands[3]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op2, operands[2]));
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+
+ emit_insn (gen_fnmsv8hf4 (op0, op1, op2, op3));
+
+ emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "vec_fmaddsubv4hf4"
+ [(match_operand:V4HF 0 "register_operand")
+ (match_operand:V4HF 1 "nonimmediate_operand")
+ (match_operand:V4HF 2 "nonimmediate_operand")
+ (match_operand:V4HF 3 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && TARGET_MMX_WITH_SSE
+ && ix86_partial_vec_fp_math"
+{
+ rtx op3 = gen_reg_rtx (V8HFmode);
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_movq_v4hf_to_sse (op3, operands[3]));
+ emit_insn (gen_movq_v4hf_to_sse (op2, operands[2]));
+ emit_insn (gen_movq_v4hf_to_sse (op1, operands[1]));
+
+ emit_insn (gen_vec_fmaddsubv8hf4 (op0, op1, op2, op3));
+
+ emit_move_insn (operands[0], lowpart_subreg (V4HFmode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "vec_fmsubaddv4hf4"
+ [(match_operand:V4HF 0 "register_operand")
+ (match_operand:V4HF 1 "nonimmediate_operand")
+ (match_operand:V4HF 2 "nonimmediate_operand")
+ (match_operand:V4HF 3 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx op3 = gen_reg_rtx (V8HFmode);
+ rtx op2 = gen_reg_rtx (V8HFmode);
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_movq_v4hf_to_sse (op3, operands[3]));
+ emit_insn (gen_movq_v4hf_to_sse (op2, operands[2]));
+ emit_insn (gen_movq_v4hf_to_sse (op1, operands[1]));
+
+ emit_insn (gen_vec_fmsubaddv8hf4 (op0, op1, op2, op3));
+
+ emit_move_insn (operands[0], lowpart_subreg (V4HFmode, op0, V8HFmode));
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision floating point conversion operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "fix<fixunssuffix>_trunc<mode><mmxintvecmodelower>2"
+ [(set (match_operand:<mmxintvecmode> 0 "register_operand")
+ (any_fix:<mmxintvecmode>
+ (match_operand:VHF_32_64 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+
+ emit_insn (gen_fix<fixunssuffix>_truncv8hfv8hi2 (op0, op1));
+
+ emit_move_insn (operands[0],
+ lowpart_subreg (<mmxintvecmode>mode, op0, V8HImode));
+ DONE;
+})
+
+(define_expand "fix<fixunssuffix>_truncv2hfv2si2"
+ [(set (match_operand:V2SI 0 "register_operand")
+ (any_fix:V2SI
+ (match_operand:V2HF 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && TARGET_MMX_WITH_SSE && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_movd_v2hf_to_sse (op1, operands[1]));
+
+ emit_insn (gen_avx512fp16_fix<fixunssuffix>_truncv4si2 (op0, op1));
+
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ DONE;
+})
+
+(define_expand "float<floatunssuffix><mmxintvecmodelower><mode>2"
+ [(set (match_operand:VHF_32_64 0 "register_operand")
+ (any_float:VHF_32_64
+ (match_operand:<mmxintvecmode> 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HImode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ rtx (*gen_movd_sse) (rtx, rtx)
+ = gen_mov<mov_to_sse_suffix>_<mmxintvecmodelower>_to_sse;
+ emit_insn (gen_movd_sse (op1, operands[1]));
+
+ emit_insn (gen_float<floatunssuffix>v8hiv8hf2 (op0, op1));
+
+ emit_move_insn (operands[0],
+ lowpart_subreg (<MODE>mode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "float<floatunssuffix>v2siv2hf2"
+ [(set (match_operand:V2HF 0 "register_operand")
+ (any_float:V2HF
+ (match_operand:V2SI 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && TARGET_MMX_WITH_SSE && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V4SImode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_movq_v2si_to_sse (op1, operands[1]));
+
+ emit_insn (gen_avx512fp16_float<floatunssuffix>v4siv4hf2 (op0, op1));
+
+ emit_move_insn (operands[0], lowpart_subreg (V2HFmode, op0, V8HFmode));
+ DONE;
+})
+
+(define_expand "extendv2hfv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (float_extend:V2SF
+ (match_operand:V2HF 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && TARGET_MMX_WITH_SSE && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V8HFmode);
+ rtx op0 = gen_reg_rtx (V4SFmode);
+
+ emit_insn (gen_movd_v2hf_to_sse (op1, operands[1]));
+
+ emit_insn (gen_avx512fp16_float_extend_phv4sf2 (op0, op1));
+
+ emit_move_insn (operands[0], lowpart_subreg (V2SFmode, op0, V4SFmode));
+ DONE;
+})
+
+(define_expand "truncv2sfv2hf2"
+ [(set (match_operand:V2HF 0 "register_operand")
+ (float_truncate:V2HF
+ (match_operand:V2SF 1 "nonimmediate_operand")))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && TARGET_MMX_WITH_SSE && ix86_partial_vec_fp_math"
+{
+ rtx op1 = gen_reg_rtx (V4SFmode);
+ rtx op0 = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_movq_v2sf_to_sse (op1, operands[1]));
+
+ emit_insn (gen_avx512fp16_truncv4sfv4hf2 (op0, op1));
+
+ emit_move_insn (operands[0], lowpart_subreg (V2HFmode, op0, V8HFmode));
+ DONE;
+})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
@@ -3952,7 +4548,7 @@
[(set (match_operand:V2SI 0 "register_operand" "=x,Yv")
(vec_merge:V2SI
(vec_duplicate:V2SI
- (match_operand:SI 2 "nonimmediate_operand" "rm,rm"))
+ (match_operand:SI 2 "nonimmediate_operand" "jrjm,rm"))
(match_operand:V2SI 1 "register_operand" "0,Yv")
(match_operand:SI 3 "const_int_operand")))]
"TARGET_SSE4_1 && TARGET_MMX_WITH_SSE
@@ -3971,6 +4567,7 @@
}
}
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix_extra" "1")
(set_attr "type" "sselog")
(set_attr "length_immediate" "1")
@@ -4031,7 +4628,7 @@
[(set (match_operand:V8QI 0 "register_operand" "=x,YW")
(vec_merge:V8QI
(vec_duplicate:V8QI
- (match_operand:QI 2 "nonimmediate_operand" "rm,rm"))
+ (match_operand:QI 2 "nonimmediate_operand" "jrjm,rm"))
(match_operand:V8QI 1 "register_operand" "0,YW")
(match_operand:SI 3 "const_int_operand")))]
"TARGET_SSE4_1 && TARGET_MMX_WITH_SSE
@@ -4057,28 +4654,31 @@
}
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
(define_insn "*mmx_pextrw"
- [(set (match_operand:HI 0 "register_sse4nonimm_operand" "=r,r,m")
+ [(set (match_operand:HI 0 "register_sse4nonimm_operand" "=r,r,jm,m")
(vec_select:HI
- (match_operand:V4HI 1 "register_operand" "y,YW,YW")
+ (match_operand:V4HI 1 "register_operand" "y,YW,YW,YW")
(parallel [(match_operand:SI 2 "const_0_to_3_operand")])))]
"(TARGET_MMX || TARGET_MMX_WITH_SSE)
&& (TARGET_SSE || TARGET_3DNOW_A)"
"@
pextrw\t{%2, %1, %k0|%k0, %1, %2}
%vpextrw\t{%2, %1, %k0|%k0, %1, %2}
- %vpextrw\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "*,sse2,sse4")
- (set_attr "mmx_isa" "native,*,*")
- (set_attr "type" "mmxcvt,sselog1,sselog1")
+ pextrw\t{%2, %1, %0|%0, %1, %2}
+ vpextrw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "*,sse2,sse4_noavx,avx")
+ (set_attr "gpr32" "1,1,0,1")
+ (set_attr "mmx_isa" "native,*,*,*")
+ (set_attr "type" "mmxcvt,sselog1,sselog1,sselog1")
(set_attr "length_immediate" "1")
- (set_attr "prefix" "orig,maybe_vex,maybe_vex")
- (set_attr "mode" "DI,TI,TI")])
+ (set_attr "prefix" "orig,maybe_vex,maybe_vex,maybe_evex")
+ (set_attr "mode" "DI,TI,TI,TI")])
(define_insn "*mmx_pextrw_zext"
[(set (match_operand:SWI48 0 "register_operand" "=r,r")
@@ -4099,29 +4699,34 @@
(set_attr "mode" "DI,TI")])
(define_insn "*mmx_pextrb"
- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=jr,jm,r,m")
(vec_select:QI
- (match_operand:V8QI 1 "register_operand" "YW,YW")
+ (match_operand:V8QI 1 "register_operand" "YW,YW,YW,YW")
(parallel [(match_operand:SI 2 "const_0_to_7_operand")])))]
"TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
"@
- %vpextrb\t{%2, %1, %k0|%k0, %1, %2}
- %vpextrb\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "type" "sselog1")
+ pextrb\t{%2, %1, %k0|%k0, %1, %2}
+ pextrb\t{%2, %1, %0|%0, %1, %2}
+ vpextrb\t{%2, %1, %k0|%k0, %1, %2}
+ vpextrb\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,noavx,avx,avx")
+ (set_attr "gpr32" "1,0,1,1")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
(define_insn "*mmx_pextrb_zext"
- [(set (match_operand:SWI248 0 "register_operand" "=r")
+ [(set (match_operand:SWI248 0 "register_operand" "=jr,r")
(zero_extend:SWI248
(vec_select:QI
- (match_operand:V8QI 1 "register_operand" "YW")
+ (match_operand:V8QI 1 "register_operand" "YW,YW")
(parallel [(match_operand:SI 2 "const_0_to_7_operand")]))))]
"TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
"%vpextrb\t{%2, %1, %k0|%k0, %1, %2}"
- [(set_attr "type" "sselog1")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -4131,13 +4736,14 @@
[(set (match_operand:V8QI 0 "register_operand" "=x,Yw")
(unspec:V8QI
[(match_operand:V8QI 1 "register_operand" "0,Yw")
- (match_operand:V16QI 2 "vector_operand" "xBm,Ywm")]
+ (match_operand:V16QI 2 "vector_operand" "xja,Ywm")]
UNSPEC_PSHUFB))]
"TARGET_SSSE3 && TARGET_MMX_WITH_SSE"
"@
pshufb\t{%2, %0|%0, %2}
vpshufb\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
@@ -4148,13 +4754,14 @@
[(set (match_operand:V4QI 0 "register_operand" "=x,Yw")
(unspec:V4QI
[(match_operand:V4QI 1 "register_operand" "0,Yw")
- (match_operand:V16QI 2 "vector_operand" "xBm,Ywm")]
+ (match_operand:V16QI 2 "vector_operand" "xja,Ywm")]
UNSPEC_PSHUFB))]
"TARGET_SSSE3"
"@
pshufb\t{%2, %0|%0, %2}
vpshufb\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
@@ -4414,29 +5021,31 @@
;; Avoid combining registers from different units in a single alternative,
;; see comment above inline_secondary_memory_needed function in i386.cc
(define_insn "*vec_extractv2si_1"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=y,rm,x,x,y,x,r")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=y,jrjm,rm,x,x,y,x,r")
(vec_select:SI
- (match_operand:V2SI 1 "nonimmediate_operand" " 0,x ,x,0,o,o,o")
+ (match_operand:V2SI 1 "nonimmediate_operand" " 0,x, x ,x,0,o,o,o")
(parallel [(const_int 1)])))]
"(TARGET_MMX || TARGET_MMX_WITH_SSE)
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"@
punpckhdq\t%0, %0
- %vpextrd\t{$1, %1, %0|%0, %1, 1}
+ pextrd\t{$1, %1, %0|%0, %1, 1}
+ vpextrd\t{$1, %1, %0|%0, %1, 1}
%vpshufd\t{$0xe5, %1, %0|%0, %1, 0xe5}
shufps\t{$0xe5, %0, %0|%0, %0, 0xe5}
#
#
#"
- [(set_attr "isa" "*,sse4,sse2,noavx,*,*,*")
- (set_attr "mmx_isa" "native,*,*,*,native,*,*")
- (set_attr "type" "mmxcvt,ssemov,sseshuf1,sseshuf1,mmxmov,ssemov,imov")
+ [(set_attr "isa" "*,sse4_noavx,avx,sse2,noavx,*,*,*")
+ (set_attr "gpr32" "1,0,1,1,1,1,1,1")
+ (set_attr "mmx_isa" "native,*,*,*,*,native,*,*")
+ (set_attr "type" "mmxcvt,ssemov,ssemov,sseshuf1,sseshuf1,mmxmov,ssemov,imov")
(set (attr "length_immediate")
- (if_then_else (eq_attr "alternative" "1,2,3")
+ (if_then_else (eq_attr "alternative" "1,2,3,4")
(const_string "1")
(const_string "*")))
- (set_attr "prefix" "orig,maybe_vex,maybe_vex,orig,orig,orig,orig")
- (set_attr "mode" "DI,TI,TI,V4SF,SI,SI,SI")])
+ (set_attr "prefix" "orig,orig,maybe_evex,maybe_vex,orig,orig,orig,orig")
+ (set_attr "mode" "DI,TI,TI,TI,V4SF,SI,SI,SI")])
(define_split
[(set (match_operand:SI 0 "register_operand")
@@ -4448,15 +5057,16 @@
"operands[1] = adjust_address (operands[1], SImode, 4);")
(define_insn "*vec_extractv2si_1_zext"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=jr,r")
(zero_extend:DI
(vec_select:SI
- (match_operand:V2SI 1 "register_operand" "x")
+ (match_operand:V2SI 1 "register_operand" "x,x")
(parallel [(const_int 1)]))))]
"(TARGET_MMX || TARGET_MMX_WITH_SSE)
&& TARGET_64BIT && TARGET_SSE4_1"
"%vpextrd\t{$1, %1, %k0|%k0, %1, 1}"
- [(set_attr "type" "sselog1")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -4606,7 +5216,7 @@
[(set (match_operand:V4QI 0 "register_operand" "=x,YW")
(vec_merge:V4QI
(vec_duplicate:V4QI
- (match_operand:QI 2 "nonimmediate_operand" "rm,rm"))
+ (match_operand:QI 2 "nonimmediate_operand" "jrjm,rm"))
(match_operand:V4QI 1 "register_operand" "0,YW")
(match_operand:SI 3 "const_int_operand")))]
"TARGET_SSE4_1
@@ -4631,6 +5241,7 @@
}
}
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
@@ -4638,15 +5249,17 @@
(set_attr "mode" "TI")])
(define_insn "*pextrw"
- [(set (match_operand:HI 0 "register_sse4nonimm_operand" "=r,m")
+ [(set (match_operand:HI 0 "register_sse4nonimm_operand" "=r,jm,m")
(vec_select:HI
- (match_operand:V2HI 1 "register_operand" "YW,YW")
+ (match_operand:V2HI 1 "register_operand" "YW,YW,YW")
(parallel [(match_operand:SI 2 "const_0_to_1_operand")])))]
"TARGET_SSE2"
"@
%vpextrw\t{%2, %1, %k0|%k0, %1, %2}
- %vpextrw\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "*,sse4")
+ pextrw\t{%2, %1, %0|%0, %1, %2}
+ vpextrw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "*,sse4_noavx,avx")
+ (set_attr "gpr32" "1,0,1")
(set_attr "type" "sselog1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -4666,29 +5279,34 @@
(set_attr "mode" "TI")])
(define_insn "*pextrb"
- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=jr,jm,r,m")
(vec_select:QI
- (match_operand:V4QI 1 "register_operand" "YW,YW")
+ (match_operand:V4QI 1 "register_operand" "YW,YW,YW,YW")
(parallel [(match_operand:SI 2 "const_0_to_3_operand")])))]
"TARGET_SSE4_1"
"@
- %vpextrb\t{%2, %1, %k0|%k0, %1, %2}
- %vpextrb\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "type" "sselog1")
+ pextrb\t{%2, %1, %k0|%k0, %1, %2}
+ pextrb\t{%2, %1, %0|%0, %1, %2}
+ vpextrb\t{%2, %1, %k0|%k0, %1, %2}
+ vpextrb\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,noavx,avx,avx")
+ (set_attr "gpr32" "1,0,1,1")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
(define_insn "*pextrb_zext"
- [(set (match_operand:SWI248 0 "register_operand" "=r")
+ [(set (match_operand:SWI248 0 "register_operand" "=jr,r")
(zero_extend:SWI248
(vec_select:QI
- (match_operand:V4QI 1 "register_operand" "YW")
+ (match_operand:V4QI 1 "register_operand" "YW,YW")
(parallel [(match_operand:SI 2 "const_0_to_3_operand")]))))]
"TARGET_SSE4_1"
"%vpextrb\t{%2, %1, %k0|%k0, %1, %2}"
- [(set_attr "type" "sselog1")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -5145,13 +5763,14 @@
})
(define_insn_and_split "mmx_pmovmskb"
- [(set (match_operand:SI 0 "register_operand" "=r,r")
- (unspec:SI [(match_operand:V8QI 1 "register_operand" "y,x")]
+ [(set (match_operand:SI 0 "register_operand" "=r,r,jr")
+ (unspec:SI [(match_operand:V8QI 1 "register_operand" "y,x,x")]
UNSPEC_MOVMSK))]
"(TARGET_MMX || TARGET_MMX_WITH_SSE)
&& (TARGET_SSE || TARGET_3DNOW_A)"
"@
pmovmskb\t{%1, %0|%0, %1}
+ #
#"
"TARGET_SSE2 && reload_completed
&& SSE_REGNO_P (REGNO (operands[1]))"
@@ -5166,9 +5785,9 @@
operands[2] = lowpart_subreg (QImode, operands[0],
GET_MODE (operands[0]));
}
- [(set_attr "mmx_isa" "native,sse")
- (set_attr "type" "mmxcvt,ssemov")
- (set_attr "mode" "DI,TI")])
+ [(set_attr "mmx_isa" "native,sse_noavx,avx")
+ (set_attr "type" "mmxcvt,ssemov,ssemov")
+ (set_attr "mode" "DI,TI,TI")])
(define_expand "mmx_maskmovq"
[(set (match_operand:V8QI 0 "memory_operand")
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index 37d20c6..ef49efd 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -1276,7 +1276,8 @@
(and (match_code "vec_duplicate")
(and (match_test "TARGET_AVX512F")
(ior (match_test "TARGET_AVX512VL")
- (match_test "GET_MODE_SIZE (GET_MODE (op)) == 64")))
+ (and (match_test "GET_MODE_SIZE (GET_MODE (op)) == 64")
+ (match_test "TARGET_EVEX512"))))
(match_test "VALID_BCST_MODE_P (GET_MODE_INNER (GET_MODE (op)))")
(match_test "GET_MODE (XEXP (op, 0))
== GET_MODE_INNER (GET_MODE (op))")
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 80b43fd..c988935 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -253,62 +253,62 @@
;; All vector modes including V?TImode, used in move patterns.
(define_mode_iterator VMOVE
- [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
- (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX") V1TI
- (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
+ (V4TI "TARGET_AVX512F && TARGET_EVEX512") (V2TI "TARGET_AVX") V1TI
+ (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
;; All AVX-512{F,VL} vector modes without HF. Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48_AVX512VL
- [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator V48_256_512_AVX512VL
- [V16SI (V8SI "TARGET_AVX512VL")
- V8DI (V4DI "TARGET_AVX512VL")
- V16SF (V8SF "TARGET_AVX512VL")
- V8DF (V4DF "TARGET_AVX512VL")])
+ [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL")
+ (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL")
+ (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")
+ (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL")])
;; All AVX-512{F,VL} vector modes. Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48H_AVX512VL
- [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- (V32HF "TARGET_AVX512FP16")
+ [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
;; 1,2 byte AVX-512{BW,VL} vector modes. Supposed TARGET_AVX512BW baseline.
(define_mode_iterator VI12_AVX512VL
- [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
- V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
+ [(V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
(define_mode_iterator VI12HFBF_AVX512VL
- [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
- V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
- V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
- V32BF (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
+ [(V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
+ (V32HF "TARGET_EVEX512") (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
+ (V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
(define_mode_iterator VI1_AVX512VL
- [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")])
+ [(V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")])
;; All vector modes
(define_mode_iterator V
- [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All 128bit vector modes
(define_mode_iterator V_128
@@ -324,37 +324,51 @@
V16HF V8HF V8SF V4SF V4DF V2DF])
;; All 512bit vector modes
-(define_mode_iterator V_512 [V64QI V32HI V16SI V8DI V16SF V8DF V32HF V32BF])
+(define_mode_iterator V_512
+ [(V64QI "TARGET_EVEX512") (V32HI "TARGET_EVEX512")
+ (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
+ (V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
+ (V32HF "TARGET_EVEX512") (V32BF "TARGET_EVEX512")])
;; All 256bit and 512bit vector modes
(define_mode_iterator V_256_512
[V32QI V16HI V16HF V16BF V8SI V4DI V8SF V4DF
- (V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F") (V32HF "TARGET_AVX512F")
- (V32BF "TARGET_AVX512F") (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
- (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")])
+ (V64QI "TARGET_AVX512F && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512")
+ (V32HF "TARGET_AVX512F && TARGET_EVEX512")
+ (V32BF "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")])
;; All vector float modes
(define_mode_iterator VF
- [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX")
+ (V2DF "TARGET_SSE2")])
(define_mode_iterator VF1_VF2_AVX512DQ
- [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512DQ") (V4DF "TARGET_AVX512DQ && TARGET_AVX512VL")
+ [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512DQ && TARGET_EVEX512")
+ (V4DF "TARGET_AVX512DQ && TARGET_AVX512VL")
(V2DF "TARGET_AVX512DQ && TARGET_AVX512VL")])
(define_mode_iterator VFH
- [(V32HF "TARGET_AVX512FP16")
+ [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX")
+ (V2DF "TARGET_SSE2")])
;; 128-, 256- and 512-bit float vector modes for bitwise operations
(define_mode_iterator VFB
- [(V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") (V8HF "TARGET_SSE2")
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ [(V32HF "TARGET_AVX512F && TARGET_EVEX512")
+ (V16HF "TARGET_AVX") (V8HF "TARGET_SSE2")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; 128- and 256-bit float vector modes
(define_mode_iterator VF_128_256
@@ -369,38 +383,38 @@
;; All SFmode vector float modes
(define_mode_iterator VF1
- [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF])
+ [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF])
(define_mode_iterator VF1_AVX2
- [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX2") V4SF])
+ [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX2") V4SF])
;; 128- and 256-bit SF vector modes
(define_mode_iterator VF1_128_256
[(V8SF "TARGET_AVX") V4SF])
(define_mode_iterator VF1_128_256VL
- [V8SF (V4SF "TARGET_AVX512VL")])
+ [(V8SF "TARGET_EVEX512") (V4SF "TARGET_AVX512VL")])
;; All DFmode vector float modes
(define_mode_iterator VF2
- [(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
+ [(V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
;; All DFmode & HFmode vector float modes
(define_mode_iterator VF2H
- [(V32HF "TARGET_AVX512FP16")
+ [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
;; 128- and 256-bit DF vector modes
(define_mode_iterator VF2_128_256
[(V4DF "TARGET_AVX") V2DF])
(define_mode_iterator VF2_512_256
- [(V8DF "TARGET_AVX512F") V4DF])
+ [(V8DF "TARGET_AVX512F && TARGET_EVEX512") V4DF])
(define_mode_iterator VF2_512_256VL
- [V8DF (V4DF "TARGET_AVX512VL")])
+ [(V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL")])
;; All 128bit vector SF/DF modes
(define_mode_iterator VF_128
@@ -417,82 +431,88 @@
;; All 512bit vector float modes
(define_mode_iterator VF_512
- [V16SF V8DF])
+ [(V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")])
;; All 512bit vector float modes for bitwise operations
(define_mode_iterator VFB_512
- [V32HF V16SF V8DF])
+ [(V32HF "TARGET_EVEX512") (V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")])
(define_mode_iterator V4SF_V8HF
[V4SF V8HF])
(define_mode_iterator VI48_AVX512VL
- [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI1248_AVX512VLBW
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX512VL && TARGET_AVX512BW")
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32QI "TARGET_AVX512VL && TARGET_AVX512BW")
(V16QI "TARGET_AVX512VL && TARGET_AVX512BW")
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
(V8HI "TARGET_AVX512VL && TARGET_AVX512BW")
- V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ (V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VF_AVX512VL
- [V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ [(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
;; AVX512ER SF plus 128- and 256-bit SF vector modes
(define_mode_iterator VF1_AVX512ER_128_256
[(V16SF "TARGET_AVX512ER") (V8SF "TARGET_AVX") V4SF])
(define_mode_iterator VFH_AVX512VL
- [(V32HF "TARGET_AVX512FP16")
+ [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VF2_AVX512VL
- [V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ [(V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VF1_AVX512VL
- [V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])
+ [(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])
-(define_mode_iterator VHFBF [V32HF V16HF V8HF V32BF V16BF V8BF])
+(define_mode_iterator VHFBF
+ [(V32HF "TARGET_EVEX512") V16HF V8HF
+ (V32BF "TARGET_EVEX512") V16BF V8BF])
(define_mode_iterator VHFBF_256 [V16HF V16BF])
(define_mode_iterator VHFBF_128 [V8HF V8BF])
(define_mode_iterator VHF_AVX512VL
- [V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])
+ [(V32HF "TARGET_EVEX512") (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])
(define_mode_iterator VHFBF_AVX512VL
- [V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
- V32BF (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
+ [(V32HF "TARGET_EVEX512") (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
+ (V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
;; All vector integer modes
(define_mode_iterator VI
- [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
- (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
+ [(V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
;; All vector integer and HF modes
(define_mode_iterator VIHFBF
- [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
- (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
+ [(V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512BW") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512BW") (V16BF "TARGET_AVX") V8BF])
+ (V32HF "TARGET_AVX512BW && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512BW && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF])
(define_mode_iterator VI_AVX2
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
;; All QImode vector integer modes
(define_mode_iterator VI1
@@ -510,44 +530,44 @@
(V8SI "TARGET_AVX") (V4DI "TARGET_AVX")])
(define_mode_iterator VI8
- [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
+ [(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI8_FVL
- [(V8DI "TARGET_AVX512F") V4DI (V2DI "TARGET_AVX512VL")])
+ [(V8DI "TARGET_AVX512F && TARGET_EVEX512") V4DI (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI8_AVX512VL
- [V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ [(V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI8_256_512
- [V8DI (V4DI "TARGET_AVX512VL")])
+ [(V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL")])
(define_mode_iterator VI1_AVX2
[(V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI1_AVX512
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI])
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI1_AVX512F
- [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI])
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI])
(define_mode_iterator VI1_AVX512VNNI
- [(V64QI "TARGET_AVX512VNNI") (V32QI "TARGET_AVX2") V16QI])
+ [(V64QI "TARGET_AVX512VNNI && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI12_256_512_AVX512VL
- [V64QI (V32QI "TARGET_AVX512VL")
- V32HI (V16HI "TARGET_AVX512VL")])
+ [(V64QI "TARGET_EVEX512") (V32QI "TARGET_AVX512VL")
+ (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL")])
(define_mode_iterator VI2_AVX2
[(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX2_AVX512BW
- [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
+ [(V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX512F
- [(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI])
+ [(V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX512VNNIBW
- [(V32HI "TARGET_AVX512BW || TARGET_AVX512VNNI")
+ [(V32HI "(TARGET_AVX512BW || TARGET_AVX512VNNI) && TARGET_EVEX512")
(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI4_AVX
@@ -557,61 +577,62 @@
[(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI4_AVX512F
- [(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])
+ [(V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI4_AVX512VL
- [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
+ [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
(define_mode_iterator VI48_AVX512F_AVX512VL
- [V4SI V8SI (V16SI "TARGET_AVX512F")
- (V2DI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL") (V8DI "TARGET_AVX512F")])
+ [V4SI V8SI (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V2DI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")])
(define_mode_iterator VI2_AVX512VL
- [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI])
+ [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") (V32HI "TARGET_EVEX512")])
(define_mode_iterator VI2HFBF_AVX512VL
- [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI
- (V8HF "TARGET_AVX512VL") (V16HF "TARGET_AVX512VL") V32HF
- (V8BF "TARGET_AVX512VL") (V16BF "TARGET_AVX512VL") V32BF])
+ [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") (V32HI "TARGET_EVEX512")
+ (V8HF "TARGET_AVX512VL") (V16HF "TARGET_AVX512VL") (V32HF "TARGET_EVEX512")
+ (V8BF "TARGET_AVX512VL") (V16BF "TARGET_AVX512VL") (V32BF "TARGET_EVEX512")])
(define_mode_iterator VI2H_AVX512VL
- [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI
- (V8SI "TARGET_AVX512VL") V16SI
- V8DI ])
+ [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") (V32HI "TARGET_EVEX512")
+ (V8SI "TARGET_AVX512VL") (V16SI "TARGET_EVEX512")
+ (V8DI "TARGET_EVEX512")])
(define_mode_iterator VI1_AVX512VL_F
- [V32QI (V16QI "TARGET_AVX512VL") (V64QI "TARGET_AVX512F")])
+ [V32QI (V16QI "TARGET_AVX512VL") (V64QI "TARGET_AVX512F && TARGET_EVEX512")])
(define_mode_iterator VI8_AVX2_AVX512BW
- [(V8DI "TARGET_AVX512BW") (V4DI "TARGET_AVX2") V2DI])
+ [(V8DI "TARGET_AVX512BW && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX2
[(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX2_AVX512F
- [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
+ [(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX_AVX512F
- [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")])
+ [(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX")])
(define_mode_iterator VI4_128_8_256
[V4SI V4DI])
;; All V8D* modes
(define_mode_iterator V8FI
- [V8DF V8DI])
+ [(V8DF "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
;; All V16S* modes
(define_mode_iterator V16FI
- [V16SF V16SI])
+ [(V16SF "TARGET_EVEX512") (V16SI "TARGET_EVEX512")])
;; ??? We should probably use TImode instead.
(define_mode_iterator VIMAX_AVX2_AVX512BW
- [(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX2") V1TI])
+ [(V4TI "TARGET_AVX512BW && TARGET_EVEX512") (V2TI "TARGET_AVX2") V1TI])
;; Suppose TARGET_AVX512BW as baseline
(define_mode_iterator VIMAX_AVX512VL
- [V4TI (V2TI "TARGET_AVX512VL") (V1TI "TARGET_AVX512VL")])
+ [(V4TI "TARGET_EVEX512") (V2TI "TARGET_AVX512VL") (V1TI "TARGET_AVX512VL")])
(define_mode_iterator VIMAX_AVX2
[(V2TI "TARGET_AVX2") V1TI])
@@ -621,17 +642,17 @@
(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI12_AVX2_AVX512BW
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI24_AVX2
[(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2_24_AVX512F_1_AVX512BW
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2
[(V32QI "TARGET_AVX2") V16QI
@@ -639,17 +660,17 @@
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI248_AVX512VL
- [V32HI V16SI V8DI
+ [(V32HI "TARGET_EVEX512") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
(V16HI "TARGET_AVX512VL") (V8SI "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI248_AVX512VLBW
- [(V32HI "TARGET_AVX512BW")
+ [(V32HI "TARGET_AVX512BW && TARGET_EVEX512")
(V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
(V8HI "TARGET_AVX512VL && TARGET_AVX512BW")
- V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ (V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI48_AVX2
[(V8SI "TARGET_AVX2") V4SI
@@ -661,16 +682,17 @@
(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI248_AVX2_8_AVX512F_24_AVX512BW
- [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512BW") (V8SI "TARGET_AVX2") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
+ [(V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512BW && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI248_AVX512BW
- [(V32HI "TARGET_AVX512BW") V16SI V8DI])
+ [(V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16SI "TARGET_EVEX512")
+ (V8DI "TARGET_EVEX512")])
(define_mode_iterator VI248_AVX512BW_AVX512VL
- [(V32HI "TARGET_AVX512BW")
- (V4DI "TARGET_AVX512VL") V16SI V8DI])
+ [(V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V4DI "TARGET_AVX512VL") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
;; Suppose TARGET_AVX512VL as baseline
(define_mode_iterator VI248_AVX512BW_1
@@ -684,16 +706,16 @@
V4DI V2DI])
(define_mode_iterator VI48_AVX512F
- [(V16SI "TARGET_AVX512F") V8SI V4SI
- (V8DI "TARGET_AVX512F") V4DI V2DI])
+ [(V16SI "TARGET_AVX512F && TARGET_EVEX512") V8SI V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") V4DI V2DI])
(define_mode_iterator VI48_AVX_AVX512F
- [(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
+ [(V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI12_AVX_AVX512F
- [ (V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI])
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI])
(define_mode_iterator V48_128_256
[V4SF V2DF
@@ -832,9 +854,11 @@
(define_mode_iterator VI24_128 [V8HI V4SI])
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
(define_mode_iterator VI248_256 [V16HI V8SI V4DI])
-(define_mode_iterator VI248_512 [V32HI V16SI V8DI])
+(define_mode_iterator VI248_512
+ [(V32HI "TARGET_EVEX512") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
(define_mode_iterator VI48_128 [V4SI V2DI])
-(define_mode_iterator VI148_512 [V64QI V16SI V8DI])
+(define_mode_iterator VI148_512
+ [(V64QI "TARGET_EVEX512") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
(define_mode_iterator VI148_256 [V32QI V8SI V4DI])
(define_mode_iterator VI148_128 [V16QI V4SI V2DI])
@@ -842,66 +866,75 @@
(define_mode_iterator VI124_256 [V32QI V16HI V8SI])
(define_mode_iterator VI124_256_AVX512F_AVX512BW
[V32QI V16HI V8SI
- (V64QI "TARGET_AVX512BW")
- (V32HI "TARGET_AVX512BW")
- (V16SI "TARGET_AVX512F")])
+ (V64QI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512")])
(define_mode_iterator VI48_256 [V8SI V4DI])
-(define_mode_iterator VI48_512 [V16SI V8DI])
+(define_mode_iterator VI48_512
+ [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
(define_mode_iterator VI4_256_8_512 [V8SI V8DI])
(define_mode_iterator VI_AVX512BW
- [V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")])
+ [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512BW && TARGET_EVEX512")])
(define_mode_iterator VIHFBF_AVX512BW
- [V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")
- (V32HF "TARGET_AVX512BW") (V32BF "TARGET_AVX512BW")])
+ [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32HF "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32BF "TARGET_AVX512BW && TARGET_EVEX512")])
;; Int-float size matches
-(define_mode_iterator VI2F_256_512 [V16HI V32HI V16HF V32HF V16BF V32BF])
+(define_mode_iterator VI2F_256_512
+ [V16HI (V32HI "TARGET_EVEX512")
+ V16HF (V32HF "TARGET_EVEX512")
+ V16BF (V32BF "TARGET_EVEX512")])
(define_mode_iterator VI4F_128 [V4SI V4SF])
(define_mode_iterator VI8F_128 [V2DI V2DF])
(define_mode_iterator VI4F_256 [V8SI V8SF])
(define_mode_iterator VI8F_256 [V4DI V4DF])
(define_mode_iterator VI4F_256_512
[V8SI V8SF
- (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")])
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")])
(define_mode_iterator VI48F_256_512
[V8SI V8SF
- (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
- (V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
- (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
-(define_mode_iterator VF48_I1248
- [V16SI V16SF V8DI V8DF V32HI V64QI])
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
(define_mode_iterator VF48H_AVX512VL
- [V8DF V16SF (V8SF "TARGET_AVX512VL")])
+ [(V8DF "TARGET_EVEX512") (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")])
(define_mode_iterator VF48_128
[V2DF V4SF])
(define_mode_iterator VI48F
- [V16SI V16SF V8DI V8DF
+ [(V16SI "TARGET_EVEX512") (V16SF "TARGET_EVEX512")
+ (V8DI "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VI12_VI48F_AVX512VL
- [(V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
- (V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
+ [(V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
- V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
- V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
+ (V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
(define_mode_iterator VI48F_256 [V8SI V8SF V4DI V4DF])
-(define_mode_iterator VF_AVX512
- [(V4SF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
- (V8SF "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
- V16SF V8DF])
-
(define_mode_iterator V8_128 [V8HI V8HF V8BF])
(define_mode_iterator V16_256 [V16HI V16HF V16BF])
-(define_mode_iterator V32_512 [V32HI V32HF V32BF])
+(define_mode_iterator V32_512
+ [(V32HI "TARGET_EVEX512") (V32HF "TARGET_EVEX512") (V32BF "TARGET_EVEX512")])
;; Mapping from float mode to required SSE level
(define_mode_attr sse
@@ -1295,7 +1328,8 @@
;; Mix-n-match
(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
-(define_mode_iterator AVX512MODE2P [V16SI V16SF V8DF])
+(define_mode_iterator AVX512MODE2P
+ [(V16SI "TARGET_EVEX512") (V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")])
;; Mapping for dbpsabbw modes
(define_mode_attr dbpsadbwmode
@@ -1833,12 +1867,14 @@
"operands[4] = adjust_address (operands[0], V2DFmode, 0);")
(define_insn "<sse3>_lddqu<avxsizesuffix>"
- [(set (match_operand:VI1 0 "register_operand" "=x")
- (unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m")]
+ [(set (match_operand:VI1 0 "register_operand" "=x,x")
+ (unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m,jm")]
UNSPEC_LDDQU))]
"TARGET_SSE3"
"%vlddqu\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
+ (set_attr "gpr32" "1,0")
(set_attr "movu" "1")
(set (attr "prefix_data16")
(if_then_else
@@ -1897,9 +1933,11 @@
(define_mode_iterator STORENT_MODE
[(DI "TARGET_SSE2 && TARGET_64BIT") (SI "TARGET_SSE2")
(SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") (V2DI "TARGET_SSE2")
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V4DI "TARGET_AVX") (V2DI "TARGET_SSE2")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
(define_expand "storent<mode>"
[(set (match_operand:STORENT_MODE 0 "memory_operand")
@@ -1916,17 +1954,19 @@
;; All integer modes with AVX512BW/DQ.
(define_mode_iterator SWI1248_AVX512BWDQ
- [(QI "TARGET_AVX512DQ") HI (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")])
+ [(QI "TARGET_AVX512DQ") HI (SI "TARGET_AVX512BW")
+ (DI "TARGET_AVX512BW && TARGET_EVEX512")])
;; All integer modes with AVX512BW, where HImode operation
;; can be used instead of QImode.
(define_mode_iterator SWI1248_AVX512BW
- [QI HI (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")])
+ [QI HI (SI "TARGET_AVX512BW")
+ (DI "TARGET_AVX512BW && TARGET_EVEX512")])
;; All integer modes with AVX512BW/DQ, even HImode requires DQ.
(define_mode_iterator SWI1248_AVX512BWDQ2
[(QI "TARGET_AVX512DQ") (HI "TARGET_AVX512DQ")
- (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")])
+ (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW && TARGET_EVEX512")])
(define_expand "kmov<mskmodesuffix>"
[(set (match_operand:SWI1248_AVX512BWDQ 0 "nonimmediate_operand")
@@ -2065,7 +2105,7 @@
(zero_extend:DI
(not:SI (match_operand:SI 1 "register_operand" "k"))))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"knotd\t{%1, %0|%0, %1}";
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
@@ -2075,7 +2115,7 @@
[(set (match_operand:DI 0 "mask_reg_operand")
(zero_extend:DI
(not:SI (match_operand:SI 1 "mask_reg_operand"))))]
- "TARGET_AVX512BW && reload_completed"
+ "TARGET_AVX512BW && TARGET_EVEX512 && reload_completed"
[(parallel
[(set (match_dup 0)
(zero_extend:DI
@@ -2181,7 +2221,7 @@
(const_int 32))
(zero_extend:DI (match_operand:SI 2 "register_operand" "k"))))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"kunpckdq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "mode" "DI")])
@@ -2507,12 +2547,14 @@
(set_attr "mode" "<MODE>")])
(define_insn "<sse>_rcp<mode>2"
- [(set (match_operand:VF1_128_256 0 "register_operand" "=x")
+ [(set (match_operand:VF1_128_256 0 "register_operand" "=x,x")
(unspec:VF1_128_256
- [(match_operand:VF1_128_256 1 "vector_operand" "xBm")] UNSPEC_RCP))]
+ [(match_operand:VF1_128_256 1 "vector_operand" "xBm,xja")] UNSPEC_RCP))]
"TARGET_SSE"
"%vrcpps\t{%1, %0|%0, %1}"
- [(set_attr "type" "sse")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
@@ -2521,7 +2563,7 @@
(define_insn "sse_vmrcpv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
- (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")]
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xjm")]
UNSPEC_RCP)
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
@@ -2531,6 +2573,7 @@
vrcpss\t{%1, %2, %0|%0, %2, %k1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "orig,vex")
@@ -2540,7 +2583,7 @@
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
(vec_duplicate:V4SF
- (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "xm,xm")]
+ (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "xm,xjm")]
UNSPEC_RCP))
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
@@ -2550,6 +2593,7 @@
vrcpss\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "orig,vex")
@@ -2726,12 +2770,14 @@
"TARGET_AVX512FP16")
(define_insn "<sse>_rsqrt<mode>2"
- [(set (match_operand:VF1_128_256 0 "register_operand" "=x")
+ [(set (match_operand:VF1_128_256 0 "register_operand" "=x,x")
(unspec:VF1_128_256
- [(match_operand:VF1_128_256 1 "vector_operand" "xBm")] UNSPEC_RSQRT))]
+ [(match_operand:VF1_128_256 1 "vector_operand" "xBm,xja")] UNSPEC_RSQRT))]
"TARGET_SSE"
"%vrsqrtps\t{%1, %0|%0, %1}"
- [(set_attr "type" "sse")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
@@ -2790,7 +2836,7 @@
(define_insn "sse_vmrsqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
- (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")]
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xjm")]
UNSPEC_RSQRT)
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
@@ -2800,6 +2846,7 @@
vrsqrtss\t{%1, %2, %0|%0, %2, %k1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
@@ -2807,7 +2854,7 @@
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
(vec_duplicate:V4SF
- (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "xm,xm")]
+ (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "xm,xjm")]
UNSPEC_RSQRT))
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
@@ -2817,6 +2864,7 @@
vrsqrtss\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
@@ -2992,7 +3040,7 @@
(vec_merge:VF_128_256
(minus:VF_128_256
(match_operand:VF_128_256 1 "register_operand" "0,x")
- (match_operand:VF_128_256 2 "vector_operand" "xBm, xm"))
+ (match_operand:VF_128_256 2 "vector_operand" "xBm, xjm"))
(plus:VF_128_256 (match_dup 1) (match_dup 2))
(const_int <addsub_cst>)))]
"TARGET_SSE3"
@@ -3001,6 +3049,7 @@
vaddsub<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
+ (set_attr "gpr32" "1,0")
(set (attr "atom_unit")
(if_then_else
(match_test "<MODE>mode == V2DFmode")
@@ -3144,7 +3193,7 @@
(vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
(plusminus:DF
(vec_select:DF
- (match_operand:V4DF 2 "nonimmediate_operand" "xm")
+ (match_operand:V4DF 2 "nonimmediate_operand" "xjm")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 1)]))))
(vec_concat:V2DF
@@ -3157,6 +3206,7 @@
"TARGET_AVX"
"vh<plusminus_mnemonic>pd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "V4DF")])
@@ -3187,7 +3237,7 @@
(parallel [(match_operand:SI 4 "const_0_to_1_operand")])))
(plus:DF
(vec_select:DF
- (match_operand:V2DF 2 "vector_operand" "xBm,xm")
+ (match_operand:V2DF 2 "vector_operand" "xBm,xjm")
(parallel [(match_operand:SI 5 "const_0_to_1_operand")]))
(vec_select:DF
(match_dup 2)
@@ -3199,6 +3249,7 @@
haddpd\t{%2, %0|%0, %2}
vhaddpd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "V2DF")])
@@ -3213,7 +3264,7 @@
(vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
(minus:DF
(vec_select:DF
- (match_operand:V2DF 2 "vector_operand" "xBm,xm")
+ (match_operand:V2DF 2 "vector_operand" "xBm,xjm")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))]
"TARGET_SSE3"
@@ -3222,6 +3273,7 @@
vhsubpd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
+ (set_attr "gpr32" "1,0")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "V2DF")])
@@ -3278,7 +3330,7 @@
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF
- (match_operand:V8SF 2 "nonimmediate_operand" "xm")
+ (match_operand:V8SF 2 "nonimmediate_operand" "xjm")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
(plusminus:SF
@@ -3302,6 +3354,7 @@
"TARGET_AVX"
"vh<plusminus_mnemonic>ps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
@@ -3320,7 +3373,7 @@
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF
- (match_operand:V4SF 2 "vector_operand" "xBm,xm")
+ (match_operand:V4SF 2 "vector_operand" "xBm,xjm")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
(plusminus:SF
@@ -3332,6 +3385,7 @@
vh<plusminus_mnemonic>ps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
+ (set_attr "gpr32" "1,0")
(set_attr "atom_unit" "complex")
(set_attr "prefix" "orig,vex")
(set_attr "prefix_rep" "1,*")
@@ -3377,9 +3431,11 @@
(define_mode_iterator REDUC_PLUS_MODE
[(V4DF "TARGET_AVX") (V8SF "TARGET_AVX")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V8DF "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
- (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V32QI "TARGET_AVX") (V64QI "TARGET_AVX512F")])
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL && TARGET_EVEX512")
+ (V32QI "TARGET_AVX")
+ (V64QI "TARGET_AVX512F && TARGET_EVEX512")])
(define_expand "reduc_plus_scal_<mode>"
[(plus:REDUC_PLUS_MODE
@@ -3421,11 +3477,13 @@
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
(V8SF "TARGET_AVX") (V4DF "TARGET_AVX")
- (V64QI "TARGET_AVX512BW")
- (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V32HI "TARGET_AVX512BW") (V16SI "TARGET_AVX512F")
- (V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
- (V8DF "TARGET_AVX512F")])
+ (V64QI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")])
(define_expand "reduc_<code>_scal_<mode>"
[(smaxmin:REDUC_SMINMAX_MODE
@@ -3525,12 +3583,13 @@
[(set (match_operand:VF_128_256 0 "register_operand" "=x")
(unspec:VF_128_256
[(match_operand:VF_128_256 1 "register_operand" "x")
- (match_operand:VF_128_256 2 "nonimmediate_operand" "xm")
+ (match_operand:VF_128_256 2 "nonimmediate_operand" "xjm")
(match_operand:SI 3 "const_0_to_31_operand")]
UNSPEC_PCMP))]
"TARGET_AVX"
"vcmp<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecmp")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
@@ -3736,7 +3795,7 @@
(vec_merge:VF_128
(unspec:VF_128
[(match_operand:VF_128 1 "register_operand" "x")
- (match_operand:VF_128 2 "nonimmediate_operand" "xm")
+ (match_operand:VF_128 2 "nonimmediate_operand" "xjm")
(match_operand:SI 3 "const_0_to_31_operand")]
UNSPEC_PCMP)
(match_dup 1)
@@ -3744,6 +3803,7 @@
"TARGET_AVX"
"vcmp<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}"
[(set_attr "type" "ssecmp")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<ssescalarmode>")])
@@ -3752,13 +3812,14 @@
[(set (match_operand:VF_128_256 0 "register_operand" "=x,x")
(match_operator:VF_128_256 3 "sse_comparison_operator"
[(match_operand:VF_128_256 1 "register_operand" "%0,x")
- (match_operand:VF_128_256 2 "vector_operand" "xBm,xm")]))]
+ (match_operand:VF_128_256 2 "vector_operand" "xBm,xjm")]))]
"TARGET_SSE
&& GET_RTX_CLASS (GET_CODE (operands[3])) == RTX_COMM_COMPARE"
"@
cmp%D3<ssemodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
@@ -3768,12 +3829,13 @@
[(set (match_operand:VF_128_256 0 "register_operand" "=x,x")
(match_operator:VF_128_256 3 "sse_comparison_operator"
[(match_operand:VF_128_256 1 "register_operand" "0,x")
- (match_operand:VF_128_256 2 "vector_operand" "xBm,xm")]))]
+ (match_operand:VF_128_256 2 "vector_operand" "xBm,xjm")]))]
"TARGET_SSE"
"@
cmp%D3<ssemodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
@@ -3784,7 +3846,7 @@
(vec_merge:VF_128
(match_operator:VF_128 3 "sse_comparison_operator"
[(match_operand:VF_128 1 "register_operand" "0,x")
- (match_operand:VF_128 2 "nonimmediate_operand" "xm,xm")])
+ (match_operand:VF_128 2 "nonimmediate_operand" "xm,xjm")])
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
@@ -3792,6 +3854,7 @@
cmp%D3<ssescalarmodesuffix>\t{%2, %0|%0, %<iptr>2}
vcmp%D3<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %<iptr>2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1,*")
(set_attr "prefix" "orig,vex")
@@ -4709,7 +4772,7 @@
(and:VFB_128_256
(not:VFB_128_256
(match_operand:VFB_128_256 1 "register_operand" "0,x,v,v"))
- (match_operand:VFB_128_256 2 "vector_operand" "xBm,xm,vm,vm")))]
+ (match_operand:VFB_128_256 2 "vector_operand" "xBm,xjm,vm,vm")))]
"TARGET_SSE && <mask_avx512vl_condition>
&& (!<mask_applied> || <ssescalarmode>mode != HFmode)"
{
@@ -4753,7 +4816,8 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512dq,avx512f")
+ [(set_attr "isa" "noavx,avx_noavx512f,avx512dq,avx512f")
+ (set_attr "gpr32" "1,0,1,1")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,maybe_vex,evex,evex")
(set (attr "mode")
@@ -4761,6 +4825,10 @@
(and (eq_attr "alternative" "1")
(match_test "!TARGET_AVX512DQ")))
(const_string "<sseintvecmode2>")
+ (and (not (match_test "<mask_applied>"))
+ (eq_attr "alternative" "3")
+ (match_test "!x86_evex_reg_mentioned_p (operands, 3)"))
+ (const_string "<MODE>")
(eq_attr "alternative" "3")
(const_string "<sseintvecmode2>")
(match_test "TARGET_AVX")
@@ -5035,7 +5103,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f_512")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
@@ -5063,7 +5131,7 @@
[(set (match_operand:ANDNOT_MODE 0 "register_operand" "=x,x,v,v")
(and:ANDNOT_MODE
(not:ANDNOT_MODE (match_operand:ANDNOT_MODE 1 "register_operand" "0,x,v,v"))
- (match_operand:ANDNOT_MODE 2 "vector_operand" "xBm,xm,vm,v")))]
+ (match_operand:ANDNOT_MODE 2 "vector_operand" "xBm,xjm,vm,v")))]
"TARGET_SSE"
{
char buf[128];
@@ -5092,7 +5160,8 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
+ [(set_attr "isa" "noavx,avx_noavx512f,avx512vl,avx512f_512")
+ (set_attr "gpr32" "1,0,1,1")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
@@ -5102,7 +5171,10 @@
(const_string "*")))
(set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
- (cond [(eq_attr "alternative" "2")
+ (cond [(and (eq_attr "alternative" "3")
+ (match_test "!x86_evex_reg_mentioned_p (operands, 3)"))
+ (const_string "TI")
+ (eq_attr "alternative" "2")
(const_string "TI")
(eq_attr "alternative" "3")
(const_string "XI")
@@ -5161,7 +5233,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f_512")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
@@ -5223,7 +5295,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f_512")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
@@ -5269,12 +5341,12 @@
(V2DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V8SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V4DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F")
- (V8DF "TARGET_AVX512F")
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
(HF "TARGET_AVX512FP16")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V32HF "TARGET_AVX512FP16")])
+ (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")])
(define_expand "fma<mode>4"
[(set (match_operand:FMAMODEM 0 "register_operand")
@@ -5312,8 +5384,8 @@
(V2DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V8SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V4DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F")
- (V8DF "TARGET_AVX512F")])
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")])
(define_mode_iterator FMAMODE
[SF DF V4SF V2DF V8SF V4DF])
@@ -5383,12 +5455,14 @@
;; Suppose AVX-512F as baseline
(define_mode_iterator VFH_SF_AVX512VL
- [(V32HF "TARGET_AVX512FP16")
+ [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(HF "TARGET_AVX512FP16")
- SF V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- DF V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ SF (V16SF "TARGET_EVEX512")
+ (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ DF (V8DF "TARGET_EVEX512")
+ (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_insn "<sd_mask_codefor>fma_fmadd_<mode><sd_maskz_name><round_name>"
[(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v")
@@ -7062,6 +7136,13 @@
DONE;
})
+(define_expand "lrint<mode><sseintvecmodelower>2"
+ [(set (match_operand:<sseintvecmode> 0 "register_operand")
+ (unspec:<sseintvecmode>
+ [(match_operand:VHF_AVX512VL 1 "register_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_AVX512FP16")
+
(define_insn "avx512fp16_vcvtph2<sseintconvertsignprefix><sseintconvert>_<mode><mask_name><round_name>"
[(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v")
(unspec:VI248_AVX512VL
@@ -8028,7 +8109,7 @@
(unspec:V16SI
[(match_operand:V16SF 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtps2dq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8095,7 +8176,7 @@
[(set (match_operand:V16SI 0 "register_operand" "=v")
(any_fix:V16SI
(match_operand:V16SF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvttps2<fixsuffix>dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8595,7 +8676,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtdq2pd\t{%t1, %0|%0, %t1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8631,7 +8712,7 @@
(unspec:V8SI
[(match_operand:V8DF 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtpd2dq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8789,7 +8870,7 @@
[(set (match_operand:V8SI 0 "register_operand" "=v")
(any_fix:V8SI
(match_operand:V8DF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvttpd2<fixsuffix>dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9038,7 +9119,7 @@
(define_insn "<mask_codefor>fixuns_trunc<mode><sseintvecmodelower>2<mask_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unsigned_fix:<sseintvecmode>
- (match_operand:VF1_128_256VL 1 "nonimmediate_operand" "vm")))]
+ (match_operand:VF1_128_256 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX512VL"
"vcvttps2udq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
@@ -9193,7 +9274,7 @@
[(set (match_operand:V8SF 0 "register_operand" "=v")
(float_truncate:V8SF
(match_operand:V8DF 1 "<round_nimm_predicate>" "<round_constraint>")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtpd2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9355,7 +9436,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtps2pd\t{%t1, %0|%0, %t1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9540,7 +9621,7 @@
(set (match_operand:V8DF 0 "register_operand")
(float_extend:V8DF
(match_dup 2)))]
-"TARGET_AVX512F"
+"TARGET_AVX512F && TARGET_EVEX512"
"operands[2] = gen_reg_rtx (V8SFmode);")
(define_expand "vec_unpacks_lo_v4sf"
@@ -9678,7 +9759,7 @@
(set (match_operand:V8DF 0 "register_operand")
(float:V8DF
(match_dup 2)))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"operands[2] = gen_reg_rtx (V8SImode);")
(define_expand "vec_unpacks_float_lo_v16si"
@@ -9690,7 +9771,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_expand "vec_unpacku_float_hi_v4si"
[(set (match_dup 5)
@@ -9786,7 +9867,7 @@
(define_expand "vec_unpacku_float_hi_v16si"
[(match_operand:V8DF 0 "register_operand")
(match_operand:V16SI 1 "register_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
REAL_VALUE_TYPE TWO32r;
rtx k, x, tmp[4];
@@ -9835,7 +9916,7 @@
(define_expand "vec_unpacku_float_lo_v16si"
[(match_operand:V8DF 0 "register_operand")
(match_operand:V16SI 1 "nonimmediate_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
REAL_VALUE_TYPE TWO32r;
rtx k, x, tmp[3];
@@ -9929,7 +10010,7 @@
[(match_operand:V16SI 0 "register_operand")
(match_operand:V8DF 1 "nonimmediate_operand")
(match_operand:V8DF 2 "nonimmediate_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
rtx r1, r2;
@@ -10044,7 +10125,7 @@
[(match_operand:V16SI 0 "register_operand")
(match_operand:V8DF 1 "nonimmediate_operand")
(match_operand:V8DF 2 "nonimmediate_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
rtx r1, r2;
@@ -10237,7 +10318,7 @@
(const_int 11) (const_int 27)
(const_int 14) (const_int 30)
(const_int 15) (const_int 31)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vunpckhps\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -10325,7 +10406,7 @@
(const_int 9) (const_int 25)
(const_int 12) (const_int 28)
(const_int 13) (const_int 29)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vunpcklps\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -10465,7 +10546,7 @@
(const_int 11) (const_int 11)
(const_int 13) (const_int 13)
(const_int 15) (const_int 15)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vmovshdup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
@@ -10518,7 +10599,7 @@
(const_int 10) (const_int 10)
(const_int 12) (const_int 12)
(const_int 14) (const_int 14)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vmovsldup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
@@ -10836,7 +10917,7 @@
(match_operand:SF 1 "nonimmediate_operand"
" 0, 0,Yv, 0,0, v,m, 0 , m")
(match_operand:SF 2 "nonimm_or_0_operand"
- " Yr,*x,Yv, m,m, m,C,*ym, C")))]
+ " Yr,*x,Yv, jm,jm, m,C,*ym, C")))]
"TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
unpcklps\t{%2, %0|%0, %2}
@@ -10868,6 +10949,10 @@
(if_then_else (eq_attr "alternative" "7,8")
(const_string "native")
(const_string "*")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "3,4")
+ (const_string "0")
+ (const_string "1")))
(set (attr "prefix_data16")
(if_then_else (eq_attr "alternative" "3,4")
(const_string "1")
@@ -10934,12 +11019,12 @@
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "DF")])
-(define_insn "*vec_concatv8hf_movss"
- [(set (match_operand:V8HF 0 "register_operand" "=x,v,v")
- (vec_merge:V8HF
- (vec_duplicate:V8HF
- (match_operand:V2HF 2 "nonimmediate_operand" "x,m,v"))
- (match_operand:V8HF 1 "reg_or_0_operand" "0,C,v" )
+(define_insn "*vec_concat<mmxxmmmodelower>_movss"
+ [(set (match_operand:<mmxxmmmode> 0 "register_operand" "=x,v,v")
+ (vec_merge:<mmxxmmmode>
+ (vec_duplicate:<mmxxmmmode>
+ (match_operand:V2FI_32 2 "nonimmediate_operand" "x,m,v"))
+ (match_operand:<mmxxmmmode> 1 "reg_or_0_operand" "0,C,v" )
(const_int 3)))]
"TARGET_SSE"
"@
@@ -10959,7 +11044,7 @@
(vec_merge:VI4F_128
(vec_duplicate:VI4F_128
(match_operand:<ssescalarmode> 2 "general_operand"
- " Yr,*x,v,m,r ,m,x,v,?rm,?rm,?rm,!x,?re,!*fF"))
+ " Yr,*x,v,m,r ,m,x,v,?jrjm,?jrjm,?rm,!x,?re,!*fF"))
(match_operand:VI4F_128 1 "nonimm_or_0_operand"
" C , C,C,C,C ,C,0,v,0 ,0 ,x ,0 ,0 ,0")
(const_int 1)))]
@@ -10999,6 +11084,10 @@
(const_string "fmov")
]
(const_string "ssemov")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "8,9")
+ (const_string "0")
+ (const_string "1")))
(set (attr "prefix_extra")
(if_then_else (eq_attr "alternative" "8,9,10")
(const_string "1")
@@ -11169,7 +11258,7 @@
[(set (match_operand:V4SF 0 "register_operand" "=Yr,*x,v")
(vec_merge:V4SF
(vec_duplicate:V4SF
- (match_operand:SF 2 "nonimmediate_operand" "Yrm,*xm,vm"))
+ (match_operand:SF 2 "nonimmediate_operand" "Yrjm,*xjm,vm"))
(match_operand:V4SF 1 "register_operand" "0,0,v")
(match_operand:SI 3 "const_int_operand")))]
"TARGET_SSE4_1
@@ -11190,6 +11279,7 @@
}
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "sselog")
+ (set_attr "gpr32" "0,0,1")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
@@ -11264,7 +11354,7 @@
(define_insn "@sse4_1_insertps_<mode>"
[(set (match_operand:VI4F_128 0 "register_operand" "=Yr,*x,v")
(unspec:VI4F_128
- [(match_operand:VI4F_128 2 "nonimmediate_operand" "Yrm,*xm,vm")
+ [(match_operand:VI4F_128 2 "nonimmediate_operand" "Yrjm,*xjm,vm")
(match_operand:VI4F_128 1 "register_operand" "0,0,v")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_INSERTPS))]
@@ -11290,6 +11380,7 @@
}
}
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "sselog")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -11367,7 +11458,7 @@
"operands[1] = gen_lowpart (SFmode, operands[1]);")
(define_insn_and_split "*sse4_1_extractps"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=rm,rm,rm,Yv,Yv")
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=jrjm,jrjm,rm,Yv,Yv")
(vec_select:SF
(match_operand:V4SF 1 "register_operand" "Yr,*x,v,0,v")
(parallel [(match_operand:SI 2 "const_0_to_3_operand")])))]
@@ -11401,6 +11492,7 @@
DONE;
}
[(set_attr "isa" "noavx,noavx,avx,noavx,avx")
+ (set_attr "gpr32" "0,0,1,1,1")
(set_attr "type" "sselog,sselog,sselog,*,*")
(set_attr "prefix_data16" "1,1,1,*,*")
(set_attr "prefix_extra" "1,1,1,*,*")
@@ -11429,7 +11521,9 @@
(V8SF "32x4") (V8SI "32x4") (V4DF "64x2") (V4DI "64x2")])
(define_mode_iterator AVX512_VEC
- [(V8DF "TARGET_AVX512DQ") (V8DI "TARGET_AVX512DQ") V16SF V16SI])
+ [(V8DF "TARGET_AVX512DQ && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512DQ && TARGET_EVEX512")
+ (V16SF "TARGET_EVEX512") (V16SI "TARGET_EVEX512")])
(define_expand "<extract_type>_vextract<shuffletype><extract_suf>_mask"
[(match_operand:<ssequartermode> 0 "nonimmediate_operand")
@@ -11598,7 +11692,9 @@
[(V16SF "32x8") (V16SI "32x8") (V8DF "64x4") (V8DI "64x4")])
(define_mode_iterator AVX512_VEC_2
- [(V16SF "TARGET_AVX512DQ") (V16SI "TARGET_AVX512DQ") V8DF V8DI])
+ [(V16SF "TARGET_AVX512DQ && TARGET_EVEX512")
+ (V16SI "TARGET_AVX512DQ && TARGET_EVEX512")
+ (V8DF "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
(define_expand "<extract_type_2>_vextract<shuffletype><extract_suf_2>_mask"
[(match_operand:<ssehalfvecmode> 0 "nonimmediate_operand")
@@ -12155,7 +12251,8 @@
(const_int 26) (const_int 27)
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
- "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "TARGET_AVX512F && TARGET_EVEX512
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
if (TARGET_AVX512VL
|| REG_P (operands[0])
@@ -12203,7 +12300,7 @@
(const_int 58) (const_int 59)
(const_int 60) (const_int 61)
(const_int 62) (const_int 63)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vextracti64x4\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog1")
(set_attr "length_immediate" "1")
@@ -12229,7 +12326,7 @@
"operands[1] = gen_lowpart (V16QImode, operands[1]);")
(define_insn "vec_extract_hi_v32qi"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "=xm,vm")
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "=xjm,vm")
(vec_select:V16QI
(match_operand:V32QI 1 "register_operand" "x,v")
(parallel [(const_int 16) (const_int 17)
@@ -12247,7 +12344,8 @@
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
- (set_attr "isa" "*,avx512vl")
+ (set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix" "vex,evex")
(set_attr "mode" "OI")])
@@ -12265,9 +12363,9 @@
"operands[1] = gen_lowpart (<ssescalarmode>mode, operands[1]);")
(define_insn "*vec_extract<mode>"
- [(set (match_operand:HFBF 0 "register_sse4nonimm_operand" "=?r,m,x,v")
+ [(set (match_operand:HFBF 0 "register_sse4nonimm_operand" "=?r,jm,m,x,v")
(vec_select:HFBF
- (match_operand:<ssevecmode> 1 "register_operand" "v,v,0,v")
+ (match_operand:<ssevecmode> 1 "register_operand" "v,x,v,0,v")
(parallel
[(match_operand:SI 2 "const_0_to_7_operand")])))]
"TARGET_SSE2"
@@ -12277,12 +12375,14 @@
case 0:
return "%vpextrw\t{%2, %1, %k0|%k0, %1, %2}";
case 1:
- return "%vpextrw\t{%2, %1, %0|%0, %1, %2}";
-
+ return "pextrw\t{%2, %1, %0|%0, %1, %2}";
case 2:
+ return "vpextrw\t{%2, %1, %0|%0, %1, %2}";
+
+ case 3:
operands[2] = GEN_INT (INTVAL (operands[2]) * 2);
return "psrldq\t{%2, %0|%0, %2}";
- case 3:
+ case 4:
operands[2] = GEN_INT (INTVAL (operands[2]) * 2);
return "vpsrldq\t{%2, %1, %0|%0, %1, %2}";
@@ -12290,22 +12390,23 @@
gcc_unreachable ();
}
}
- [(set_attr "isa" "*,sse4,noavx,avx")
- (set_attr "type" "sselog1,sselog1,sseishft1,sseishft1")
+ [(set_attr "isa" "*,sse4_noavx,avx,noavx,avx")
+ (set_attr "gpr32" "1,0,1,1,1")
+ (set_attr "type" "sselog1,sselog1,sselog1,sseishft1,sseishft1")
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "TI")])
;; Modes handled by vec_extract patterns.
(define_mode_iterator VEC_EXTRACT_MODE
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512BW") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512BW") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF
- (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")])
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512BW && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512BW && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF
+ (V4TI "TARGET_AVX512F && TARGET_EVEX512") (V2TI "TARGET_AVX")])
(define_expand "vec_extract<mode><ssescalarmodelower>"
[(match_operand:<ssescalarmode> 0 "register_operand")
@@ -12347,7 +12448,7 @@
(const_int 3) (const_int 11)
(const_int 5) (const_int 13)
(const_int 7) (const_int 15)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vunpckhpd\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -12461,7 +12562,7 @@
(const_int 2) (const_int 10)
(const_int 4) (const_int 12)
(const_int 6) (const_int 14)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vmovddup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "sselog1")
(set_attr "prefix" "evex")
@@ -12477,7 +12578,7 @@
(const_int 2) (const_int 10)
(const_int 4) (const_int 12)
(const_int 6) (const_int 14)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vunpcklpd\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -12689,7 +12790,7 @@
(match_operand:SI 4 "const_0_to_255_operand")]
UNSPEC_VTERNLOG))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
/* Disallow embeded broadcast for vector HFmode since
it's not real AVX512FP16 instruction. */
&& (GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) >= 4
@@ -12781,7 +12882,7 @@
(match_operand:V 3 "regmem_or_bitnot_regmem_operand")
(match_operand:V 4 "regmem_or_bitnot_regmem_operand"))))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()
&& (rtx_equal_p (STRIP_UNARY (operands[1]),
STRIP_UNARY (operands[4]))
@@ -12866,7 +12967,7 @@
(match_operand:V 3 "regmem_or_bitnot_regmem_operand"))
(match_operand:V 4 "regmem_or_bitnot_regmem_operand")))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()
&& (rtx_equal_p (STRIP_UNARY (operands[1]),
STRIP_UNARY (operands[4]))
@@ -12950,7 +13051,7 @@
(match_operand:V 2 "regmem_or_bitnot_regmem_operand"))
(match_operand:V 3 "regmem_or_bitnot_regmem_operand")))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()"
"#"
"&& 1"
@@ -13074,7 +13175,7 @@
(match_operand:SI 3 "const_0_to_255_operand")
(match_operand:V16SF 4 "register_operand")
(match_operand:HI 5 "register_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
int mask = INTVAL (operands[3]);
emit_insn (gen_avx512f_shufps512_1_mask (operands[0], operands[1], operands[2],
@@ -13261,7 +13362,7 @@
(match_operand 16 "const_12_to_15_operand")
(match_operand 17 "const_28_to_31_operand")
(match_operand 18 "const_28_to_31_operand")])))]
- "TARGET_AVX512F
+ "TARGET_AVX512F && TARGET_EVEX512
&& (INTVAL (operands[3]) == (INTVAL (operands[7]) - 4)
&& INTVAL (operands[4]) == (INTVAL (operands[8]) - 4)
&& INTVAL (operands[5]) == (INTVAL (operands[9]) - 4)
@@ -13296,7 +13397,7 @@
(match_operand:SI 3 "const_0_to_255_operand")
(match_operand:V8DF 4 "register_operand")
(match_operand:QI 5 "register_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
int mask = INTVAL (operands[3]);
emit_insn (gen_avx512f_shufpd512_1_mask (operands[0], operands[1], operands[2],
@@ -13326,7 +13427,7 @@
(match_operand 8 "const_12_to_13_operand")
(match_operand 9 "const_6_to_7_operand")
(match_operand 10 "const_14_to_15_operand")])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
int mask;
mask = INTVAL (operands[3]);
@@ -13458,7 +13559,7 @@
(const_int 3) (const_int 11)
(const_int 5) (const_int 13)
(const_int 7) (const_int 15)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpunpckhqdq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -13508,7 +13609,7 @@
(const_int 2) (const_int 10)
(const_int 4) (const_int 12)
(const_int 6) (const_int 14)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpunpcklqdq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -13872,8 +13973,8 @@
(set_attr "mode" "V2DF,DF,V8DF")
(set (attr "enabled")
(cond [(eq_attr "alternative" "2")
- (symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && !TARGET_PREFER_AVX256")
+ (symbol_ref "TARGET_AVX512F && TARGET_EVEX512
+ && !TARGET_AVX512VL && !TARGET_PREFER_AVX256")
(match_test "<mask_avx512vl_condition>")
(const_string "*")
]
@@ -13957,13 +14058,13 @@
[(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand")
(truncate:PMOV_DST_MODE_1
(match_operand:<pmov_src_mode> 1 "register_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_insn "*avx512f_<code><pmov_src_lower><mode>2"
[(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand" "=v,m")
(any_truncate:PMOV_DST_MODE_1
(match_operand:<pmov_src_mode> 1 "register_operand" "v,v")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix><pmov_suff_1>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -13985,7 +14086,7 @@
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)])))]
- "TARGET_AVX512BW && ix86_pre_reload_split ()"
+ "TARGET_AVX512BW && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -14010,7 +14111,7 @@
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)])))]
- "TARGET_AVX512BW && ix86_pre_reload_split ()"
+ "TARGET_AVX512BW && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -14094,7 +14195,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
- "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -14110,7 +14211,7 @@
(match_operand:<pmov_src_mode> 1 "register_operand" "v,v"))
(match_operand:PMOV_DST_MODE_1 2 "nonimm_or_0_operand" "0C,0")
(match_operand:<avx512fmaskmode> 3 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix><pmov_suff_1>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -14124,19 +14225,19 @@
(match_operand:<pmov_src_mode> 1 "register_operand"))
(match_dup 0)
(match_operand:<avx512fmaskmode> 2 "register_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_expand "truncv32hiv32qi2"
[(set (match_operand:V32QI 0 "nonimmediate_operand")
(truncate:V32QI
(match_operand:V32HI 1 "register_operand")))]
- "TARGET_AVX512BW")
+ "TARGET_AVX512BW && TARGET_EVEX512")
(define_insn "avx512bw_<code>v32hiv32qi2"
[(set (match_operand:V32QI 0 "nonimmediate_operand" "=v,m")
(any_truncate:V32QI
(match_operand:V32HI 1 "register_operand" "v,v")))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpmov<trunsuffix>wb\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -14166,7 +14267,7 @@
(const_int 26) (const_int 27)
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
- "TARGET_AVX512VBMI && ix86_pre_reload_split ()"
+ "TARGET_AVX512VBMI && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -14182,7 +14283,7 @@
(match_operand:V32HI 1 "register_operand" "v,v"))
(match_operand:V32QI 2 "nonimm_or_0_operand" "0C,0")
(match_operand:SI 3 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpmov<trunsuffix>wb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -14196,7 +14297,7 @@
(match_operand:V32HI 1 "register_operand"))
(match_dup 0)
(match_operand:SI 2 "register_operand")))]
- "TARGET_AVX512BW")
+ "TARGET_AVX512BW && TARGET_EVEX512")
(define_mode_iterator PMOV_DST_MODE_2
[V4SI V8HI (V16QI "TARGET_AVX512BW")])
@@ -15072,7 +15173,7 @@
[(set (match_operand:V8QI 0 "register_operand")
(truncate:V8QI
(match_operand:V8DI 1 "register_operand")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
rtx op0 = gen_reg_rtx (V16QImode);
@@ -15092,7 +15193,7 @@
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix>qb\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -15102,7 +15203,7 @@
[(set (match_operand:V8QI 0 "memory_operand" "=m")
(any_truncate:V8QI
(match_operand:V8DI 1 "register_operand" "v")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix>qb\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "store")
@@ -15114,7 +15215,7 @@
(subreg:DI
(any_truncate:V8QI
(match_operand:V8DI 1 "register_operand")) 0))]
- "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -15138,7 +15239,7 @@
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix>qb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -15159,7 +15260,7 @@
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix>qb\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -15172,7 +15273,7 @@
(match_operand:V8DI 1 "register_operand" "v"))
(match_dup 0)
(match_operand:QI 2 "register_operand" "Yk")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<trunsuffix>qb\t{%1, %0%{%2%}|%0%{%2%}, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "store")
@@ -15195,7 +15296,7 @@
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))
(match_operand:QI 2 "register_operand")) 0))]
- "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -15453,7 +15554,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"ix86_fixup_binary_operands_no_copy (MULT, V16SImode, operands);")
(define_insn "*vec_widen_umult_even_v16si<mask_name>"
@@ -15473,7 +15574,8 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "TARGET_AVX512F && TARGET_EVEX512
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuludq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix" "evex")
@@ -15568,7 +15670,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"ix86_fixup_binary_operands_no_copy (MULT, V16SImode, operands);")
(define_insn "*vec_widen_smult_even_v16si<mask_name>"
@@ -15588,7 +15690,8 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "TARGET_AVX512F && TARGET_EVEX512
+ && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix" "evex")
@@ -15653,7 +15756,7 @@
(parallel [(const_int 0) (const_int 2)])))
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 2 "vector_operand" "YrBm,*xBm,vm")
+ (match_operand:V4SI 2 "vector_operand" "Yrja,*xja,vm")
(parallel [(const_int 0) (const_int 2)])))))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))"
@@ -15662,6 +15765,7 @@
pmuldq\t{%2, %0|%0, %2}
vpmuldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
@@ -15899,7 +16003,7 @@
[(set (match_operand:VI4_AVX512F 0 "register_operand" "=Yr,*x,v")
(mult:VI4_AVX512F
(match_operand:VI4_AVX512F 1 "bcst_vector_operand" "%0,0,v")
- (match_operand:VI4_AVX512F 2 "bcst_vector_operand" "YrBm,*xBm,vmBr")))]
+ (match_operand:VI4_AVX512F 2 "bcst_vector_operand" "Yrja,*xja,vmBr")))]
"TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
&& <mask_mode512bit_condition>"
"@
@@ -15907,6 +16011,7 @@
pmulld\t{%2, %0|%0, %2}
vpmulld\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "<bcst_mask_prefix4>")
@@ -15987,7 +16092,7 @@
"TARGET_SSE2"
{
/* Try with vnni instructions. */
- if ((<MODE_SIZE> == 64 && TARGET_AVX512VNNI)
+ if ((<MODE_SIZE> == 64 && TARGET_AVX512VNNI && TARGET_EVEX512)
|| (<MODE_SIZE> < 64
&& ((TARGET_AVX512VNNI && TARGET_AVX512VL) || TARGET_AVXVNNI)))
{
@@ -16081,7 +16186,7 @@
(match_operand:V64QI 1 "register_operand")
(match_operand:V64QI 2 "nonimmediate_operand")
(match_operand:V16SI 3 "nonimmediate_operand")]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
{
rtx t1 = gen_reg_rtx (V8DImode);
rtx t2 = gen_reg_rtx (V16SImode);
@@ -16711,7 +16816,7 @@
[(set (match_operand:VI14_128 0 "register_operand" "=Yr,*x,<v_Yw>")
(smaxmin:VI14_128
(match_operand:VI14_128 1 "vector_operand" "%0,0,<v_Yw>")
- (match_operand:VI14_128 2 "vector_operand" "YrBm,*xBm,<v_Yw>m")))]
+ (match_operand:VI14_128 2 "vector_operand" "Yrja,*xja,<v_Yw>m")))]
"TARGET_SSE4_1
&& <mask_mode512bit_condition>
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))"
@@ -16722,6 +16827,7 @@
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "sseiadd")
(set_attr "prefix_extra" "1")
+ (set_attr "gpr32" "0,0,1")
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
@@ -16729,13 +16835,14 @@
[(set (match_operand:V8HI 0 "register_operand" "=x,Yw")
(smaxmin:V8HI
(match_operand:V8HI 1 "vector_operand" "%0,Yw")
- (match_operand:V8HI 2 "vector_operand" "xBm,Ywm")))]
+ (match_operand:V8HI 2 "vector_operand" "xja,Ywm")))]
"TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
p<maxmin_int>w\t{%2, %0|%0, %2}
vp<maxmin_int>w\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
@@ -16793,7 +16900,7 @@
[(set (match_operand:VI24_128 0 "register_operand" "=Yr,*x,<v_Yw>")
(umaxmin:VI24_128
(match_operand:VI24_128 1 "vector_operand" "%0,0,<v_Yw>")
- (match_operand:VI24_128 2 "vector_operand" "YrBm,*xBm,<v_Yw>m")))]
+ (match_operand:VI24_128 2 "vector_operand" "Yrja,*xja,<v_Yw>m")))]
"TARGET_SSE4_1
&& <mask_mode512bit_condition>
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))"
@@ -16803,6 +16910,7 @@
vp<maxmin_int><ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0,0,1")
(set_attr "prefix_extra" "1,1,*")
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
@@ -16811,12 +16919,13 @@
[(set (match_operand:V16QI 0 "register_operand" "=x,Yw")
(umaxmin:V16QI
(match_operand:V16QI 1 "vector_operand" "%0,Yw")
- (match_operand:V16QI 2 "vector_operand" "xBm,Ywm")))]
+ (match_operand:V16QI 2 "vector_operand" "xja,Ywm")))]
"TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
p<maxmin_int>b\t{%2, %0|%0, %2}
vp<maxmin_int>b\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sseiadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
@@ -16831,7 +16940,7 @@
[(set (match_operand:VI_256 0 "register_operand" "=x")
(eq:VI_256
(match_operand:VI_256 1 "nonimmediate_operand" "%x")
- (match_operand:VI_256 2 "nonimmediate_operand" "xm")))]
+ (match_operand:VI_256 2 "nonimmediate_operand" "jm")))]
"TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpcmpeq<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecmp")
@@ -16839,6 +16948,7 @@
(if_then_else (eq (const_string "<MODE>mode") (const_string "V4DImode"))
(const_string "1")
(const_string "*")))
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
@@ -17021,7 +17131,7 @@
[(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,x")
(eq:V2DI
(match_operand:V2DI 1 "vector_operand" "%0,0,x")
- (match_operand:V2DI 2 "vector_operand" "YrBm,*xBm,xm")))]
+ (match_operand:V2DI 2 "vector_operand" "Yrja,*xja,xjm")))]
"TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pcmpeqq\t{%2, %0|%0, %2}
@@ -17029,6 +17139,7 @@
vpcmpeqq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecmp")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
@@ -17037,13 +17148,14 @@
[(set (match_operand:VI124_128 0 "register_operand" "=x,x")
(eq:VI124_128
(match_operand:VI124_128 1 "vector_operand" "%0,x")
- (match_operand:VI124_128 2 "vector_operand" "xBm,xm")))]
+ (match_operand:VI124_128 2 "vector_operand" "xBm,xjm")))]
"TARGET_SSE2
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pcmpeq<ssemodesuffix>\t{%2, %0|%0, %2}
vpcmpeq<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "ssecmp")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
@@ -17052,7 +17164,7 @@
[(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,x")
(gt:V2DI
(match_operand:V2DI 1 "register_operand" "0,0,x")
- (match_operand:V2DI 2 "vector_operand" "YrBm,*xBm,xm")))]
+ (match_operand:V2DI 2 "vector_operand" "Yrja,*xja,xjm")))]
"TARGET_SSE4_2"
"@
pcmpgtq\t{%2, %0|%0, %2}
@@ -17060,6 +17172,7 @@
vpcmpgtq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecmp")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
@@ -17068,7 +17181,7 @@
[(set (match_operand:VI_256 0 "register_operand" "=x")
(gt:VI_256
(match_operand:VI_256 1 "register_operand" "x")
- (match_operand:VI_256 2 "nonimmediate_operand" "xm")))]
+ (match_operand:VI_256 2 "nonimmediate_operand" "xjm")))]
"TARGET_AVX2"
"vpcmpgt<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecmp")
@@ -17076,6 +17189,7 @@
(if_then_else (eq (const_string "<MODE>mode") (const_string "V4DImode"))
(const_string "1")
(const_string "*")))
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
@@ -17099,12 +17213,13 @@
[(set (match_operand:VI124_128 0 "register_operand" "=x,x")
(gt:VI124_128
(match_operand:VI124_128 1 "register_operand" "0,x")
- (match_operand:VI124_128 2 "vector_operand" "xBm,xm")))]
+ (match_operand:VI124_128 2 "vector_operand" "xBm,xjm")))]
"TARGET_SSE2"
"@
pcmpgt<ssemodesuffix>\t{%2, %0|%0, %2}
vpcmpgt<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "1,0")
(set_attr "type" "ssecmp")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
@@ -17263,10 +17378,13 @@
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
(V8SF "TARGET_AVX2") (V4DF "TARGET_AVX2")
(V16HF "TARGET_AVX512FP16")
- (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
- (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
- (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512VBMI")
- (V32HF "TARGET_AVX512FP16")])
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512VBMI && TARGET_EVEX512")
+ (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")])
(define_expand "vec_perm<mode>"
[(match_operand:VEC_PERM_AVX2 0 "register_operand")
@@ -17293,7 +17411,7 @@
{
operands[2] = CONSTM1_RTX (<MODE>mode);
- if (!TARGET_AVX512F)
+ if (!TARGET_AVX512F || (!TARGET_AVX512VL && !TARGET_EVEX512))
operands[2] = force_reg (<MODE>mode, operands[2]);
})
@@ -17302,6 +17420,7 @@
(xor:VI (match_operand:VI 1 "bcst_vector_operand" " 0, m,Br")
(match_operand:VI 2 "vector_all_ones_operand" "BC,BC,BC")))]
"TARGET_AVX512F
+ && (<MODE_SIZE> == 64 || TARGET_AVX512VL || TARGET_EVEX512)
&& (!<mask_applied>
|| <ssescalarmode>mode == SImode
|| <ssescalarmode>mode == DImode)"
@@ -17368,7 +17487,7 @@
(match_operand:VI 2 "vector_all_ones_operand" "BC,BC,BC")))
(unspec [(match_operand:VI 3 "register_operand" "0,0,0")]
UNSPEC_INSN_FALSE_DEP)]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && (<MODE_SIZE> == 64 || TARGET_AVX512VL || TARGET_EVEX512)"
{
if (TARGET_AVX512VL)
return "vpternlog<ternlogsuffix>\t{$0x55, %1, %0, %0<mask_operand3>|%0<mask_operand3>, %0, %1, 0x55}";
@@ -17392,7 +17511,7 @@
(not:<ssescalarmode>
(match_operand:<ssescalarmode> 1 "nonimmediate_operand"))))]
"<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256)"
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)"
[(set (match_dup 0)
(xor:VI48_AVX512F
(vec_duplicate:VI48_AVX512F (match_dup 1))
@@ -17421,7 +17540,7 @@
[(set (match_operand:VI 0 "register_operand" "=x,x,v,v,v")
(and:VI
(not:VI (match_operand:VI 1 "bcst_vector_operand" "0,x,v,m,Br"))
- (match_operand:VI 2 "bcst_vector_operand" "xBm,xm,vmBr,0,0")))]
+ (match_operand:VI 2 "bcst_vector_operand" "xBm,xjm,vmBr,0,0")))]
"TARGET_SSE
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
@@ -17508,7 +17627,8 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx,*,*")
+ [(set_attr "isa" "noavx,avx_noavx512f,avx512f,*,*")
+ (set_attr "gpr32" "1,0,1,1,1")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
@@ -17538,7 +17658,8 @@
(symbol_ref "<MODE_SIZE> == 64 || TARGET_AVX512VL")
(eq_attr "alternative" "4")
(symbol_ref "<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256)")
+ || (TARGET_AVX512F && TARGET_EVEX512
+ && !TARGET_PREFER_AVX256)")
]
(const_string "*")))])
@@ -17582,7 +17703,7 @@
(match_operand:<ssescalarmode> 1 "nonimmediate_operand")))
(match_operand:VI 2 "vector_operand")))]
"<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256)"
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)"
[(set (match_dup 3)
(vec_duplicate:VI (match_dup 1)))
(set (match_dup 0)
@@ -17597,7 +17718,7 @@
(match_operand:<ssescalarmode> 1 "nonimmediate_operand")))
(match_operand:VI 2 "vector_operand")))]
"<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256)"
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)"
[(set (match_dup 3)
(vec_duplicate:VI (match_dup 1)))
(set (match_dup 0)
@@ -17663,7 +17784,7 @@
[(set (match_operand:VI48_AVX_AVX512F 0 "register_operand" "=x,x,v")
(any_logic:VI48_AVX_AVX512F
(match_operand:VI48_AVX_AVX512F 1 "bcst_vector_operand" "%0,x,v")
- (match_operand:VI48_AVX_AVX512F 2 "bcst_vector_operand" "xBm,xm,vmBr")))]
+ (match_operand:VI48_AVX_AVX512F 2 "bcst_vector_operand" "xBm,xjm,vmBr")))]
"TARGET_SSE && <mask_mode512bit_condition>
&& ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
{
@@ -17693,9 +17814,11 @@
case E_V4DImode:
case E_V4SImode:
case E_V2DImode:
- ssesuffix = (TARGET_AVX512VL
- && (<mask_applied> || which_alternative == 2)
- ? "<ssemodesuffix>" : "");
+ ssesuffix = ((TARGET_AVX512VL
+ && (<mask_applied> || which_alternative == 2))
+ || (MEM_P (operands[2]) && which_alternative == 2
+ && x86_extended_rex2reg_mentioned_p (operands[2])))
+ ? "<ssemodesuffix>" : "";
break;
default:
gcc_unreachable ();
@@ -17735,7 +17858,8 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx")
+ [(set_attr "isa" "noavx,avx_noavx512f,avx512f")
+ (set_attr "gpr32" "1,0,1")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
@@ -17762,7 +17886,7 @@
[(set (match_operand:VI12_AVX_AVX512F 0 "register_operand" "=x,x,v")
(any_logic:VI12_AVX_AVX512F
(match_operand:VI12_AVX_AVX512F 1 "vector_operand" "%0,x,v")
- (match_operand:VI12_AVX_AVX512F 2 "vector_operand" "xBm,xm,vm")))]
+ (match_operand:VI12_AVX_AVX512F 2 "vector_operand" "xBm,xjm,vm")))]
"TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
{
char buf[64];
@@ -17791,7 +17915,10 @@
case E_V16HImode:
case E_V16QImode:
case E_V8HImode:
- ssesuffix = TARGET_AVX512VL && which_alternative == 2 ? "q" : "";
+ ssesuffix = (((TARGET_AVX512VL && which_alternative == 2)
+ || (MEM_P (operands[2]) && which_alternative == 2
+ && x86_extended_rex2reg_mentioned_p (operands[2]))))
+ ? "q" : "";
break;
default:
gcc_unreachable ();
@@ -17828,7 +17955,8 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx")
+ [(set_attr "isa" "noavx,avx_noavx512f,avx512f")
+ (set_attr "gpr32" "1,0,1")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
@@ -17855,13 +17983,14 @@
[(set (match_operand:V1TI 0 "register_operand" "=x,x,v")
(any_logic:V1TI
(match_operand:V1TI 1 "register_operand" "%0,x,v")
- (match_operand:V1TI 2 "vector_operand" "xBm,xm,vm")))]
+ (match_operand:V1TI 2 "vector_operand" "xBm,xjm,vm")))]
"TARGET_SSE2"
"@
p<logic>\t{%2, %0|%0, %2}
vp<logic>\t{%2, %1, %0|%0, %1, %2}
vp<logic>d\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "noavx,avx,avx512vl")
+ [(set_attr "isa" "noavx,avx_noavx512vl,avx512vl")
+ (set_attr "gpr32" "1,0,1")
(set_attr "prefix" "orig,vex,evex")
(set_attr "prefix_data16" "1,*,*")
(set_attr "type" "sselog")
@@ -17883,7 +18012,7 @@
(match_operand:VI 1 "bcst_vector_operand" "0,m, 0,vBr"))
(match_operand:VI 2 "bcst_vector_operand" "m,0,vBr, 0")))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
{
@@ -17916,7 +18045,7 @@
(match_operand:VI 1 "bcst_vector_operand" "%0, 0")
(match_operand:VI 2 "bcst_vector_operand" " m,vBr"))))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
{
@@ -17947,7 +18076,7 @@
(not:VI (match_operand:VI 1 "bcst_vector_operand" "%0, 0"))
(not:VI (match_operand:VI 2 "bcst_vector_operand" "m,vBr"))))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
{
@@ -17969,7 +18098,7 @@
(const_string "*")))])
(define_mode_iterator AVX512ZEXTMASK
- [(DI "TARGET_AVX512BW") (SI "TARGET_AVX512BW") HI])
+ [(DI "TARGET_AVX512BW && TARGET_EVEX512") (SI "TARGET_AVX512BW") HI])
(define_insn "<avx512>_testm<mode>3<mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
@@ -18081,16 +18210,18 @@
(unspec [(const_int 0)] UNSPEC_MASKOP)])]
"TARGET_AVX512F")
+(define_mode_iterator SWI24_MASK [HI (SI "TARGET_EVEX512")])
+
(define_expand "vec_pack_trunc_<mode>"
[(parallel
[(set (match_operand:<DOUBLEMASKMODE> 0 "register_operand")
(ior:<DOUBLEMASKMODE>
(ashift:<DOUBLEMASKMODE>
(zero_extend:<DOUBLEMASKMODE>
- (match_operand:SWI24 2 "register_operand"))
+ (match_operand:SWI24_MASK 2 "register_operand"))
(match_dup 3))
(zero_extend:<DOUBLEMASKMODE>
- (match_operand:SWI24 1 "register_operand"))))
+ (match_operand:SWI24_MASK 1 "register_operand"))))
(unspec [(const_int 0)] UNSPEC_MASKOP)])]
"TARGET_AVX512BW"
{
@@ -18218,7 +18349,7 @@
(const_int 60) (const_int 61)
(const_int 62) (const_int 63)])))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpacksswb\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "<mask_prefix>")
@@ -18287,7 +18418,7 @@
(const_int 14) (const_int 15)
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpackssdw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "<mask_prefix>")
@@ -18349,7 +18480,7 @@
(const_int 61) (const_int 125)
(const_int 62) (const_int 126)
(const_int 63) (const_int 127)])))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpunpckhbw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -18445,7 +18576,7 @@
(const_int 53) (const_int 117)
(const_int 54) (const_int 118)
(const_int 55) (const_int 119)])))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpunpcklbw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -18669,7 +18800,7 @@
(const_int 11) (const_int 27)
(const_int 14) (const_int 30)
(const_int 15) (const_int 31)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpunpckhdq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -18724,7 +18855,7 @@
(const_int 9) (const_int 25)
(const_int 12) (const_int 28)
(const_int 13) (const_int 29)])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpunpckldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -18803,7 +18934,7 @@
[(set (match_operand:PINSR_MODE 0 "register_operand" "=x,x,x,x,v,v,&x")
(vec_merge:PINSR_MODE
(vec_duplicate:PINSR_MODE
- (match_operand:<ssescalarmode> 2 "nonimmediate_operand" "r,m,r,m,r,m,x"))
+ (match_operand:<ssescalarmode> 2 "nonimmediate_operand" "jr,jm,r,m,r,m,x"))
(match_operand:PINSR_MODE 1 "register_operand" "0,0,x,x,v,v,x")
(match_operand:SI 3 "const_int_operand")))]
"TARGET_SSE2
@@ -18840,6 +18971,7 @@
}
[(set_attr "isa" "noavx,noavx,avx,avx,<pinsr_evex_isa>,<pinsr_evex_isa>,avx2")
(set_attr "type" "sselog")
+ (set_attr "gpr32" "0,0,1,1,1,1,1")
(set (attr "prefix_rex")
(if_then_else
(and (not (match_test "TARGET_AVX"))
@@ -18912,6 +19044,12 @@
{
if (which_alternative == 0)
return "vinsert<shuffletype><extract_suf>\t{$0, %2, %1, %0|%0, %1, %2, 0}";
+ bool egpr_used = (TARGET_APX_EGPR
+ && x86_extended_rex2reg_mentioned_p (operands[2]));
+ const char *align_templ = egpr_used ? "vmovaps\t{%2, %x0|%x0, %2}"
+ : "vmovdqa\t{%2, %x0|%x0, %2}";
+ const char *unalign_templ = egpr_used ? "vmovups\t{%2, %x0|%x0, %2}"
+ : "vmovdqu\t{%2, %x0|%x0, %2}";
switch (<MODE>mode)
{
case E_V8DFmode:
@@ -18927,17 +19065,17 @@
case E_V8DImode:
if (misaligned_operand (operands[2], <ssequartermode>mode))
return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
- : "vmovdqu\t{%2, %x0|%x0, %2}";
+ : unalign_templ;
else
return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}"
- : "vmovdqa\t{%2, %x0|%x0, %2}";
+ : align_templ;
case E_V16SImode:
if (misaligned_operand (operands[2], <ssequartermode>mode))
return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
- : "vmovdqu\t{%2, %x0|%x0, %2}";
+ : unalign_templ;
else
return which_alternative == 2 ? "vmovdqa32\t{%2, %x0|%x0, %2}"
- : "vmovdqa\t{%2, %x0|%x0, %2}";
+ : align_templ;
default:
gcc_unreachable ();
}
@@ -19418,7 +19556,7 @@
(match_operand:SI 2 "const_0_to_255_operand")
(match_operand:V16SI 3 "register_operand")
(match_operand:HI 4 "register_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
int mask = INTVAL (operands[2]);
emit_insn (gen_avx512f_pshufd_1_mask (operands[0], operands[1],
@@ -19462,7 +19600,7 @@
(match_operand 15 "const_12_to_15_operand")
(match_operand 16 "const_12_to_15_operand")
(match_operand 17 "const_12_to_15_operand")])))]
- "TARGET_AVX512F
+ "TARGET_AVX512F && TARGET_EVEX512
&& INTVAL (operands[2]) + 4 == INTVAL (operands[6])
&& INTVAL (operands[3]) + 4 == INTVAL (operands[7])
&& INTVAL (operands[4]) + 4 == INTVAL (operands[8])
@@ -19628,7 +19766,7 @@
[(match_operand:V32HI 1 "nonimmediate_operand" "vm")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_PSHUFLW))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpshuflw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -19804,7 +19942,7 @@
[(match_operand:V32HI 1 "nonimmediate_operand" "vm")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_PSHUFHW))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpshufhw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -19994,17 +20132,23 @@
operands[4] = gen_lowpart (<ssescalarmode>mode, operands[2]);
})
+(define_mode_attr vi128_jr_r
+ [(V16QI "jr") (V8HI "r")])
+
(define_insn "*vec_extract<mode>"
- [(set (match_operand:<ssescalarmode> 0 "register_sse4nonimm_operand" "=r,m")
+ [(set (match_operand:<ssescalarmode> 0 "register_sse4nonimm_operand" "=<vi128_jr_r>,r,jm,m")
(vec_select:<ssescalarmode>
- (match_operand:PEXTR_MODE12 1 "register_operand" "YW,YW")
+ (match_operand:PEXTR_MODE12 1 "register_operand" "YW,YW,YW,YW")
(parallel
[(match_operand:SI 2 "const_0_to_<ssescalarnummask>_operand")])))]
"TARGET_SSE2"
"@
- %vpextr<ssemodesuffix>\t{%2, %1, %k0|%k0, %1, %2}
- %vpextr<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "*,sse4")
+ pextr<ssemodesuffix>\t{%2, %1, %k0|%k0, %1, %2}
+ vpextr<ssemodesuffix>\t{%2, %1, %k0|%k0, %1, %2}
+ pextr<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}
+ vpextr<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "sse2_noavx,avx,sse4_noavx,avx")
+ (set_attr "gpr32" "1,1,0,1")
(set_attr "type" "sselog1")
(set (attr "prefix_extra")
(if_then_else
@@ -20012,20 +20156,21 @@
(const_string "*")
(const_string "1")))
(set_attr "length_immediate" "1")
- (set_attr "prefix" "maybe_vex,maybe_vex")
+ (set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
(define_insn "*vec_extract<PEXTR_MODE12:mode>_zext"
- [(set (match_operand:SWI48 0 "register_operand" "=r")
+ [(set (match_operand:SWI48 0 "register_operand" "=<vi128_jr_r>,r")
(zero_extend:SWI48
(vec_select:<PEXTR_MODE12:ssescalarmode>
- (match_operand:PEXTR_MODE12 1 "register_operand" "YW")
+ (match_operand:PEXTR_MODE12 1 "register_operand" "YW,YW")
(parallel
[(match_operand:SI 2
"const_0_to_<PEXTR_MODE12:ssescalarnummask>_operand")]))))]
"TARGET_SSE2"
"%vpextr<PEXTR_MODE12:ssemodesuffix>\t{%2, %1, %k0|%k0, %1, %2}"
- [(set_attr "type" "sselog1")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog1")
(set (attr "prefix_extra")
(if_then_else
(eq (const_string "<PEXTR_MODE12:MODE>mode") (const_string "V8HImode"))
@@ -20036,15 +20181,16 @@
(set_attr "mode" "TI")])
(define_insn "*vec_extractv16qi_zext"
- [(set (match_operand:HI 0 "register_operand" "=r")
+ [(set (match_operand:HI 0 "register_operand" "=jr,r")
(zero_extend:HI
(vec_select:QI
- (match_operand:V16QI 1 "register_operand" "YW")
+ (match_operand:V16QI 1 "register_operand" "YW,YW")
(parallel
[(match_operand:SI 2 "const_0_to_15_operand")]))))]
"TARGET_SSE4_1"
"%vpextrb\t{%2, %1, %k0|%k0, %1, %2}"
- [(set_attr "type" "sselog1")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -20150,24 +20296,26 @@
"operands[1] = gen_lowpart (SImode, operands[1]);")
(define_insn "*vec_extractv4si"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,rm,Yr,*x,Yw")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=jrjm,rm,rm,Yr,*x,Yw")
(vec_select:SI
- (match_operand:V4SI 1 "register_operand" " x, v, 0, 0,Yw")
+ (match_operand:V4SI 1 "register_operand" "x, x, v, 0, 0, Yw")
(parallel [(match_operand:SI 2 "const_0_to_3_operand")])))]
"TARGET_SSE4_1"
{
switch (which_alternative)
{
case 0:
+ return "pextrd\t{%2, %1, %0|%0, %1, %2}";
case 1:
- return "%vpextrd\t{%2, %1, %0|%0, %1, %2}";
-
case 2:
+ return "vpextrd\t{%2, %1, %0|%0, %1, %2}";
+
case 3:
+ case 4:
operands[2] = GEN_INT (INTVAL (operands[2]) * 4);
return "psrldq\t{%2, %0|%0, %2}";
- case 4:
+ case 5:
operands[2] = GEN_INT (INTVAL (operands[2]) * 4);
return "vpsrldq\t{%2, %1, %0|%0, %1, %2}";
@@ -20175,25 +20323,26 @@
gcc_unreachable ();
}
}
- [(set_attr "isa" "*,avx512dq,noavx,noavx,avx")
- (set_attr "type" "sselog1,sselog1,sseishft1,sseishft1,sseishft1")
+ [(set_attr "isa" "noavx,avx,avx512dq,noavx,noavx,avx")
+ (set_attr "type" "sselog1,sselog1,sselog1,sseishft1,sseishft1,sseishft1")
+ (set_attr "gpr32" "0,1,1,1,1,1")
(set (attr "prefix_extra")
(if_then_else (eq_attr "alternative" "0,1")
(const_string "1")
(const_string "*")))
(set_attr "length_immediate" "1")
- (set_attr "prefix" "maybe_vex,evex,orig,orig,maybe_vex")
+ (set_attr "prefix" "orig,vex,evex,orig,orig,maybe_vex")
(set_attr "mode" "TI")])
(define_insn "*vec_extractv4si_zext"
- [(set (match_operand:DI 0 "register_operand" "=r,r")
+ [(set (match_operand:DI 0 "register_operand" "=jr,r,r")
(zero_extend:DI
(vec_select:SI
- (match_operand:V4SI 1 "register_operand" "x,v")
+ (match_operand:V4SI 1 "register_operand" "x,x,v")
(parallel [(match_operand:SI 2 "const_0_to_3_operand")]))))]
"TARGET_64BIT && TARGET_SSE4_1"
"%vpextrd\t{%2, %1, %k0|%k0, %1, %2}"
- [(set_attr "isa" "*,avx512dq")
+ [(set_attr "isa" "noavx,avx,avx512dq")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
@@ -20223,13 +20372,14 @@
})
(define_insn "*vec_extractv2di_1"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,rm,m,x,x,Yv,x,v,r")
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=jrjm,rm,rm,m,x,x,Yv,x,v,r")
(vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" "x ,v ,v,0,x, v,x,o,o")
+ (match_operand:V2DI 1 "nonimmediate_operand" "x, x ,v ,v,0,x, v,x,o,o")
(parallel [(const_int 1)])))]
"TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"@
- %vpextrq\t{$1, %1, %0|%0, %1, 1}
+ pextrq\t{$1, %1, %0|%0, %1, 1}
+ vpextrq\t{$1, %1, %0|%0, %1, 1}
vpextrq\t{$1, %1, %0|%0, %1, 1}
%vmovhps\t{%1, %0|%0, %1}
psrldq\t{$8, %0|%0, 8}
@@ -20240,44 +20390,47 @@
#"
[(set (attr "isa")
(cond [(eq_attr "alternative" "0")
- (const_string "x64_sse4")
+ (const_string "x64_sse4_noavx")
(eq_attr "alternative" "1")
+ (const_string "x64_avx")
+ (eq_attr "alternative" "2")
(const_string "x64_avx512dq")
- (eq_attr "alternative" "3")
- (const_string "sse2_noavx")
(eq_attr "alternative" "4")
- (const_string "avx")
+ (const_string "sse2_noavx")
(eq_attr "alternative" "5")
- (const_string "avx512bw")
+ (const_string "avx")
(eq_attr "alternative" "6")
- (const_string "noavx")
+ (const_string "avx512bw")
(eq_attr "alternative" "8")
+ (const_string "noavx")
+ (eq_attr "alternative" "9")
(const_string "x64")
]
(const_string "*")))
(set (attr "type")
- (cond [(eq_attr "alternative" "2,6,7")
+ (cond [(eq_attr "alternative" "3,7,8")
(const_string "ssemov")
- (eq_attr "alternative" "3,4,5")
+ (eq_attr "alternative" "4,5,6")
(const_string "sseishft1")
- (eq_attr "alternative" "8")
+ (eq_attr "alternative" "9")
(const_string "imov")
]
(const_string "sselog1")))
+ (set_attr "gpr32" "0,1,1,1,1,1,1,1,1,1")
(set (attr "length_immediate")
- (if_then_else (eq_attr "alternative" "0,1,3,4,5")
+ (if_then_else (eq_attr "alternative" "0,1,2,4,5,6")
(const_string "1")
(const_string "*")))
(set (attr "prefix_rex")
- (if_then_else (eq_attr "alternative" "0,1")
+ (if_then_else (eq_attr "alternative" "0")
(const_string "1")
(const_string "*")))
(set (attr "prefix_extra")
- (if_then_else (eq_attr "alternative" "0,1")
+ (if_then_else (eq_attr "alternative" "0")
(const_string "1")
(const_string "*")))
- (set_attr "prefix" "maybe_vex,evex,maybe_vex,orig,vex,evex,orig,*,*")
- (set_attr "mode" "TI,TI,V2SF,TI,TI,TI,V4SF,DI,DI")])
+ (set_attr "prefix" "orig,maybe_evex,evex,maybe_vex,orig,vex,evex,orig,*,*")
+ (set_attr "mode" "TI,TI,TI,V2SF,TI,TI,TI,V4SF,DI,DI")])
(define_split
[(set (match_operand:<ssescalarmode> 0 "register_operand")
@@ -20315,7 +20468,7 @@
(match_operand:V4TI 1 "register_operand" "v")
(parallel
[(match_operand:SI 2 "const_0_to_3_operand")])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vextracti32x4\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "length_immediate" "1")
@@ -20323,7 +20476,7 @@
(set_attr "mode" "XI")])
(define_mode_iterator VEXTRACTI128_MODE
- [(V4TI "TARGET_AVX512F") V2TI])
+ [(V4TI "TARGET_AVX512F && TARGET_EVEX512") V2TI])
(define_split
[(set (match_operand:TI 0 "nonimmediate_operand")
@@ -20346,7 +20499,8 @@
&& VECTOR_MODE_P (GET_MODE (operands[1]))
&& ((TARGET_SSE && GET_MODE_SIZE (GET_MODE (operands[1])) == 16)
|| (TARGET_AVX && GET_MODE_SIZE (GET_MODE (operands[1])) == 32)
- || (TARGET_AVX512F && GET_MODE_SIZE (GET_MODE (operands[1])) == 64))
+ || (TARGET_AVX512F && TARGET_EVEX512
+ && GET_MODE_SIZE (GET_MODE (operands[1])) == 64))
&& (<MODE>mode == SImode || TARGET_64BIT || MEM_P (operands[0]))"
[(set (match_dup 0) (vec_select:SWI48x (match_dup 1)
(parallel [(const_int 0)])))]
@@ -20395,7 +20549,7 @@
(match_operand:SI 1 "nonimmediate_operand"
" 0, 0, x,Yv, 0, 0,Yv,rm, 0,rm")
(match_operand:SI 2 "nonimm_or_0_operand"
- " rm,rm,rm,rm,Yr,*x,Yv, C,*ym, C")))]
+ "jrjm,jrjm,rm,rm,Yr,*x,Yv, C,*ym, C")))]
"TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"@
pinsrd\t{$1, %2, %0|%0, %2, 1}
@@ -20422,6 +20576,10 @@
(const_string "mmxmov")
]
(const_string "sselog")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "0,1")
+ (const_string "0")
+ (const_string "1")))
(set (attr "prefix_extra")
(if_then_else (eq_attr "alternative" "0,1,2,3")
(const_string "1")
@@ -20546,7 +20704,7 @@
(match_operand:DI 1 "register_operand"
" 0, 0,x ,Yv,0,Yv,0,0,v")
(match_operand:DI 2 "nonimmediate_operand"
- " rm,rm,rm,rm,x,Yv,x,m,m")))]
+ " jrjm,jrjm,rm,rm,x,Yv,x,m,m")))]
"TARGET_SSE"
"@
pinsrq\t{$1, %2, %0|%0, %2, 1}
@@ -20576,6 +20734,10 @@
(eq_attr "alternative" "0,1,2,3,4,5")
(const_string "sselog")
(const_string "ssemov")))
+ (set (attr "gpr32")
+ (if_then_else (eq_attr "alternative" "0,1")
+ (const_string "0")
+ (const_string "1")))
(set (attr "prefix_rex")
(if_then_else (eq_attr "alternative" "0,1,2,3")
(const_string "1")
@@ -20685,7 +20847,7 @@
(define_expand "vec_unpacks_lo_di"
[(set (match_operand:SI 0 "register_operand")
(subreg:SI (match_operand:DI 1 "register_operand") 0))]
- "TARGET_AVX512BW")
+ "TARGET_AVX512BW && TARGET_EVEX512")
(define_expand "vec_unpacku_hi_<mode>"
[(match_operand:<sseunpackmode> 0 "register_operand")
@@ -20724,12 +20886,15 @@
(unspec [(const_int 0)] UNSPEC_MASKOP)])]
"TARGET_AVX512F")
+(define_mode_iterator SWI48x_MASK [SI (DI "TARGET_EVEX512")])
+
(define_expand "vec_unpacks_hi_<mode>"
[(parallel
- [(set (subreg:SWI48x
+ [(set (subreg:SWI48x_MASK
(match_operand:<HALFMASKMODE> 0 "register_operand") 0)
- (lshiftrt:SWI48x (match_operand:SWI48x 1 "register_operand")
- (match_dup 2)))
+ (lshiftrt:SWI48x_MASK
+ (match_operand:SWI48x_MASK 1 "register_operand")
+ (match_dup 2)))
(unspec [(const_int 0)] UNSPEC_MASKOP)])]
"TARGET_AVX512BW"
"operands[2] = GEN_INT (GET_MODE_BITSIZE (<HALFMASKMODE>mode));")
@@ -20811,33 +20976,35 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "<sse>_movmsk<ssemodesuffix><avxsizesuffix>"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
- [(match_operand:VF_128_256 1 "register_operand" "x")]
+ [(match_operand:VF_128_256 1 "register_operand" "x,x")]
UNSPEC_MOVMSK))]
"TARGET_SSE"
"%vmovmsk<ssemodesuffix>\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "maybe_vex")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_evex")
(set_attr "mode" "<MODE>")])
(define_insn "*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_<u>ext"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(any_extend:DI
(unspec:SI
- [(match_operand:VF_128_256 1 "register_operand" "x")]
+ [(match_operand:VF_128_256 1 "register_operand" "x,x")]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE"
- "%vmovmsk<ssemodesuffix>\t{%1, %k0|%k0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "maybe_vex")
+ "%vmovmsk<ssemodesuffix>\t{%1, %0|%0, %1}"
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_evex")
(set_attr "mode" "<MODE>")])
(define_insn_and_split "*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_lt"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
[(lt:VF_128_256
- (match_operand:<sseintvecmode> 1 "register_operand" "x")
+ (match_operand:<sseintvecmode> 1 "register_operand" "x,x")
(match_operand:<sseintvecmode> 2 "const0_operand"))]
UNSPEC_MOVMSK))]
"TARGET_SSE"
@@ -20846,16 +21013,17 @@
[(set (match_dup 0)
(unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))]
"operands[1] = gen_lowpart (<MODE>mode, operands[1]);"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
(define_insn_and_split "*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_<u>ext_lt"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(any_extend:DI
(unspec:SI
[(lt:VF_128_256
- (match_operand:<sseintvecmode> 1 "register_operand" "x")
+ (match_operand:<sseintvecmode> 1 "register_operand" "x,x")
(match_operand:<sseintvecmode> 2 "const0_operand"))]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE"
@@ -20864,16 +21032,17 @@
[(set (match_dup 0)
(any_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))]
"operands[1] = gen_lowpart (<MODE>mode, operands[1]);"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
(define_insn_and_split "*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_shift"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
[(subreg:VF_128_256
(ashiftrt:<sseintvecmode>
- (match_operand:<sseintvecmode> 1 "register_operand" "x")
+ (match_operand:<sseintvecmode> 1 "register_operand" "x,x")
(match_operand:QI 2 "const_int_operand")) 0)]
UNSPEC_MOVMSK))]
"TARGET_SSE"
@@ -20882,17 +21051,18 @@
[(set (match_dup 0)
(unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))]
"operands[1] = gen_lowpart (<MODE>mode, operands[1]);"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
(define_insn_and_split "*<sse>_movmsk<ssemodesuffix><avxsizesuffix>_<u>ext_shift"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(any_extend:DI
(unspec:SI
[(subreg:VF_128_256
(ashiftrt:<sseintvecmode>
- (match_operand:<sseintvecmode> 1 "register_operand" "x")
+ (match_operand:<sseintvecmode> 1 "register_operand" "x,x")
(match_operand:QI 2 "const_int_operand")) 0)]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE"
@@ -20901,18 +21071,20 @@
[(set (match_dup 0)
(any_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))]
"operands[1] = gen_lowpart (<MODE>mode, operands[1]);"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
(define_insn "<sse2_avx2>_pmovmskb"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
- [(match_operand:VI1_AVX2 1 "register_operand" "x")]
+ [(match_operand:VI1_AVX2 1 "register_operand" "x,x")]
UNSPEC_MOVMSK))]
"TARGET_SSE2"
"%vpmovmskb\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -20922,14 +21094,15 @@
(set_attr "mode" "SI")])
(define_insn "*<sse2_avx2>_pmovmskb_zext"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(zero_extend:DI
(unspec:SI
- [(match_operand:VI1_AVX2 1 "register_operand" "x")]
+ [(match_operand:VI1_AVX2 1 "register_operand" "x,x")]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE2"
"%vpmovmskb\t{%1, %k0|%k0, %1}"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -20939,14 +21112,15 @@
(set_attr "mode" "SI")])
(define_insn "*sse2_pmovmskb_ext"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(sign_extend:DI
(unspec:SI
- [(match_operand:V16QI 1 "register_operand" "x")]
+ [(match_operand:V16QI 1 "register_operand" "x,x")]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE2"
"%vpmovmskb\t{%1, %k0|%k0, %1}"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -21031,9 +21205,9 @@
})
(define_insn_and_split "*<sse2_avx2>_pmovmskb_lt"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
- [(lt:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "x")
+ [(lt:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "x,x")
(match_operand:VI1_AVX2 2 "const0_operand"))]
UNSPEC_MOVMSK))]
"TARGET_SSE2"
@@ -21042,7 +21216,8 @@
[(set (match_dup 0)
(unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))]
""
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -21052,10 +21227,10 @@
(set_attr "mode" "SI")])
(define_insn_and_split "*<sse2_avx2>_pmovmskb_zext_lt"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(zero_extend:DI
(unspec:SI
- [(lt:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "x")
+ [(lt:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "x,x")
(match_operand:VI1_AVX2 2 "const0_operand"))]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE2"
@@ -21064,7 +21239,8 @@
[(set (match_dup 0)
(zero_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))]
""
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -21074,10 +21250,10 @@
(set_attr "mode" "SI")])
(define_insn_and_split "*sse2_pmovmskb_ext_lt"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=r,jr")
(sign_extend:DI
(unspec:SI
- [(lt:V16QI (match_operand:V16QI 1 "register_operand" "x")
+ [(lt:V16QI (match_operand:V16QI 1 "register_operand" "x,x")
(match_operand:V16QI 2 "const0_operand"))]
UNSPEC_MOVMSK)))]
"TARGET_64BIT && TARGET_SSE2"
@@ -21086,7 +21262,8 @@
[(set (match_dup 0)
(sign_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))]
""
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
@@ -21147,21 +21324,25 @@
(set_attr "mode" "TI")])
(define_insn "sse_ldmxcsr"
- [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")]
+ [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m,jm")]
UNSPECV_LDMXCSR)]
"TARGET_SSE"
"%vldmxcsr\t%0"
- [(set_attr "type" "sse")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sse")
+ (set_attr "gpr32" "1,0")
(set_attr "atom_sse_attr" "mxcsr")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "load")])
(define_insn "sse_stmxcsr"
- [(set (match_operand:SI 0 "memory_operand" "=m")
+ [(set (match_operand:SI 0 "memory_operand" "=m,jm")
(unspec_volatile:SI [(const_int 0)] UNSPECV_STMXCSR))]
"TARGET_SSE"
"%vstmxcsr\t%0"
- [(set_attr "type" "sse")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "sse")
+ (set_attr "gpr32" "0")
(set_attr "atom_sse_attr" "mxcsr")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "store")])
@@ -21216,7 +21397,7 @@
(vec_select:V16HI
(vec_concat:V32HI
(match_operand:V16HI 1 "register_operand" "x")
- (match_operand:V16HI 2 "nonimmediate_operand" "xm"))
+ (match_operand:V16HI 2 "nonimmediate_operand" "xjm"))
(parallel
[(const_int 0) (const_int 2) (const_int 4) (const_int 6)
(const_int 16) (const_int 18) (const_int 20) (const_int 22)
@@ -21232,6 +21413,7 @@
"TARGET_AVX2"
"vph<plusminus_mnemonic>w\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
@@ -21242,7 +21424,7 @@
(vec_select:V8HI
(vec_concat:V16HI
(match_operand:V8HI 1 "register_operand" "0,x")
- (match_operand:V8HI 2 "vector_operand" "xBm,xm"))
+ (match_operand:V8HI 2 "vector_operand" "xja,xjm"))
(parallel
[(const_int 0) (const_int 2) (const_int 4) (const_int 6)
(const_int 8) (const_int 10) (const_int 12) (const_int 14)]))
@@ -21257,6 +21439,7 @@
vph<plusminus_mnemonic>w\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0")
(set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex")
@@ -21308,7 +21491,7 @@
(vec_select:V8SI
(vec_concat:V16SI
(match_operand:V8SI 1 "register_operand" "x")
- (match_operand:V8SI 2 "nonimmediate_operand" "xm"))
+ (match_operand:V8SI 2 "nonimmediate_operand" "xjm"))
(parallel
[(const_int 0) (const_int 2) (const_int 8) (const_int 10)
(const_int 4) (const_int 6) (const_int 12) (const_int 14)]))
@@ -21320,6 +21503,7 @@
"TARGET_AVX2"
"vph<plusminus_mnemonic>d\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
@@ -21330,7 +21514,7 @@
(vec_select:V4SI
(vec_concat:V8SI
(match_operand:V4SI 1 "register_operand" "0,x")
- (match_operand:V4SI 2 "vector_operand" "xBm,xm"))
+ (match_operand:V4SI 2 "vector_operand" "xja,xjm"))
(parallel
[(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))
(vec_select:V4SI
@@ -21343,6 +21527,7 @@
vph<plusminus_mnemonic>d\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0")
(set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix_extra" "1")
@@ -21382,6 +21567,7 @@
}
[(set_attr "mmx_isa" "native,sse_noavx,avx")
(set_attr "type" "sseiadd")
+ (set_attr "gpr32" "0")
(set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
@@ -21484,7 +21670,7 @@
(const_int 1) (const_int 1)
(const_int 1) (const_int 1)]))
(const_int 1))))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpmulhrsw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix" "evex")
@@ -21503,7 +21689,7 @@
(const_int 12) (const_int 14)])))
(sign_extend:V8HI
(vec_select:V8QI
- (match_operand:V16QI 2 "vector_operand" "xBm,Ywm")
+ (match_operand:V16QI 2 "vector_operand" "xja,Ywm")
(parallel [(const_int 0) (const_int 2)
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
@@ -21526,6 +21712,7 @@
pmaddubsw\t{%2, %0|%0, %2}
vpmaddubsw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sseiadd")
(set_attr "atom_unit" "simul")
(set_attr "prefix_extra" "1")
@@ -21644,7 +21831,7 @@
(sign_extend:<ssedoublemode>
(match_operand:VI2_AVX2_AVX512BW 1 "vector_operand" "%0,<v_Yw>"))
(sign_extend:<ssedoublemode>
- (match_operand:VI2_AVX2_AVX512BW 2 "vector_operand" "xBm,<v_Yw>m")))
+ (match_operand:VI2_AVX2_AVX512BW 2 "vector_operand" "xja,<v_Yw>m")))
(const_int 14))
(match_operand:VI2_AVX2_AVX512BW 3 "const1_operand"))
(const_int 1))))]
@@ -21654,6 +21841,7 @@
pmulhrsw\t{%2, %0|%0, %2}
vpmulhrsw\t{%2, %1, %0<mask_operand4>|%0<mask_operand4>, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
@@ -21770,13 +21958,14 @@
[(set (match_operand:VI1_AVX512 0 "register_operand" "=x,<v_Yw>")
(unspec:VI1_AVX512
[(match_operand:VI1_AVX512 1 "register_operand" "0,<v_Yw>")
- (match_operand:VI1_AVX512 2 "vector_operand" "xBm,<v_Yw>m")]
+ (match_operand:VI1_AVX512 2 "vector_operand" "xja,<v_Yw>m")]
UNSPEC_PSHUFB))]
"TARGET_SSSE3 && <mask_mode512bit_condition> && <mask_avx512bw_condition>"
"@
pshufb\t{%2, %0|%0, %2}
vpshufb\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
@@ -21836,7 +22025,7 @@
[(set (match_operand:VI124_AVX2 0 "register_operand" "=x,x")
(unspec:VI124_AVX2
[(match_operand:VI124_AVX2 1 "register_operand" "0,x")
- (match_operand:VI124_AVX2 2 "vector_operand" "xBm,xm")]
+ (match_operand:VI124_AVX2 2 "vector_operand" "xja,xjm")]
UNSPEC_PSIGN))]
"TARGET_SSSE3"
"@
@@ -21844,6 +22033,7 @@
vpsign<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -21891,7 +22081,7 @@
[(set (match_operand:VIMAX_AVX2_AVX512BW 0 "register_operand" "=x,<v_Yw>")
(unspec:VIMAX_AVX2_AVX512BW
[(match_operand:VIMAX_AVX2_AVX512BW 1 "register_operand" "0,<v_Yw>")
- (match_operand:VIMAX_AVX2_AVX512BW 2 "vector_operand" "xBm,<v_Yw>m")
+ (match_operand:VIMAX_AVX2_AVX512BW 2 "vector_operand" "xja,<v_Yw>m")
(match_operand:SI 3 "const_0_to_255_mul_8_operand")]
UNSPEC_PALIGNR))]
"TARGET_SSSE3"
@@ -21909,6 +22099,7 @@
}
}
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "type" "sseishft")
(set_attr "atom_unit" "sishuf")
(set_attr "prefix_extra" "1")
@@ -21983,6 +22174,7 @@
}
[(set_attr "mmx_isa" "native,sse_noavx,avx")
(set_attr "type" "sseishft")
+ (set_attr "gpr32" "0,0,1")
(set_attr "atom_unit" "sishuf")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
@@ -21992,18 +22184,21 @@
;; Mode iterator to handle singularity w/ absence of V2DI and V4DI
;; modes for abs instruction on pre AVX-512 targets.
(define_mode_iterator VI1248_AVX512VL_AVX512BW
- [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX512VL")
+ (V2DI "TARGET_AVX512VL")])
(define_insn "*abs<mode>2"
- [(set (match_operand:VI1248_AVX512VL_AVX512BW 0 "register_operand" "=<v_Yw>")
+ [(set (match_operand:VI1248_AVX512VL_AVX512BW 0 "register_operand" "=x,<v_Yw>")
(abs:VI1248_AVX512VL_AVX512BW
- (match_operand:VI1248_AVX512VL_AVX512BW 1 "vector_operand" "<v_Yw>Bm")))]
+ (match_operand:VI1248_AVX512VL_AVX512BW 1 "vector_operand" "xja,<v_Yw>Bm")))]
"TARGET_SSSE3"
"%vpabs<ssemodesuffix>\t{%1, %0|%0, %1}"
- [(set_attr "type" "sselog1")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -22141,7 +22336,7 @@
(define_insn "<sse4_1>_blend<ssemodesuffix><avxsizesuffix>"
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(vec_merge:VF_128_256
- (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VF_128_256 2 "vector_operand" "Yrja,*xja,xjm")
(match_operand:VF_128_256 1 "register_operand" "0,0,x")
(match_operand:SI 3 "const_0_to_<blendbits>_operand")))]
"TARGET_SSE4_1"
@@ -22151,6 +22346,7 @@
vblend<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -22161,7 +22357,7 @@
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(unspec:VF_128_256
[(match_operand:VF_128_256 1 "register_operand" "0,0,x")
- (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VF_128_256 2 "vector_operand" "Yrja,*xja,xjm")
(match_operand:VF_128_256 3 "register_operand" "Yz,Yz,x")]
UNSPEC_BLENDV))]
"TARGET_SSE4_1"
@@ -22171,6 +22367,7 @@
vblendv<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -22222,7 +22419,7 @@
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(unspec:VF_128_256
[(match_operand:VF_128_256 1 "register_operand" "0,0,x")
- (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VF_128_256 2 "vector_operand" "Yrja,*xja,xjm")
(lt:VF_128_256
(match_operand:<sseintvecmode> 3 "register_operand" "Yz,Yz,x")
(match_operand:<sseintvecmode> 4 "const0_operand"))]
@@ -22236,6 +22433,7 @@
"operands[3] = gen_lowpart (<MODE>mode, operands[3]);"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -22254,7 +22452,7 @@
[(set (match_operand:<ssebytemode> 0 "register_operand" "=Yr,*x,x")
(unspec:<ssebytemode>
[(match_operand:<ssebytemode> 1 "register_operand" "0,0,x")
- (match_operand:<ssebytemode> 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:<ssebytemode> 2 "vector_operand" "Yrja,*xja,xjm")
(subreg:<ssebytemode>
(lt:VI48_AVX
(match_operand:VI48_AVX 3 "register_operand" "Yz,Yz,x")
@@ -22274,6 +22472,7 @@
}
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -22312,7 +22511,7 @@
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(unspec:VF_128_256
[(match_operand:VF_128_256 1 "vector_operand" "%0,0,x")
- (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VF_128_256 2 "vector_operand" "Yrja,*xja,xjm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_DP))]
"TARGET_SSE4_1"
@@ -22322,6 +22521,7 @@
vdp<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemul")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
@@ -22336,11 +22536,12 @@
(define_insn "<vi8_sse4_1_avx2_avx512>_movntdqa"
[(set (match_operand:VI8_AVX2_AVX512F 0 "register_operand" "=Yr,*x,v")
- (unspec:VI8_AVX2_AVX512F [(match_operand:VI8_AVX2_AVX512F 1 "memory_operand" "m,m,m")]
+ (unspec:VI8_AVX2_AVX512F [(match_operand:VI8_AVX2_AVX512F 1 "memory_operand" "jm,jm,m")]
UNSPEC_MOVNTDQA))]
"TARGET_SSE4_1"
"%vmovntdqa\t{%1, %0|%0, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,maybe_evex")
@@ -22350,7 +22551,7 @@
[(set (match_operand:VI1_AVX2 0 "register_operand" "=Yr,*x,x")
(unspec:VI1_AVX2
[(match_operand:VI1_AVX2 1 "register_operand" "0,0,x")
- (match_operand:VI1_AVX2 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VI1_AVX2 2 "vector_operand" "Yrja,*xja,xjm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_MPSADBW))]
"TARGET_SSE4_1"
@@ -22359,7 +22560,9 @@
mpsadbw\t{%3, %2, %0|%0, %2, %3}
vmpsadbw\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0")
(set_attr "length_immediate" "1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
@@ -22371,7 +22574,7 @@
[(set (match_operand:VI2_AVX2_AVX512BW 0 "register_operand" "=Yr,*x,<v_Yw>")
(unspec:VI2_AVX2_AVX512BW
[(match_operand:<sseunpackmode> 1 "register_operand" "0,0,<v_Yw>")
- (match_operand:<sseunpackmode> 2 "vector_operand" "YrBm,*xBm,<v_Yw>m")]
+ (match_operand:<sseunpackmode> 2 "vector_operand" "Yrja,*xja,<v_Yw>m")]
UNSPEC_US_TRUNCATE))]
"TARGET_SSE4_1 && <mask_mode512bit_condition> && <mask_avx512bw_condition>"
"@
@@ -22379,6 +22582,7 @@
packusdw\t{%2, %0|%0, %2}
vpackusdw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,<mask_prefix>")
@@ -22388,7 +22592,7 @@
[(set (match_operand:VI1_AVX2 0 "register_operand" "=Yr,*x,x")
(unspec:VI1_AVX2
[(match_operand:VI1_AVX2 1 "register_operand" "0,0,x")
- (match_operand:VI1_AVX2 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VI1_AVX2 2 "vector_operand" "Yrja,*xja,xjm")
(match_operand:VI1_AVX2 3 "register_operand" "Yz,Yz,x")]
UNSPEC_BLENDV))]
"TARGET_SSE4_1"
@@ -22398,6 +22602,7 @@
vpblendvb\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "*,*,1")
(set_attr "prefix" "orig,orig,vex")
@@ -22437,7 +22642,7 @@
[(set (match_operand:VI1_AVX2 0 "register_operand" "=Yr,*x,x")
(unspec:VI1_AVX2
[(match_operand:VI1_AVX2 1 "register_operand" "0,0,x")
- (match_operand:VI1_AVX2 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:VI1_AVX2 2 "vector_operand" "Yrja,*xja,xjm")
(lt:VI1_AVX2 (match_operand:VI1_AVX2 3 "register_operand" "Yz,Yz,x")
(match_operand:VI1_AVX2 4 "const0_operand"))]
UNSPEC_BLENDV))]
@@ -22450,6 +22655,7 @@
""
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "*,*,1")
(set_attr "prefix" "orig,orig,vex")
@@ -22481,7 +22687,7 @@
(define_insn "sse4_1_pblend<ssemodesuffix>"
[(set (match_operand:V8_128 0 "register_operand" "=Yr,*x,x")
(vec_merge:V8_128
- (match_operand:V8_128 2 "vector_operand" "YrBm,*xBm,xm")
+ (match_operand:V8_128 2 "vector_operand" "Yrja,*xja,xjm")
(match_operand:V8_128 1 "register_operand" "0,0,x")
(match_operand:SI 3 "const_0_to_255_operand")))]
"TARGET_SSE4_1"
@@ -22491,6 +22697,7 @@
vpblendw\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,orig,vex")
@@ -22553,7 +22760,7 @@
(define_insn "*avx2_pblend<ssemodesuffix>"
[(set (match_operand:V16_256 0 "register_operand" "=x")
(vec_merge:V16_256
- (match_operand:V16_256 2 "nonimmediate_operand" "xm")
+ (match_operand:V16_256 2 "nonimmediate_operand" "xjm")
(match_operand:V16_256 1 "register_operand" "x")
(match_operand:SI 3 "avx2_pblendw_operand")))]
"TARGET_AVX2"
@@ -22562,6 +22769,7 @@
return "vpblendw\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
@@ -22570,7 +22778,7 @@
(define_insn "avx2_pblendd<mode>"
[(set (match_operand:VI4_AVX2 0 "register_operand" "=x")
(vec_merge:VI4_AVX2
- (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xm")
+ (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xjm")
(match_operand:VI4_AVX2 1 "register_operand" "x")
(match_operand:SI 3 "const_0_to_255_operand")))]
"TARGET_AVX2"
@@ -22583,11 +22791,12 @@
(define_insn "sse4_1_phminposuw"
[(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,x")
- (unspec:V8HI [(match_operand:V8HI 1 "vector_operand" "YrBm,*xBm,xm")]
+ (unspec:V8HI [(match_operand:V8HI 1 "vector_operand" "Yrja,*xja,xjm")]
UNSPEC_PHMINPOSUW))]
"TARGET_SSE4_1"
"%vphminposuw\t{%1, %0|%0, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
@@ -22651,7 +22860,7 @@
[(set (match_operand:V32HI 0 "register_operand" "=v")
(any_extend:V32HI
(match_operand:V32QI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -22665,7 +22874,7 @@
(match_operand:V64QI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V32HI (match_dup 1)))]
@@ -22685,7 +22894,7 @@
(match_operand:V64QI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
[(match_operand 5 "const_int_operand")])))]
- "TARGET_AVX512BW"
+ "TARGET_AVX512BW && TARGET_EVEX512"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V32HI (match_dup 1)))]
@@ -22698,7 +22907,7 @@
[(set (match_operand:V32HI 0 "register_operand")
(any_extend:V32HI
(match_operand:V32QI 1 "nonimmediate_operand")))]
- "TARGET_AVX512BW")
+ "TARGET_AVX512BW && TARGET_EVEX512")
(define_insn "sse4_1_<code>v8qiv8hi2<mask_name>"
[(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,Yw")
@@ -22720,10 +22929,14 @@
(define_insn "*sse4_1_<code>v8qiv8hi2<mask_name>_1"
[(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,Yw")
(any_extend:V8HI
- (match_operand:V8QI 1 "memory_operand" "m,m,m")))]
+ (match_operand:V8QI 1 "memory_operand" "jm,jm,m")))]
"TARGET_SSE4_1 && <mask_avx512bw_condition> && <mask_avx512vl_condition>"
- "%vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "@
+ pmov<extsuffix>bw\t{%1, %0|%0, %1}
+ pmov<extsuffix>bw\t{%1, %0|%0, %1}
+ vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,maybe_evex")
@@ -22753,7 +22966,7 @@
[(set (match_operand:V16QI 0 "register_operand" "=Yr,*x,Yw")
(vec_select:V16QI
(vec_concat:V32QI
- (match_operand:V16QI 1 "vector_operand" "YrBm,*xBm,Ywm")
+ (match_operand:V16QI 1 "vector_operand" "Yrja,*xja,Ywm")
(match_operand:V16QI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
@@ -22778,7 +22991,8 @@
DONE;
}
}
- [(set_attr "isa" "noavx,noavx,avx")])
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")])
(define_insn_and_split "*sse4_1_zero_extendv8qiv8hi2_4"
[(set (match_operand:V16QI 0 "register_operand" "=Yr,*x,Yw")
@@ -22786,7 +23000,7 @@
(vec_concat:V32QI
(subreg:V16QI
(vec_concat:VI248_128
- (match_operand:<ssehalfvecmode> 1 "vector_operand" "YrBm,*xBm,Ywm")
+ (match_operand:<ssehalfvecmode> 1 "vector_operand" "Yrja,*xja,Ywm")
(match_operand:<ssehalfvecmode> 2 "const0_operand")) 0)
(match_operand:V16QI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
@@ -22813,7 +23027,8 @@
}
operands[1] = lowpart_subreg (V16QImode, operands[1], <ssehalfvecmode>mode);
}
- [(set_attr "isa" "noavx,noavx,avx")])
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")])
(define_expand "<insn>v8qiv8hi2"
[(set (match_operand:V8HI 0 "register_operand")
@@ -22840,7 +23055,7 @@
[(set (match_operand:V16SI 0 "register_operand" "=v")
(any_extend:V16SI
(match_operand:V16QI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<extsuffix>bd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -22850,7 +23065,7 @@
[(set (match_operand:V16SI 0 "register_operand")
(any_extend:V16SI
(match_operand:V16QI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_insn "avx2_<code>v8qiv8si2<mask_name>"
[(set (match_operand:V8SI 0 "register_operand" "=v")
@@ -22932,10 +23147,11 @@
(define_insn "*sse4_1_<code>v4qiv4si2<mask_name>_1"
[(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v")
(any_extend:V4SI
- (match_operand:V4QI 1 "memory_operand" "m,m,m")))]
+ (match_operand:V4QI 1 "memory_operand" "jm,jm,m")))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>"
"%vpmov<extsuffix>bd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,maybe_evex")
@@ -22982,7 +23198,7 @@
[(set (match_operand:V16SI 0 "register_operand" "=v")
(any_extend:V16SI
(match_operand:V16HI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<extsuffix>wd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -22992,7 +23208,7 @@
[(set (match_operand:V16SI 0 "register_operand")
(any_extend:V16SI
(match_operand:V16HI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_insn_and_split "avx512f_zero_extendv16hiv16si2_1"
[(set (match_operand:V32HI 0 "register_operand" "=v")
@@ -23002,7 +23218,7 @@
(match_operand:V32HI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V16SI (match_dup 1)))]
@@ -23104,10 +23320,11 @@
(define_insn "*sse4_1_<code>v4hiv4si2<mask_name>_1"
[(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v")
(any_extend:V4SI
- (match_operand:V4HI 1 "memory_operand" "m,m,m")))]
+ (match_operand:V4HI 1 "memory_operand" "jm,jm,m")))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>"
"%vpmov<extsuffix>wd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,maybe_evex")
@@ -23156,7 +23373,7 @@
[(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,v")
(vec_select:V8HI
(vec_concat:V16HI
- (match_operand:V8HI 1 "vector_operand" "YrBm,*xBm,vm")
+ (match_operand:V8HI 1 "vector_operand" "Yrja,*xja,vm")
(match_operand:V8HI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
@@ -23179,7 +23396,8 @@
DONE;
}
}
- [(set_attr "isa" "noavx,noavx,avx")])
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")])
(define_insn_and_split "*sse4_1_zero_extendv4hiv4si2_4"
[(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,v")
@@ -23187,7 +23405,7 @@
(vec_concat:V16HI
(subreg:V8HI
(vec_concat:VI148_128
- (match_operand:<ssehalfvecmode> 1 "vector_operand" "YrBm,*xBm,vm")
+ (match_operand:<ssehalfvecmode> 1 "vector_operand" "Yrja,*xja,vm")
(match_operand:<ssehalfvecmode> 2 "const0_operand")) 0)
(match_operand:V8HI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
@@ -23212,7 +23430,8 @@
}
operands[1] = lowpart_subreg (V8HImode, operands[1], <ssehalfvecmode>mode);
}
- [(set_attr "isa" "noavx,noavx,avx")])
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")])
(define_insn "avx512f_<code>v8qiv8di2<mask_name>"
[(set (match_operand:V8DI 0 "register_operand" "=v")
@@ -23223,7 +23442,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -23233,7 +23452,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8QI 1 "memory_operand" "m")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -23251,7 +23470,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -23262,7 +23481,7 @@
[(set (match_operand:V8DI 0 "register_operand")
(any_extend:V8DI
(match_operand:V8QI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
if (!MEM_P (operands[1]))
{
@@ -23350,12 +23569,14 @@
(set_attr "mode" "TI")])
(define_insn "*sse4_1_<code>v2qiv2di2<mask_name>_1"
- [(set (match_operand:V2DI 0 "register_operand" "=v")
+ [(set (match_operand:V2DI 0 "register_operand" "=x,v")
(any_extend:V2DI
- (match_operand:V2QI 1 "memory_operand" "m")))]
+ (match_operand:V2QI 1 "memory_operand" "jm,m")))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>"
"%vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
- [(set_attr "type" "ssemov")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "TI")])
@@ -23402,7 +23623,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8HI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<extsuffix>wq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -23412,7 +23633,7 @@
[(set (match_operand:V8DI 0 "register_operand")
(any_extend:V8DI
(match_operand:V8HI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_insn "avx2_<code>v4hiv4di2<mask_name>"
[(set (match_operand:V4DI 0 "register_operand" "=v")
@@ -23489,10 +23710,11 @@
(define_insn "*sse4_1_<code>v2hiv2di2<mask_name>_1"
[(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v")
(any_extend:V2DI
- (match_operand:V2HI 1 "memory_operand" "m,m,m")))]
+ (match_operand:V2HI 1 "memory_operand" "jm,jm,m")))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>"
"%vpmov<extsuffix>wq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,maybe_evex")
@@ -23538,7 +23760,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8SI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -23552,7 +23774,7 @@
(match_operand:V16SI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V8DI (match_dup 1)))]
@@ -23571,7 +23793,7 @@
(match_operand:V16SI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
[(match_operand 5 "const_int_operand")])))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V8DI (match_dup 1)))]
@@ -23583,7 +23805,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8SI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_insn "avx2_<code>v4siv4di2<mask_name>"
[(set (match_operand:V4DI 0 "register_operand" "=v")
@@ -23654,10 +23876,11 @@
(define_insn "*sse4_1_<code>v2siv2di2<mask_name>_1"
[(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v")
(any_extend:V2DI
- (match_operand:V2SI 1 "memory_operand" "m,m,m")))]
+ (match_operand:V2SI 1 "memory_operand" "jm,jm,m")))]
"TARGET_SSE4_1 && <mask_avx512vl_condition>"
"%vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,maybe_evex")
@@ -23684,7 +23907,7 @@
[(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v")
(vec_select:V4SI
(vec_concat:V8SI
- (match_operand:V4SI 1 "vector_operand" "YrBm,*xBm,vm")
+ (match_operand:V4SI 1 "vector_operand" "Yrja,*xja,vm")
(match_operand:V4SI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
@@ -23705,14 +23928,15 @@
DONE;
}
}
- [(set_attr "isa" "noavx,noavx,avx")])
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")])
(define_insn_and_split "*sse4_1_zero_extendv2siv2di2_4"
[(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v")
(vec_select:V4SI
(vec_concat:V8SI
(vec_concat:V4SI
- (match_operand:V2SI 1 "vector_operand" "YrBm, *xBm, vm")
+ (match_operand:V2SI 1 "vector_operand" "Yrja, *xja, vm")
(match_operand:V2SI 2 "const0_operand"))
(match_operand:V4SI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
@@ -23734,7 +23958,8 @@
}
operands[1] = lowpart_subreg (V4SImode, operands[1], V2SImode);
}
- [(set_attr "isa" "noavx,noavx,avx")])
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "gpr32" "0,0,1")])
(define_expand "<insn>v2siv2di2"
[(set (match_operand:V2DI 0 "register_operand")
@@ -23762,11 +23987,12 @@
(define_insn "avx_vtest<ssemodesuffix><avxsizesuffix>"
[(set (reg:CC FLAGS_REG)
(unspec:CC [(match_operand:VF_128_256 0 "register_operand" "x")
- (match_operand:VF_128_256 1 "nonimmediate_operand" "xm")]
+ (match_operand:VF_128_256 1 "nonimmediate_operand" "xjm")]
UNSPEC_VTESTP))]
"TARGET_AVX"
"vtest<ssemodesuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
@@ -23776,12 +24002,13 @@
(define_insn "*<sse4_1>_ptest<mode>"
[(set (reg FLAGS_REG)
(unspec [(match_operand:V_AVX 0 "register_operand" "Yr, *x, x")
- (match_operand:V_AVX 1 "vector_operand" "YrBm, *xBm, xm")]
+ (match_operand:V_AVX 1 "vector_operand" "Yrja, *xja, xjm")]
UNSPEC_PTEST))]
"TARGET_SSE4_1 && ix86_match_ptest_ccmode (insn)"
"%vptest\t{%1, %0|%0, %1}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecomi")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
(set (attr "btver2_decode")
@@ -23818,12 +24045,13 @@
(define_insn "ptesttf2"
[(set (reg:CC FLAGS_REG)
(unspec:CC [(match_operand:TF 0 "register_operand" "Yr, *x, x")
- (match_operand:TF 1 "vector_operand" "YrBm, *xBm, xm")]
+ (match_operand:TF 1 "vector_operand" "Yrja, *xja, xjm")]
UNSPEC_PTEST))]
"TARGET_SSE4_1"
"%vptest\t{%1, %0|%0, %1}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecomi")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
@@ -23934,13 +24162,14 @@
(define_insn "<sse4_1>_round<ssemodesuffix><avxsizesuffix>"
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(unspec:VF_128_256
- [(match_operand:VF_128_256 1 "vector_operand" "YrBm,*xBm,xm")
+ [(match_operand:VF_128_256 1 "vector_operand" "Yrja,*xja,xjm")
(match_operand:SI 2 "const_0_to_15_operand")]
UNSPEC_ROUND))]
"TARGET_SSE4_1"
"%vround<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecvt")
+ (set_attr "gpr32" "0")
(set_attr "prefix_data16" "1,1,*")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
@@ -23977,7 +24206,7 @@
[(match_operand:V16SI 0 "register_operand")
(match_operand:V16SF 1 "nonimmediate_operand")
(match_operand:SI 2 "const_0_to_15_operand")]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
rtx tmp = gen_reg_rtx (V16SFmode);
emit_insn (gen_avx512f_rndscalev16sf (tmp, operands[1], operands[2]));
@@ -24024,22 +24253,35 @@
})
(define_insn "sse4_1_round<ssescalarmodesuffix>"
- [(set (match_operand:VF_128 0 "register_operand" "=Yr,*x,x,v")
- (vec_merge:VF_128
- (unspec:VF_128
- [(match_operand:VF_128 2 "nonimmediate_operand" "Yrm,*xm,xm,vm")
+ [(set (match_operand:VFH_128 0 "register_operand" "=Yr,*x,x,v")
+ (vec_merge:VFH_128
+ (unspec:VFH_128
+ [(match_operand:VFH_128 2 "nonimmediate_operand" "Yrjm,*xjm,xjm,vm")
(match_operand:SI 3 "const_0_to_15_operand")]
UNSPEC_ROUND)
- (match_operand:VF_128 1 "register_operand" "0,0,x,v")
+ (match_operand:VFH_128 1 "register_operand" "0,0,x,v")
(const_int 1)))]
"TARGET_SSE4_1"
- "@
- round<ssescalarmodesuffix>\t{%3, %2, %0|%0, %<iptr>2, %3}
- round<ssescalarmodesuffix>\t{%3, %2, %0|%0, %<iptr>2, %3}
- vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}
- vrndscale<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}"
- [(set_attr "isa" "noavx,noavx,avx,avx512f")
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ return "round<ssescalarmodesuffix>\t{%3, %2, %0|%0, %<iptr>2, %3}";
+ case 2:
+ return "vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}";
+ case 3:
+ if (x86_evex_reg_mentioned_p (operands, 3) || <MODE>mode == V8HFmode)
+ return "vrndscale<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}";
+ else
+ return "vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "isa" "noavx,noavx,noavx512f,avx512f")
(set_attr "type" "ssecvt")
+ (set_attr "gpr32" "0,0,0,1")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*,*")
(set_attr "prefix_extra" "1")
@@ -24051,19 +24293,32 @@
(vec_merge:VFH_128
(vec_duplicate:VFH_128
(unspec:<ssescalarmode>
- [(match_operand:<ssescalarmode> 2 "nonimmediate_operand" "Yrm,*xm,xm,vm")
+ [(match_operand:<ssescalarmode> 2 "nonimmediate_operand" "Yrjm,*xjm,xjm,vm")
(match_operand:SI 3 "const_0_to_15_operand")]
UNSPEC_ROUND))
(match_operand:VFH_128 1 "register_operand" "0,0,x,v")
(const_int 1)))]
"TARGET_SSE4_1"
- "@
- round<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}
- round<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}
- vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}
- vrndscale<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
- [(set_attr "isa" "noavx,noavx,avx,avx512f")
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ return "round<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}";
+ case 2:
+ return "vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+ case 3:
+ if (x86_evex_reg_mentioned_p (operands, 3) || <MODE>mode == V8HFmode)
+ return "vrndscale<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+ else
+ return "vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "isa" "noavx,noavx,noavx512f,avx512f")
(set_attr "type" "ssecvt")
+ (set_attr "gpr32" "0,0,0,1")
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,1,*,*")
(set_attr "prefix_extra" "1")
@@ -24081,6 +24336,17 @@
(define_expand "lfloor<mode><sseintvecmodelower>2"
[(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VHF_AVX512VL 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_floor<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "lfloor<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
(match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
"TARGET_SSE4_1 && !flag_trapping_math"
{
@@ -24101,6 +24367,17 @@
(define_expand "lceil<mode><sseintvecmodelower>2"
[(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VHF_AVX512VL 1 "register_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_ceil<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "lceil<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
(match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
"TARGET_SSE4_1 && !flag_trapping_math"
{
@@ -24121,11 +24398,11 @@
(define_expand "round<mode>2"
[(set (match_dup 3)
- (plus:VF
- (match_operand:VF 1 "register_operand")
+ (plus:VFH
+ (match_operand:VFH 1 "register_operand")
(match_dup 2)))
- (set (match_operand:VF 0 "register_operand")
- (unspec:VF
+ (set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
[(match_dup 3) (match_dup 4)]
UNSPEC_ROUND))]
"TARGET_SSE4_1 && !flag_trapping_math"
@@ -24155,6 +24432,17 @@
(define_expand "lround<mode><sseintvecmodelower>2"
[(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VHF_AVX512VL 1 "register_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_round<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "lround<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
(match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
"TARGET_SSE4_1 && !flag_trapping_math"
{
@@ -24284,7 +24572,7 @@
(unspec:SI
[(match_operand:V16QI 1 "register_operand" "x,x")
(match_operand:SI 2 "register_operand" "a,a")
- (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,jm")
(match_operand:SI 4 "register_operand" "d,d")
(match_operand:SI 5 "const_0_to_255_operand")]
UNSPEC_PCMPESTR))
@@ -24299,6 +24587,7 @@
"TARGET_SSE4_2"
"%vpcmpestri\t{%5, %3, %1|%1, %3, %5}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "length_immediate" "1")
@@ -24311,7 +24600,7 @@
(unspec:V16QI
[(match_operand:V16QI 1 "register_operand" "x,x")
(match_operand:SI 2 "register_operand" "a,a")
- (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,jm")
(match_operand:SI 4 "register_operand" "d,d")
(match_operand:SI 5 "const_0_to_255_operand")]
UNSPEC_PCMPESTR))
@@ -24326,6 +24615,7 @@
"TARGET_SSE4_2"
"%vpcmpestrm\t{%5, %3, %1|%1, %3, %5}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -24338,7 +24628,7 @@
(unspec:CC
[(match_operand:V16QI 2 "register_operand" "x,x,x,x")
(match_operand:SI 3 "register_operand" "a,a,a,a")
- (match_operand:V16QI 4 "nonimmediate_operand" "x,m,x,m")
+ (match_operand:V16QI 4 "nonimmediate_operand" "x,jm,x,jm")
(match_operand:SI 5 "register_operand" "d,d,d,d")
(match_operand:SI 6 "const_0_to_255_operand")]
UNSPEC_PCMPESTR))
@@ -24351,6 +24641,7 @@
%vpcmpestri\t{%6, %4, %2|%2, %4, %6}
%vpcmpestri\t{%6, %4, %2|%2, %4, %6}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load,none,load")
@@ -24362,7 +24653,7 @@
[(set (match_operand:SI 0 "register_operand" "=c,c")
(unspec:SI
[(match_operand:V16QI 2 "register_operand" "x,x")
- (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,jm")
(match_operand:SI 4 "const_0_to_255_operand")]
UNSPEC_PCMPISTR))
(set (match_operand:V16QI 1 "register_operand" "=Yz,Yz")
@@ -24405,6 +24696,7 @@
DONE;
}
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load")
@@ -24414,7 +24706,7 @@
[(set (match_operand:SI 0 "register_operand" "=c,c")
(unspec:SI
[(match_operand:V16QI 1 "register_operand" "x,x")
- (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
+ (match_operand:V16QI 2 "nonimmediate_operand" "x,jm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_PCMPISTR))
(set (reg:CC FLAGS_REG)
@@ -24426,6 +24718,7 @@
"TARGET_SSE4_2"
"%vpcmpistri\t{%3, %2, %1|%1, %2, %3}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -24437,7 +24730,7 @@
[(set (match_operand:V16QI 0 "register_operand" "=Yz,Yz")
(unspec:V16QI
[(match_operand:V16QI 1 "register_operand" "x,x")
- (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
+ (match_operand:V16QI 2 "nonimmediate_operand" "x,jm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_PCMPISTR))
(set (reg:CC FLAGS_REG)
@@ -24449,6 +24742,7 @@
"TARGET_SSE4_2"
"%vpcmpistrm\t{%3, %2, %1|%1, %2, %3}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -24460,7 +24754,7 @@
[(set (reg:CC FLAGS_REG)
(unspec:CC
[(match_operand:V16QI 2 "register_operand" "x,x,x,x")
- (match_operand:V16QI 3 "nonimmediate_operand" "x,m,x,m")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,jm,x,jm")
(match_operand:SI 4 "const_0_to_255_operand")]
UNSPEC_PCMPISTR))
(clobber (match_scratch:V16QI 0 "=Yz,Yz,X,X"))
@@ -24472,6 +24766,7 @@
%vpcmpistri\t{%4, %3, %2|%2, %3, %4}
%vpcmpistri\t{%4, %3, %2|%2, %3, %4}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,load,none,load")
@@ -25394,7 +25689,7 @@
(ashiftrt:V8DI
(match_operand:V8DI 1 "register_operand")
(match_operand:V8DI 2 "nonimmediate_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_expand "vashrv4di3"
[(set (match_operand:V4DI 0 "register_operand")
@@ -25485,7 +25780,7 @@
[(set (match_operand:V16SI 0 "register_operand")
(ashiftrt:V16SI (match_operand:V16SI 1 "register_operand")
(match_operand:V16SI 2 "nonimmediate_operand")))]
- "TARGET_AVX512F")
+ "TARGET_AVX512F && TARGET_EVEX512")
(define_expand "vashrv8si3"
[(set (match_operand:V8SI 0 "register_operand")
@@ -25889,7 +26184,7 @@
(define_insn "aesenc"
[(set (match_operand:V2DI 0 "register_operand" "=x,x,v")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x,v")
- (match_operand:V2DI 2 "vector_operand" "xBm,xm,vm")]
+ (match_operand:V2DI 2 "vector_operand" "xja,xm,vm")]
UNSPEC_AESENC))]
"TARGET_AES || (TARGET_VAES && TARGET_AVX512VL)"
"@
@@ -25898,6 +26193,7 @@
vaesenc\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,aes,avx512vl")
(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0,1,1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex,evex")
(set_attr "btver2_decode" "double,double,double")
@@ -25906,7 +26202,7 @@
(define_insn "aesenclast"
[(set (match_operand:V2DI 0 "register_operand" "=x,x,v")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x,v")
- (match_operand:V2DI 2 "vector_operand" "xBm,xm,vm")]
+ (match_operand:V2DI 2 "vector_operand" "xja,xm,vm")]
UNSPEC_AESENCLAST))]
"TARGET_AES || (TARGET_VAES && TARGET_AVX512VL)"
"@
@@ -25915,6 +26211,7 @@
vaesenclast\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,aes,avx512vl")
(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0,1,1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex,evex")
(set_attr "btver2_decode" "double,double,double")
@@ -25923,7 +26220,7 @@
(define_insn "aesdec"
[(set (match_operand:V2DI 0 "register_operand" "=x,x,v")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x,v")
- (match_operand:V2DI 2 "vector_operand" "xBm,xm,vm")]
+ (match_operand:V2DI 2 "vector_operand" "xja,xm,vm")]
UNSPEC_AESDEC))]
"TARGET_AES || (TARGET_VAES && TARGET_AVX512VL)"
"@
@@ -25932,6 +26229,7 @@
vaesdec\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,aes,avx512vl")
(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0,1,1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex,evex")
(set_attr "btver2_decode" "double,double,double")
@@ -25940,7 +26238,7 @@
(define_insn "aesdeclast"
[(set (match_operand:V2DI 0 "register_operand" "=x,x,v")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x,v")
- (match_operand:V2DI 2 "vector_operand" "xBm,xm,vm")]
+ (match_operand:V2DI 2 "vector_operand" "xja,xm,vm")]
UNSPEC_AESDECLAST))]
"TARGET_AES || (TARGET_VAES && TARGET_AVX512VL)"
"@
@@ -25948,6 +26246,7 @@
vaesdeclast\t{%2, %1, %0|%0, %1, %2}
vaesdeclast\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,aes,avx512vl")
+ (set_attr "gpr32" "0,1,1")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex,evex")
@@ -25956,23 +26255,25 @@
(define_insn "aesimc"
[(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "vector_operand" "xBm")]
+ (unspec:V2DI [(match_operand:V2DI 1 "vector_operand" "xja")]
UNSPEC_AESIMC))]
"TARGET_AES"
"%vaesimc\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
(define_insn "aeskeygenassist"
[(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "vector_operand" "xBm")
+ (unspec:V2DI [(match_operand:V2DI 1 "vector_operand" "xja")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_AESKEYGENASSIST))]
"TARGET_AES"
"%vaeskeygenassist\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
@@ -25981,7 +26282,7 @@
(define_insn "pclmulqdq"
[(set (match_operand:V2DI 0 "register_operand" "=x,x,v")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x,v")
- (match_operand:V2DI 2 "vector_operand" "xBm,xm,vm")
+ (match_operand:V2DI 2 "vector_operand" "xja,xm,vm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_PCLMUL))]
"TARGET_PCLMUL"
@@ -25991,6 +26292,7 @@
vpclmulqdq\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "isa" "noavx,avx,vpclmulqdqvl")
(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0,1,1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex,evex")
@@ -26056,12 +26358,12 @@
(set_attr "mode" "OI")])
(define_mode_attr pbroadcast_evex_isa
- [(V64QI "avx512bw") (V32QI "avx512bw") (V16QI "avx512bw")
- (V32HI "avx512bw") (V16HI "avx512bw") (V8HI "avx512bw")
- (V16SI "avx512f") (V8SI "avx512f") (V4SI "avx512f")
- (V8DI "avx512f") (V4DI "avx512f") (V2DI "avx512f")
- (V32HF "avx512bw") (V16HF "avx512bw") (V8HF "avx512bw")
- (V32BF "avx512bw") (V16BF "avx512bw") (V8BF "avx512bw")])
+ [(V64QI "avx512bw_512") (V32QI "avx512bw") (V16QI "avx512bw")
+ (V32HI "avx512bw_512") (V16HI "avx512bw") (V8HI "avx512bw")
+ (V16SI "avx512f_512") (V8SI "avx512f") (V4SI "avx512f")
+ (V8DI "avx512f_512") (V4DI "avx512f") (V2DI "avx512f")
+ (V32HF "avx512bw_512") (V16HF "avx512bw") (V8HF "avx512bw")
+ (V32BF "avx512bw_512") (V16BF "avx512bw") (V8BF "avx512bw")])
(define_insn "avx2_pbroadcast<mode>"
[(set (match_operand:VIHFBF 0 "register_operand" "=x,v")
@@ -26431,11 +26733,13 @@
(set_attr "prefix" "<mask_prefix2>")
(set_attr "mode" "<sseinsnmode>")])
+;; TODO (APX): vmovaps supports EGPR but not others, could split
+;; pattern to enable gpr32 for this one.
(define_insn "avx2_permv2ti"
[(set (match_operand:V4DI 0 "register_operand" "=x")
(unspec:V4DI
[(match_operand:V4DI 1 "register_operand" "x")
- (match_operand:V4DI 2 "nonimmediate_operand" "xm")
+ (match_operand:V4DI 2 "nonimmediate_operand" "xjm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_VPERMTI))]
"TARGET_AVX2"
@@ -26462,6 +26766,7 @@
return "vperm2i128\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
@@ -26602,7 +26907,7 @@
(set (attr "enabled")
(if_then_else (eq_attr "alternative" "1")
(symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && !TARGET_PREFER_AVX256")
+ && TARGET_EVEX512 && !TARGET_PREFER_AVX256")
(const_string "*")))])
(define_insn "*vec_dupv4si"
@@ -26630,7 +26935,7 @@
(set (attr "enabled")
(if_then_else (eq_attr "alternative" "1")
(symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && !TARGET_PREFER_AVX256")
+ && TARGET_EVEX512 && !TARGET_PREFER_AVX256")
(const_string "*")))])
(define_insn "*vec_dupv2di"
@@ -26661,7 +26966,8 @@
(if_then_else
(eq_attr "alternative" "2")
(symbol_ref "TARGET_AVX512VL
- || (TARGET_AVX512F && !TARGET_PREFER_AVX256)")
+ || (TARGET_AVX512F && TARGET_EVEX512
+ && !TARGET_PREFER_AVX256)")
(const_string "*")))])
(define_insn "avx2_vbroadcasti128_<mode>"
@@ -26741,7 +27047,7 @@
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_evex")
- (set_attr "isa" "avx2,noavx2,avx2,avx512f,noavx2")
+ (set_attr "isa" "avx2,noavx2,avx2,avx512f_512,noavx2")
(set_attr "mode" "<sseinsnmode>,V8SF,<sseinsnmode>,<sseinsnmode>,V8SF")])
(define_split
@@ -26781,7 +27087,7 @@
(define_insn "avx_vbroadcastf128_<mode>"
[(set (match_operand:V_256 0 "register_operand" "=x,x,x,v,v,v,v")
(vec_concat:V_256
- (match_operand:<ssehalfvecmode> 1 "nonimmediate_operand" "m,0,?x,m,0,m,0")
+ (match_operand:<ssehalfvecmode> 1 "nonimmediate_operand" "jm,0,?x,m,0,m,0")
(match_dup 1)))]
"TARGET_AVX"
"@
@@ -26792,8 +27098,9 @@
vinsert<i128vldq>\t{$1, %1, %0, %0|%0, %0, %1, 1}
vbroadcast<shuffletype>32x4\t{%1, %0|%0, %1}
vinsert<shuffletype>32x4\t{$1, %1, %0, %0|%0, %0, %1, 1}"
- [(set_attr "isa" "*,*,*,avx512dq,avx512dq,avx512vl,avx512vl")
+ [(set_attr "isa" "noavx512vl,*,*,avx512dq,avx512dq,avx512vl,avx512vl")
(set_attr "type" "ssemov,sselog1,sselog1,ssemov,sselog1,ssemov,sselog1")
+ (set_attr "gpr32" "0,1,1,1,1,1,1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "0,1,1,0,1,0,1")
(set_attr "prefix" "vex,vex,vex,evex,evex,evex,evex")
@@ -26801,8 +27108,8 @@
;; For broadcast[i|f]32x2. Yes there is no v4sf version, only v4si.
(define_mode_iterator VI4F_BRCST32x2
- [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- V16SF (V8SF "TARGET_AVX512VL")])
+ [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")])
(define_mode_attr 64x2mode
[(V8DF "V2DF") (V8DI "V2DI") (V4DI "V2DI") (V4DF "V2DF")])
@@ -26852,7 +27159,8 @@
;; For broadcast[i|f]64x2
(define_mode_iterator VI8F_BRCST64x2
- [V8DI V8DF (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
+ [(V8DI "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
+ (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
(define_insn "<mask_codefor>avx512dq_broadcast<mode><mask_name>_1"
[(set (match_operand:VI8F_BRCST64x2 0 "register_operand" "=v,v")
@@ -26908,23 +27216,28 @@
(set_attr "mode" "<sseinsnmode>")])
(define_mode_iterator VPERMI2
- [V16SI V16SF V8DI V8DF
+ [(V16SI "TARGET_EVEX512") (V16SF "TARGET_EVEX512")
+ (V8DI "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512BW && TARGET_AVX512VL")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V16HI "TARGET_AVX512BW && TARGET_AVX512VL")
(V8HI "TARGET_AVX512BW && TARGET_AVX512VL")
- (V64QI "TARGET_AVX512VBMI") (V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL")
+ (V64QI "TARGET_AVX512VBMI && TARGET_EVEX512")
+ (V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL")
(V16QI "TARGET_AVX512VBMI && TARGET_AVX512VL")])
(define_mode_iterator VPERMI2I
- [V16SI V8DI
+ [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
(V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512BW && TARGET_AVX512VL")
+ (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V16HI "TARGET_AVX512BW && TARGET_AVX512VL")
(V8HI "TARGET_AVX512BW && TARGET_AVX512VL")
- (V64QI "TARGET_AVX512VBMI") (V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL")
+ (V64QI "TARGET_AVX512VBMI && TARGET_EVEX512")
+ (V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL")
(V16QI "TARGET_AVX512VBMI && TARGET_AVX512VL")])
(define_expand "<avx512>_vpermi2var<mode>3_mask"
@@ -27076,12 +27389,13 @@
[(set (match_operand:AVX256MODE2P 0 "register_operand" "=x")
(unspec:AVX256MODE2P
[(match_operand:AVX256MODE2P 1 "register_operand" "x")
- (match_operand:AVX256MODE2P 2 "nonimmediate_operand" "xm")
+ (match_operand:AVX256MODE2P 2 "nonimmediate_operand" "xjm")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_VPERMIL2F128))]
"TARGET_AVX"
"vperm2<i128>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
@@ -27092,7 +27406,7 @@
(vec_select:AVX256MODE2P
(vec_concat:<ssedoublevecmode>
(match_operand:AVX256MODE2P 1 "register_operand" "x")
- (match_operand:AVX256MODE2P 2 "nonimmediate_operand" "xm"))
+ (match_operand:AVX256MODE2P 2 "nonimmediate_operand" "xjm"))
(match_parallel 3 ""
[(match_operand 4 "const_int_operand")])))]
"TARGET_AVX
@@ -27109,6 +27423,7 @@
return "vperm2<i128>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
@@ -27197,11 +27512,11 @@
})
(define_insn "vec_set_lo_<mode><mask_name>"
- [(set (match_operand:VI8F_256 0 "register_operand" "=v")
+ [(set (match_operand:VI8F_256 0 "register_operand" "=x,v")
(vec_concat:VI8F_256
- (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "vm")
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xjm,vm")
(vec_select:<ssehalfvecmode>
- (match_operand:VI8F_256 1 "register_operand" "v")
+ (match_operand:VI8F_256 1 "register_operand" "x,v")
(parallel [(const_int 2) (const_int 3)]))))]
"TARGET_AVX && <mask_avx512dq_condition>"
{
@@ -27212,7 +27527,9 @@
else
return "vinsert<i128>\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}";
}
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
@@ -27241,11 +27558,11 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "vec_set_lo_<mode><mask_name>"
- [(set (match_operand:VI4F_256 0 "register_operand" "=v")
+ [(set (match_operand:VI4F_256 0 "register_operand" "=x,v")
(vec_concat:VI4F_256
- (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "vm")
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xjm,vm")
(vec_select:<ssehalfvecmode>
- (match_operand:VI4F_256 1 "register_operand" "v")
+ (match_operand:VI4F_256 1 "register_operand" "x,v")
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
"TARGET_AVX"
@@ -27255,20 +27572,22 @@
else
return "vinsert<i128>\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}";
}
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
(define_insn "vec_set_hi_<mode><mask_name>"
- [(set (match_operand:VI4F_256 0 "register_operand" "=v")
+ [(set (match_operand:VI4F_256 0 "register_operand" "=x,v")
(vec_concat:VI4F_256
(vec_select:<ssehalfvecmode>
- (match_operand:VI4F_256 1 "register_operand" "v")
+ (match_operand:VI4F_256 1 "register_operand" "x,v")
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)]))
- (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "vm")))]
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xjm,vm")))]
"TARGET_AVX"
{
if (TARGET_AVX512VL)
@@ -27276,7 +27595,9 @@
else
return "vinsert<i128>\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}";
}
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
@@ -27285,7 +27606,7 @@
(define_insn "vec_set_lo_<mode>"
[(set (match_operand:V16_256 0 "register_operand" "=x,v")
(vec_concat:V16_256
- (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xm,vm")
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xjm,vm")
(vec_select:<ssehalfvecmode>
(match_operand:V16_256 1 "register_operand" "x,v")
(parallel [(const_int 8) (const_int 9)
@@ -27296,7 +27617,9 @@
"@
vinsert%~128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}
vinserti32x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex,evex")
@@ -27311,12 +27634,14 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))
- (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xm,vm")))]
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xjm,vm")))]
"TARGET_AVX"
"@
vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
vinserti32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0,1")
+ (set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex,evex")
@@ -27325,7 +27650,7 @@
(define_insn "vec_set_lo_v32qi"
[(set (match_operand:V32QI 0 "register_operand" "=x,v")
(vec_concat:V32QI
- (match_operand:V16QI 2 "nonimmediate_operand" "xm,v")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xjm,v")
(vec_select:V16QI
(match_operand:V32QI 1 "register_operand" "x,v")
(parallel [(const_int 16) (const_int 17)
@@ -27340,7 +27665,9 @@
"@
vinsert%~128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}
vinserti32x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "type" "sselog")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex,evex")
@@ -27359,12 +27686,14 @@
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)]))
- (match_operand:V16QI 2 "nonimmediate_operand" "xm,vm")))]
+ (match_operand:V16QI 2 "nonimmediate_operand" "xjm,vm")))]
"TARGET_AVX"
"@
vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
vinserti32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
- [(set_attr "type" "sselog")
+ [(set_attr "isa" "noavx512vl,avx512vl")
+ (set_attr "gpr32" "0")
+ (set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex,evex")
@@ -27374,7 +27703,7 @@
[(set (match_operand:V48_128_256 0 "register_operand" "=x")
(unspec:V48_128_256
[(match_operand:<sseintvecmode> 2 "register_operand" "x")
- (match_operand:V48_128_256 1 "memory_operand" "m")]
+ (match_operand:V48_128_256 1 "memory_operand" "jm")]
UNSPEC_MASKMOV))]
"TARGET_AVX"
{
@@ -27384,13 +27713,14 @@
return "vmaskmov<ssefltmodesuffix>\t{%1, %2, %0|%0, %2, %1}";
}
[(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "btver2_decode" "vector")
(set_attr "mode" "<sseinsnmode>")])
(define_insn "<avx_avx2>_maskstore<ssemodesuffix><avxsizesuffix>"
- [(set (match_operand:V48_128_256 0 "memory_operand" "+m")
+ [(set (match_operand:V48_128_256 0 "memory_operand" "+jm")
(unspec:V48_128_256
[(match_operand:<sseintvecmode> 1 "register_operand" "x")
(match_operand:V48_128_256 2 "register_operand" "x")
@@ -27404,6 +27734,7 @@
return "vmaskmov<ssefltmodesuffix>\t{%2, %1, %0|%0, %1, %2}";
}
[(set_attr "type" "sselog1")
+ (set_attr "gpr32" "0")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "btver2_decode" "vector")
@@ -27543,28 +27874,29 @@
;; Modes handled by vec_init expanders.
(define_mode_iterator VEC_INIT_MODE
- [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
- (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")])
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
+ (V4TI "TARGET_AVX512F && TARGET_EVEX512") (V2TI "TARGET_AVX")])
;; Likewise, but for initialization from half sized vectors.
;; Thus, these are all VEC_INIT_MODE modes except V2??.
(define_mode_iterator VEC_INIT_HALF_MODE
- [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")
- (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX")
- (V4TI "TARGET_AVX512F")])
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX")
+ (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX")
+ (V4TI "TARGET_AVX512F && TARGET_EVEX512")])
(define_expand "vec_init<mode><ssescalarmodelower>"
[(match_operand:VEC_INIT_MODE 0 "register_operand")
@@ -27661,11 +27993,13 @@
[(set (match_operand:V_256_512 0 "register_operand" "=x,v,x,Yv")
(vec_concat:V_256_512
(match_operand:<ssehalfvecmode> 1 "nonimmediate_operand" "x,v,xm,vm")
- (match_operand:<ssehalfvecmode> 2 "nonimm_or_0_operand" "xm,vm,C,C")))]
+ (match_operand:<ssehalfvecmode> 2 "nonimm_or_0_operand" "xjm,vm,C,C")))]
"TARGET_AVX
&& (operands[2] == CONST0_RTX (<ssehalfvecmode>mode)
|| !MEM_P (operands[1]))"
{
+ bool egpr_used = (TARGET_APX_EGPR
+ && x86_extended_rex2reg_mentioned_p (operands[1]));
switch (which_alternative)
{
case 0:
@@ -27713,7 +28047,8 @@
if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
{
if (which_alternative == 2)
- return "vmovdqu\t{%1, %t0|%t0, %1}";
+ return egpr_used ? "vmovups\t{%1, %t0|%t0, %1}"
+ : "vmovdqu\t{%1, %t0|%t0, %1}";
else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
return "vmovdqu64\t{%1, %t0|%t0, %1}";
else
@@ -27722,7 +28057,8 @@
else
{
if (which_alternative == 2)
- return "vmovdqa\t{%1, %t0|%t0, %1}";
+ return egpr_used ? "vmovaps\t{%1, %t0|%t0, %1}"
+ : "vmovdqa\t{%1, %t0|%t0, %1}";
else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
return "vmovdqa64\t{%1, %t0|%t0, %1}";
else
@@ -27732,7 +28068,8 @@
if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
{
if (which_alternative == 2)
- return "vmovdqu\t{%1, %x0|%x0, %1}";
+ return egpr_used ? "vmovups\t{%1, %x0|%x0, %1}"
+ : "vmovdqu\t{%1, %x0|%x0, %1}";
else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
return "vmovdqu64\t{%1, %x0|%x0, %1}";
else
@@ -27741,7 +28078,8 @@
else
{
if (which_alternative == 2)
- return "vmovdqa\t{%1, %x0|%x0, %1}";
+ return egpr_used ? "vmovaps\t{%1, %x0|%x0, %1}"
+ : "vmovdqa\t{%1, %x0|%x0, %1}";
else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
return "vmovdqa64\t{%1, %x0|%x0, %1}";
else
@@ -27754,7 +28092,9 @@
gcc_unreachable ();
}
}
- [(set_attr "type" "sselog,sselog,ssemov,ssemov")
+ [(set_attr "isa" "noavx512f,avx512f,*,*")
+ (set_attr "gpr32" "0,1,1,1")
+ (set_attr "type" "sselog,sselog,ssemov,ssemov")
(set_attr "prefix_extra" "1,1,*,*")
(set_attr "length_immediate" "1,1,*,*")
(set_attr "prefix" "maybe_evex")
@@ -27817,7 +28157,7 @@
(unspec:V16SF
[(match_operand:V16HI 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTPH2PS))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtph2ps\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -27907,7 +28247,7 @@
UNSPEC_VCVTPS2PH)
(match_operand:V16HI 3 "nonimm_or_0_operand")
(match_operand:HI 4 "register_operand")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
{
int round = INTVAL (operands[2]);
/* Separate {sae} from rounding control imm,
@@ -27926,7 +28266,7 @@
[(match_operand:V16SF 1 "register_operand" "v")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_VCVTPS2PH))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtps2ph\t{%2, <round_saeonly_mask_op3>%1, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -27938,7 +28278,7 @@
[(match_operand:V16SF 1 "register_operand" "v")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_VCVTPS2PH))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"vcvtps2ph\t{%2, %1, %0<merge_mask_operand3>|%0<merge_mask_operand3>, %1, %2}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -27992,7 +28332,7 @@
[(match_operand:VEC_GATHER_MODE 2 "register_operand" "0")
(match_operator:<ssescalarmode> 7 "vsib_mem_operator"
[(unspec:P
- [(match_operand:P 3 "vsib_address_operand" "Tv")
+ [(match_operand:P 3 "vsib_address_operand" "jb")
(match_operand:<VEC_GATHER_IDXSI> 4 "register_operand" "x")
(match_operand:SI 6 "const1248_operand")]
UNSPEC_VSIBADDR)])
@@ -28003,6 +28343,7 @@
"TARGET_AVX2"
"%M3v<sseintprefix>gatherd<ssemodesuffix>\t{%1, %7, %0|%0, %7, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -28012,7 +28353,7 @@
[(pc)
(match_operator:<ssescalarmode> 6 "vsib_mem_operator"
[(unspec:P
- [(match_operand:P 2 "vsib_address_operand" "Tv")
+ [(match_operand:P 2 "vsib_address_operand" "jb")
(match_operand:<VEC_GATHER_IDXSI> 3 "register_operand" "x")
(match_operand:SI 5 "const1248_operand")]
UNSPEC_VSIBADDR)])
@@ -28023,6 +28364,7 @@
"TARGET_AVX2"
"%M2v<sseintprefix>gatherd<ssemodesuffix>\t{%1, %6, %0|%0, %6, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -28053,7 +28395,7 @@
[(match_operand:<VEC_GATHER_SRCDI> 2 "register_operand" "0")
(match_operator:<ssescalarmode> 7 "vsib_mem_operator"
[(unspec:P
- [(match_operand:P 3 "vsib_address_operand" "Tv")
+ [(match_operand:P 3 "vsib_address_operand" "jb")
(match_operand:<VEC_GATHER_IDXDI> 4 "register_operand" "x")
(match_operand:SI 6 "const1248_operand")]
UNSPEC_VSIBADDR)])
@@ -28064,6 +28406,7 @@
"TARGET_AVX2"
"%M3v<sseintprefix>gatherq<ssemodesuffix>\t{%5, %7, %2|%2, %7, %5}"
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -28073,7 +28416,7 @@
[(pc)
(match_operator:<ssescalarmode> 6 "vsib_mem_operator"
[(unspec:P
- [(match_operand:P 2 "vsib_address_operand" "Tv")
+ [(match_operand:P 2 "vsib_address_operand" "jb")
(match_operand:<VEC_GATHER_IDXDI> 3 "register_operand" "x")
(match_operand:SI 5 "const1248_operand")]
UNSPEC_VSIBADDR)])
@@ -28088,6 +28431,7 @@
return "%M2v<sseintprefix>gatherq<ssemodesuffix>\t{%4, %6, %0|%0, %6, %4}";
}
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -28098,7 +28442,7 @@
[(match_operand:<VEC_GATHER_SRCDI> 2 "register_operand" "0")
(match_operator:<ssescalarmode> 7 "vsib_mem_operator"
[(unspec:P
- [(match_operand:P 3 "vsib_address_operand" "Tv")
+ [(match_operand:P 3 "vsib_address_operand" "jb")
(match_operand:<VEC_GATHER_IDXDI> 4 "register_operand" "x")
(match_operand:SI 6 "const1248_operand")]
UNSPEC_VSIBADDR)])
@@ -28111,6 +28455,7 @@
"TARGET_AVX2"
"%M3v<sseintprefix>gatherq<ssemodesuffix>\t{%5, %7, %0|%0, %7, %5}"
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -28121,7 +28466,7 @@
[(pc)
(match_operator:<ssescalarmode> 6 "vsib_mem_operator"
[(unspec:P
- [(match_operand:P 2 "vsib_address_operand" "Tv")
+ [(match_operand:P 2 "vsib_address_operand" "jb")
(match_operand:<VEC_GATHER_IDXDI> 3 "register_operand" "x")
(match_operand:SI 5 "const1248_operand")]
UNSPEC_VSIBADDR)])
@@ -28134,6 +28479,7 @@
"TARGET_AVX2"
"%M2v<sseintprefix>gatherq<ssemodesuffix>\t{%4, %6, %0|%0, %6, %4}"
[(set_attr "type" "ssemov")
+ (set_attr "gpr32" "0")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
@@ -28907,7 +29253,7 @@
(match_operand:V8DI 2 "register_operand" "v")
(match_operand:V8DI 3 "nonimmediate_operand" "vm")]
VPMADD52))]
- "TARGET_AVX512IFMA"
+ "TARGET_AVX512IFMA && TARGET_EVEX512"
"vpmadd52<vpmadd52type>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -29325,7 +29671,7 @@
[(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,v")
(unspec:VI1_AVX512F
[(match_operand:VI1_AVX512F 1 "register_operand" "0,v")
- (match_operand:VI1_AVX512F 2 "vector_operand" "xBm,vm")
+ (match_operand:VI1_AVX512F 2 "vector_operand" "xja,vm")
(match_operand 3 "const_0_to_255_operand")]
UNSPEC_GF2P8AFFINEINV))]
"TARGET_GFNI"
@@ -29333,6 +29679,7 @@
gf2p8affineinvqb\t{%3, %2, %0| %0, %2, %3}
vgf2p8affineinvqb\t{%3, %2, %1, %0<mask_operand4>| %0<mask_operand4>, %1, %2, %3}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -29341,7 +29688,7 @@
[(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,v")
(unspec:VI1_AVX512F
[(match_operand:VI1_AVX512F 1 "register_operand" "0,v")
- (match_operand:VI1_AVX512F 2 "vector_operand" "xBm,vm")
+ (match_operand:VI1_AVX512F 2 "vector_operand" "xja,vm")
(match_operand 3 "const_0_to_255_operand")]
UNSPEC_GF2P8AFFINE))]
"TARGET_GFNI"
@@ -29349,6 +29696,7 @@
gf2p8affineqb\t{%3, %2, %0| %0, %2, %3}
vgf2p8affineqb\t{%3, %2, %1, %0<mask_operand4>| %0<mask_operand4>, %1, %2, %3}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -29357,13 +29705,14 @@
[(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,v")
(unspec:VI1_AVX512F
[(match_operand:VI1_AVX512F 1 "register_operand" "%0,v")
- (match_operand:VI1_AVX512F 2 "vector_operand" "xBm,vm")]
+ (match_operand:VI1_AVX512F 2 "vector_operand" "xja,vm")]
UNSPEC_GF2P8MUL))]
"TARGET_GFNI"
"@
gf2p8mulb\t{%2, %0| %0, %2}
vgf2p8mulb\t{%2, %1, %0<mask_operand3>| %0<mask_operand3>, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "gpr32" "0,1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -29509,9 +29858,9 @@
(match_operand:VI1_AVX512VNNI 1 "register_operand")
(match_operand:VI1_AVX512VNNI 2 "register_operand")
(match_operand:<ssedvecmode> 3 "register_operand")]
- "(<MODE_SIZE> == 64
- ||((TARGET_AVX512VNNI && TARGET_AVX512VL)
- || TARGET_AVXVNNI))"
+ "((<MODE_SIZE> == 64 && TARGET_EVEX512)
+ || ((TARGET_AVX512VNNI && TARGET_AVX512VL)
+ || TARGET_AVXVNNI))"
{
operands[1] = lowpart_subreg (<ssedvecmode>mode,
force_reg (<MODE>mode, operands[1]),
@@ -29532,7 +29881,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPBUSD))]
- "TARGET_AVX512VNNI"
+ "TARGET_AVX512VNNI && TARGET_EVEX512"
"vpdpbusd\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -29600,7 +29949,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPBUSDS))]
- "TARGET_AVX512VNNI"
+ "TARGET_AVX512VNNI && TARGET_EVEX512"
"vpdpbusds\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -29668,7 +30017,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPWSSD))]
- "TARGET_AVX512VNNI"
+ "TARGET_AVX512VNNI && TARGET_EVEX512"
"vpdpwssd\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -29736,7 +30085,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPWSSDS))]
- "TARGET_AVX512VNNI"
+ "TARGET_AVX512VNNI && TARGET_EVEX512"
"vpdpwssds\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -29859,9 +30208,9 @@
(set_attr "mode" "<sseinsnmode>")])
(define_mode_iterator VI48_AVX512VP2VL
- [V8DI
- (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
+ [(V8DI "TARGET_EVEX512")
+ (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
(define_mode_iterator MASK_DWI [P2QI P2HI])
@@ -29902,12 +30251,12 @@
(unspec:P2HI [(match_operand:V16SI 1 "register_operand" "v")
(match_operand:V16SI 2 "vector_operand" "vm")]
UNSPEC_VP2INTERSECT))]
- "TARGET_AVX512VP2INTERSECT"
+ "TARGET_AVX512VP2INTERSECT && TARGET_EVEX512"
"vp2intersectd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr ("prefix") ("evex"))])
(define_mode_iterator VF_AVX512BF16VL
- [V32BF (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
+ [(V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
;; Converting from BF to SF
(define_mode_attr bf16_cvt_2sf
[(V32BF "V16SF") (V16BF "V8SF") (V8BF "V4SF")])
@@ -30000,7 +30349,8 @@
"TARGET_AVX512BF16 && TARGET_AVX512VL"
"vcvtneps2bf16{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}")
-(define_mode_iterator VF1_AVX512_256 [V16SF (V8SF "TARGET_AVX512VL")])
+(define_mode_iterator VF1_AVX512_256
+ [(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")])
(define_expand "avx512f_cvtneps2bf16_<mode>_maskz"
[(match_operand:<sf_cvt_bf16> 0 "register_operand")
@@ -30285,10 +30635,10 @@
;; vinserti64x4 $0x1, %ymm15, %zmm15, %zmm15
(define_mode_iterator INT_BROADCAST_MODE
- [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_64BIT")
+ [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_EVEX512 && TARGET_64BIT")
(V4DI "TARGET_AVX && TARGET_64BIT") (V2DI "TARGET_64BIT")])
;; Broadcast from an integer. NB: Enable broadcast only if we can move
diff --git a/gcc/config/i386/usermsrintrin.h b/gcc/config/i386/usermsrintrin.h
new file mode 100644
index 0000000..9e1dbdc
--- /dev/null
+++ b/gcc/config/i386/usermsrintrin.h
@@ -0,0 +1,60 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if !defined _X86GPRINTRIN_H_INCLUDED
+#error "Never use <usermsrintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef _USER_MSRINTRIN_H_INCLUDED
+#define _USER_MSRINTRIN_H_INCLUDED
+
+#ifdef __x86_64__
+
+#ifndef __USER_MSR__
+#pragma GCC push_options
+#pragma GCC target("usermsr")
+#define __DISABLE_USER_MSR__
+#endif /* __USER_MSR__ */
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_urdmsr (unsigned long long __A)
+{
+ return (unsigned long long) __builtin_ia32_urdmsr (__A);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_uwrmsr (unsigned long long __A, unsigned long long __B)
+{
+ __builtin_ia32_uwrmsr (__A, __B);
+}
+
+#ifdef __DISABLE_USER_MSR__
+#undef __DISABLE_USER_MSR__
+#pragma GCC pop_options
+#endif /* __DISABLE_USER_MSR__ */
+
+#endif /* __x86_64__ */
+
+#endif /* _USER_MSRINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/vaesintrin.h b/gcc/config/i386/vaesintrin.h
index 58fc19c..b2bcdbe 100644
--- a/gcc/config/i386/vaesintrin.h
+++ b/gcc/config/i386/vaesintrin.h
@@ -66,9 +66,9 @@ _mm256_aesenclast_epi128 (__m256i __A, __m256i __B)
#endif /* __DISABLE_VAES__ */
-#if !defined(__VAES__) || !defined(__AVX512F__)
+#if !defined(__VAES__) || !defined(__AVX512F__) || !defined(__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("vaes,avx512f")
+#pragma GCC target("vaes,avx512f,evex512")
#define __DISABLE_VAESF__
#endif /* __VAES__ */
diff --git a/gcc/config/i386/vpclmulqdqintrin.h b/gcc/config/i386/vpclmulqdqintrin.h
index 2c83b60..c8c2c19 100644
--- a/gcc/config/i386/vpclmulqdqintrin.h
+++ b/gcc/config/i386/vpclmulqdqintrin.h
@@ -28,9 +28,9 @@
#ifndef _VPCLMULQDQINTRIN_H_INCLUDED
#define _VPCLMULQDQINTRIN_H_INCLUDED
-#if !defined(__VPCLMULQDQ__) || !defined(__AVX512F__)
+#if !defined(__VPCLMULQDQ__) || !defined(__AVX512F__) || !defined(__EVEX512__)
#pragma GCC push_options
-#pragma GCC target("vpclmulqdq,avx512f")
+#pragma GCC target("vpclmulqdq,avx512f,evex512")
#define __DISABLE_VPCLMULQDQF__
#endif /* __VPCLMULQDQF__ */
diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
index 4b2c5d5..53e177a 100644
--- a/gcc/config/i386/x86-tune.def
+++ b/gcc/config/i386/x86-tune.def
@@ -42,8 +42,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_INTEL | m_KNL | m_KNM | m_K6_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
- | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
on modern chips. Prefer stores affecting whole integer register
@@ -53,7 +53,7 @@ DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2
| m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL
| m_KNL | m_KNM | m_AMD_MULTIPLE | m_LUJIAZUI | m_TREMONT
- | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
destinations to be 128bit to allow register renaming on 128bit SSE units,
@@ -63,8 +63,8 @@ DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
that can be partly masked by careful scheduling of moves. */
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
- | m_BDVER | m_ZNVER | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_BDVER | m_ZNVER | m_LUJIAZUI | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY: This knob avoids
partial write to the destination in scalar SSE conversion from FP
@@ -72,23 +72,23 @@ DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY,
"sse_partial_reg_fp_converts_dependency",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
- | m_BDVER | m_ZNVER | m_LUJIAZUI | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_BDVER | m_ZNVER | m_LUJIAZUI | m_CORE_HYBRID | m_CORE_ATOM
+ | m_GENERIC)
/* X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY: This knob avoids partial
write to the destination in scalar SSE conversion from integer to FP. */
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY,
"sse_partial_reg_converts_dependency",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
- | m_BDVER | m_ZNVER | m_LUJIAZUI | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_BDVER | m_ZNVER | m_LUJIAZUI | m_CORE_HYBRID | m_CORE_ATOM
+ | m_GENERIC)
/* X86_TUNE_DEST_FALSE_DEP_FOR_GLC: This knob inserts zero-idiom before
several insns to break false dependency on the dest register for GLC
micro-architecture. */
DEF_TUNE (X86_TUNE_DEST_FALSE_DEP_FOR_GLC,
- "dest_false_dep_for_glc", m_SAPPHIRERAPIDS | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM)
+ "dest_false_dep_for_glc", m_SAPPHIRERAPIDS | m_CORE_HYBRID
+ | m_CORE_ATOM)
/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
are resolved on SSE register parts instead of whole registers, so we may
@@ -114,16 +114,14 @@ DEF_TUNE (X86_TUNE_MOVX, "movx",
m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
| m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL
| m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
- | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_CORE_AVX2 | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
full sized loads. */
DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL
| m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE
- | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S
- | m_CORE_ATOM | m_GENERIC)
+ | m_LUJIAZUI | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
conditional jump instruction for 32 bit TARGET. */
@@ -179,16 +177,14 @@ DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move",
/* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits. */
DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
- | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S
- | m_CORE_ATOM | m_GENERIC)
+ | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
Some chips, like 486 and Pentium works faster with separate load
and push instructions. */
DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
- | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S
- | m_CORE_ATOM | m_GENERIC)
+ | m_LUJIAZUI | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
over esp subtraction. */
@@ -258,16 +254,16 @@ DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_LAKEMONT | m_PPRO))
DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
| m_BONNELL | m_SILVERMONT | m_INTEL | m_KNL | m_KNM | m_GOLDMONT
- | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_LUJIAZUI | m_GENERIC))
+ | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM
+ | m_LUJIAZUI | m_GENERIC))
/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
for DFmode copies */
DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_KNL | m_KNM | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
- | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC))
+ | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC))
/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
will impact LEA instruction selection. */
@@ -305,8 +301,8 @@ DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA)
move/set sequences of bytes with known size. */
DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB,
"prefer_known_rep_movsb_stosb",
- m_SKYLAKE | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM
- | m_TREMONT | m_CORE_AVX512 | m_LUJIAZUI)
+ m_SKYLAKE | m_CORE_HYBRID | m_CORE_ATOM | m_TREMONT | m_CORE_AVX512
+ | m_LUJIAZUI)
/* X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES: Enable generation of
compact prologues and epilogues by issuing a misaligned moves. This
@@ -316,15 +312,14 @@ DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB,
DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES,
"misaligned_move_string_pro_epilogues",
m_386 | m_486 | m_CORE_ALL | m_AMD_MULTIPLE | m_LUJIAZUI | m_TREMONT
- | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_USE_SAHF: Controls use of SAHF. */
DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER
| m_BTVER | m_ZNVER | m_LUJIAZUI | m_GOLDMONT | m_GOLDMONT_PLUS
- | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S| m_CORE_ATOM
- | m_GENERIC)
+ | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_USE_CLTD: Controls use of CLTD and CTQO instructions. */
DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
@@ -335,8 +330,8 @@ DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
| m_LAKEMONT | m_AMD_MULTIPLE | m_LUJIAZUI | m_GOLDMONT
- | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM
+ | m_GENERIC)
/* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency
for bit-manipulation instructions. */
@@ -355,13 +350,13 @@ DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4)
if-converted sequence to one. */
DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn",
m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_CORE_ALL | m_GOLDMONT
- | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_LUJIAZUI | m_GENERIC)
+ | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM
+ | m_LUJIAZUI | m_GENERIC)
/* X86_TUNE_AVOID_MFENCE: Use lock prefixed instructions instead of mfence. */
DEF_TUNE (X86_TUNE_AVOID_MFENCE, "avoid_mfence",
- m_CORE_ALL | m_BDVER | m_ZNVER | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ m_CORE_ALL | m_BDVER | m_ZNVER | m_LUJIAZUI | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_EXPAND_ABS: This enables a new abs pattern by
generating instructions for abs (x) = (((signed) x >> (W-1) ^ x) -
@@ -386,8 +381,7 @@ DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL
| m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_AMD_MULTIPLE
| m_LUJIAZUI | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT
- | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM
- | m_GENERIC))
+ | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC))
/* X86_TUNE_USE_FFREEP: Use freep instruction instead of fstp. */
DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE | m_LUJIAZUI)
@@ -396,8 +390,8 @@ DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE | m_LUJIAZUI)
DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_LUJIAZUI
- | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC)
/*****************************************************************************/
/* SSE instruction selection tuning */
@@ -412,17 +406,16 @@ DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
of a sequence loading registers by parts. */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
- | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_AMDFAM10 | m_BDVER
- | m_BTVER | m_ZNVER | m_LUJIAZUI | m_GENERIC)
+ | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_LUJIAZUI
+ | m_GENERIC)
/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores
instead of a sequence loading registers by parts. */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
- | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S| m_CORE_ATOM | m_BDVER | m_ZNVER
- | m_LUJIAZUI | m_GENERIC)
+ | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_BDVER | m_ZNVER | m_LUJIAZUI | m_GENERIC)
/* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single
precision 128bit instructions instead of double where possible. */
@@ -431,15 +424,14 @@ DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optim
/* X86_TUNE_SSE_TYPELESS_STORES: Always movaps/movups for 128bit stores. */
DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores",
- m_AMD_MULTIPLE | m_LUJIAZUI | m_CORE_ALL | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC)
+ m_AMD_MULTIPLE | m_LUJIAZUI | m_CORE_ALL | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to
xorps/xorpd and other variants. */
DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_ZNVER
- | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S
- | m_CORE_ATOM | m_GENERIC)
+ | m_LUJIAZUI | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)
/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves in from integer
to SSE registers. If disabled, the moves will be done by storing
@@ -485,14 +477,14 @@ DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb",
/* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of prefixes. */
DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes",
- m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_INTEL)
+ m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID
+ | m_CORE_ATOM | m_INTEL)
/* X86_TUNE_USE_GATHER_2PARTS: Use gather instructions for vectors with 2
elements. */
DEF_TUNE (X86_TUNE_USE_GATHER_2PARTS, "use_gather_2parts",
- ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC | m_GDS))
+ ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC | m_GDS))
/* X86_TUNE_USE_SCATTER_2PARTS: Use scater instructions for vectors with 2
elements. */
@@ -502,8 +494,8 @@ DEF_TUNE (X86_TUNE_USE_SCATTER_2PARTS, "use_scatter_2parts",
/* X86_TUNE_USE_GATHER_4PARTS: Use gather instructions for vectors with 4
elements. */
DEF_TUNE (X86_TUNE_USE_GATHER_4PARTS, "use_gather_4parts",
- ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ALDERLAKE
- | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC | m_GDS))
+ ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_CORE_HYBRID
+ | m_CORE_ATOM | m_GENERIC | m_GDS))
/* X86_TUNE_USE_SCATTER_4PARTS: Use scater instructions for vectors with 4
elements. */
@@ -513,8 +505,8 @@ DEF_TUNE (X86_TUNE_USE_SCATTER_4PARTS, "use_scatter_4parts",
/* X86_TUNE_USE_GATHER: Use gather instructions for vectors with 8 or more
elements. */
DEF_TUNE (X86_TUNE_USE_GATHER_8PARTS, "use_gather_8parts",
- ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER4 | m_ALDERLAKE | m_ARROWLAKE
- | m_ARROWLAKE_S | m_CORE_ATOM | m_GENERIC | m_GDS))
+ ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER4 | m_CORE_HYBRID | m_CORE_ATOM
+ | m_GENERIC | m_GDS))
/* X86_TUNE_USE_SCATTER: Use scater instructions for vectors with 8 or more
elements. */
@@ -528,8 +520,7 @@ DEF_TUNE (X86_TUNE_AVOID_128FMA_CHAINS, "avoid_fma_chains", m_ZNVER1 | m_ZNVER2
/* X86_TUNE_AVOID_256FMA_CHAINS: Avoid creating loops with tight 256bit or
smaller FMA chain. */
DEF_TUNE (X86_TUNE_AVOID_256FMA_CHAINS, "avoid_fma256_chains", m_ZNVER2 | m_ZNVER3
- | m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_SAPPHIRERAPIDS
- | m_CORE_ATOM)
+ | m_CORE_HYBRID | m_SAPPHIRERAPIDS | m_CORE_ATOM)
/* X86_TUNE_AVOID_512FMA_CHAINS: Avoid creating loops with tight 512bit or
smaller FMA chain. */
@@ -573,14 +564,12 @@ DEF_TUNE (X86_TUNE_AVX512_SPLIT_REGS, "avx512_split_regs", m_ZNVER4)
/* X86_TUNE_AVX256_MOVE_BY_PIECES: Optimize move_by_pieces with 256-bit
AVX instructions. */
DEF_TUNE (X86_TUNE_AVX256_MOVE_BY_PIECES, "avx256_move_by_pieces",
- m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_AVX2 | m_ZNVER1
- | m_ZNVER2 | m_ZNVER3)
+ m_CORE_HYBRID | m_CORE_AVX2 | m_ZNVER1 | m_ZNVER2 | m_ZNVER3)
/* X86_TUNE_AVX256_STORE_BY_PIECES: Optimize store_by_pieces with 256-bit
AVX instructions. */
DEF_TUNE (X86_TUNE_AVX256_STORE_BY_PIECES, "avx256_store_by_pieces",
- m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S | m_CORE_AVX2 | m_ZNVER1
- | m_ZNVER2 | m_ZNVER3)
+ m_CORE_HYBRID | m_CORE_AVX2 | m_ZNVER1 | m_ZNVER2 | m_ZNVER3)
/* X86_TUNE_AVX512_MOVE_BY_PIECES: Optimize move_by_pieces with 512-bit
AVX instructions. */
@@ -717,3 +706,6 @@ DEF_TUNE (X86_TUNE_EMIT_VZEROUPPER, "emit_vzeroupper", ~m_KNL)
/* X86_TUNE_SLOW_STC: This disables use of stc, clc and cmc carry flag
modifications on architectures where theses operations are slow. */
DEF_TUNE (X86_TUNE_SLOW_STC, "slow_stc", m_PENT4)
+
+/* X86_TUNE_USE_RCR: Controls use of rcr 1 instruction instead of shrd. */
+DEF_TUNE (X86_TUNE_USE_RCR, "use_rcr", m_AMD_MULTIPLE)
diff --git a/gcc/config/i386/x86gprintrin.h b/gcc/config/i386/x86gprintrin.h
index f41be3f..11a8a96 100644
--- a/gcc/config/i386/x86gprintrin.h
+++ b/gcc/config/i386/x86gprintrin.h
@@ -108,6 +108,8 @@
#include <hresetintrin.h>
+#include <usermsrintrin.h>
+
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_wbinvd (void)
diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
index 9e1b0d0..8fa7439 100644
--- a/gcc/config/loongarch/loongarch.cc
+++ b/gcc/config/loongarch/loongarch.cc
@@ -10194,7 +10194,8 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val)
void
loongarch_expand_vector_group_init (rtx target, rtx vals)
{
- rtx ops[2] = { XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1) };
+ rtx ops[2] = { force_reg (E_V16QImode, XVECEXP (vals, 0, 0)),
+ force_reg (E_V16QImode, XVECEXP (vals, 0, 1)) };
emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0],
ops[1])));
}
diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
index d357e32..da3ec2a 100644
--- a/gcc/config/loongarch/loongarch.h
+++ b/gcc/config/loongarch/loongarch.h
@@ -49,9 +49,6 @@ along with GCC; see the file COPYING3. If not see
#define TARGET_LIBGCC_SDATA_SECTION ".sdata"
-/* Driver native functions for SPEC processing in the GCC driver. */
-#include "loongarch-driver.h"
-
/* This definition replaces the formerly used 'm' constraint with a
different constraint letter in order to avoid changing semantics of
the 'm' constraint when accepting new address formats in
@@ -1061,11 +1058,6 @@ typedef struct {
#define ASM_OUTPUT_ALIGN(STREAM, LOG) fprintf (STREAM, "\t.align\t%d\n", (LOG))
-/* "nop" instruction 54525952 (andi $r0,$r0,0) is
- used for padding. */
-#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, LOG) \
- fprintf (STREAM, "\t.align\t%d,54525952,4\n", (LOG))
-
/* This is how to output an assembler line to advance the location
counter by SIZE bytes. */
diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
index 4fcb6d7..9f5a753 100644
--- a/gcc/config/loongarch/loongarch.md
+++ b/gcc/config/loongarch/loongarch.md
@@ -37,7 +37,6 @@
UNSPEC_FCLASS
UNSPEC_FMAX
UNSPEC_FMIN
- UNSPEC_FCOPYSIGN
UNSPEC_FTINT
UNSPEC_FTINTRM
UNSPEC_FTINTRP
@@ -1129,9 +1128,8 @@
(define_insn "copysign<mode>3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
- (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")
- (match_operand:ANYF 2 "register_operand" "f")]
- UNSPEC_FCOPYSIGN))]
+ (copysign:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
"TARGET_HARD_FLOAT"
"fcopysign.<fmt>\t%0,%1,%2"
[(set_attr "type" "fcopysign")
@@ -2152,7 +2150,7 @@
[(set (match_operand:FCC 0 "register_operand" "=z")
(const_int 0))]
""
- "movgr2cf\t%0,$r0")
+ "fcmp.caf.s\t%0,$f0,$f0")
;; Conditional move instructions.
diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch
index 9b06fa8..667a6bb 100644
--- a/gcc/config/loongarch/t-loongarch
+++ b/gcc/config/loongarch/t-loongarch
@@ -16,7 +16,8 @@
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
-TM_H += loongarch-multilib.h $(srcdir)/config/loongarch/loongarch-driver.h
+
+GTM_H += loongarch-multilib.h
OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h \
$(srcdir)/config/loongarch/loongarch-tune.h
diff --git a/gcc/config/nvptx/nvptx.cc b/gcc/config/nvptx/nvptx.cc
index edef39f..634c316 100644
--- a/gcc/config/nvptx/nvptx.cc
+++ b/gcc/config/nvptx/nvptx.cc
@@ -335,8 +335,9 @@ nvptx_option_override (void)
init_machine_status = nvptx_init_machine_status;
/* Via nvptx 'OPTION_DEFAULT_SPECS', '-misa' always appears on the command
- line. */
- gcc_checking_assert (OPTION_SET_P (ptx_isa_option));
+ line; but handle the case that the compiler is not run via the driver. */
+ if (!OPTION_SET_P (ptx_isa_option))
+ fatal_error (UNKNOWN_LOCATION, "%<-march=%> must be specified");
handle_ptx_version_option ();
diff --git a/gcc/config/pa/pa.md b/gcc/config/pa/pa.md
index f603591..4f85991 100644
--- a/gcc/config/pa/pa.md
+++ b/gcc/config/pa/pa.md
@@ -10739,10 +10739,10 @@ add,l %2,%3,%3\;bv,n %%r0(%3)"
;; generating PA 1.x code even though all PA 1.x systems are strongly ordered.
;; When barriers are needed, we use a strongly ordered ldcw instruction as
-;; the barrier. Most PA 2.0 targets are cache coherent. In that case, we
-;; can use the coherent cache control hint and avoid aligning the ldcw
-;; address. In spite of its description, it is not clear that the sync
-;; instruction works as a barrier.
+;; the barrier. All PA 2.0 targets accept the "co" cache control hint but
+;; only PA8800 and PA8900 processors implement the cacheable hint. In
+;; that case, we can avoid aligning the ldcw address. In spite of its
+;; description, it is not clear that the sync instruction works as a barrier.
(define_expand "memory_barrier"
[(parallel
@@ -10772,7 +10772,7 @@ add,l %2,%3,%3\;bv,n %%r0(%3)"
(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
(clobber (match_operand 1 "pmode_register_operand" "=&r"))]
"TARGET_64BIT"
- "ldo 15(%%sp),%1\n\tdepd %%r0,63,3,%1\n\tldcw 0(%1),%1"
+ "ldo 15(%%sp),%1\n\tdepd %%r0,63,3,%1\n\tldcw,co 0(%1),%1"
[(set_attr "type" "binary")
(set_attr "length" "12")])
@@ -10781,6 +10781,6 @@ add,l %2,%3,%3\;bv,n %%r0(%3)"
(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
(clobber (match_operand 1 "pmode_register_operand" "=&r"))]
""
- "ldo 15(%%sp),%1\n\t{dep|depw} %%r0,31,3,%1\n\tldcw 0(%1),%1"
+ "ldo 15(%%sp),%1\n\t{dep|depw} %%r0,31,3,%1\n\t{ldcw|ldcw,co} 0(%1),%1"
[(set_attr "type" "binary")
(set_attr "length" "12")])
diff --git a/gcc/config/pa/pa.opt b/gcc/config/pa/pa.opt
index dd358f2..573edce 100644
--- a/gcc/config/pa/pa.opt
+++ b/gcc/config/pa/pa.opt
@@ -50,7 +50,7 @@ Target Mask(CALLER_COPIES)
Caller copies function arguments passed by hidden reference.
mcoherent-ldcw
-Target Var(TARGET_COHERENT_LDCW) Init(1)
+Target Var(TARGET_COHERENT_LDCW) Init(0)
Use ldcw/ldcd coherent cache-control hint.
mdisable-fpregs
diff --git a/gcc/config/pa/pa32-linux.h b/gcc/config/pa/pa32-linux.h
index bdd13ce..f48e453 100644
--- a/gcc/config/pa/pa32-linux.h
+++ b/gcc/config/pa/pa32-linux.h
@@ -68,11 +68,6 @@ call_ ## FUNC (void) \
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE BITS_PER_WORD
-/* POSIX types such as pthread_mutex_t require 16-byte alignment to retain
- layout compatibility with the original linux thread implementation. */
-#undef MALLOC_ABI_ALIGNMENT
-#define MALLOC_ABI_ALIGNMENT 128
-
/* Place jump tables in the text section except when generating non-PIC
code. When generating non-PIC code, the relocations needed to load the
address of the jump table result in a text label in the final executable
diff --git a/gcc/config/riscv/autovec.md b/gcc/config/riscv/autovec.md
index cd0cbdd..c5b1e52 100644
--- a/gcc/config/riscv/autovec.md
+++ b/gcc/config/riscv/autovec.md
@@ -59,7 +59,7 @@
(match_operand:<RATIO64:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO64:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -74,7 +74,7 @@
(match_operand:<RATIO32:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO32:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -89,7 +89,7 @@
(match_operand:<RATIO16:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO16:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -104,7 +104,7 @@
(match_operand:<RATIO8:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO8:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -119,7 +119,7 @@
(match_operand:<RATIO4:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO4:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -134,7 +134,7 @@
(match_operand:<RATIO2:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO2:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -153,7 +153,7 @@
(match_operand:<RATIO1:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO1:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -172,7 +172,7 @@
(match_operand:<RATIO64:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO64:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -187,7 +187,7 @@
(match_operand:<RATIO32:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO32:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -202,7 +202,7 @@
(match_operand:<RATIO16:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO16:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -217,7 +217,7 @@
(match_operand:<RATIO8:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO8:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -232,7 +232,7 @@
(match_operand:<RATIO4:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO4:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -247,7 +247,7 @@
(match_operand:<RATIO2:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO2:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -266,7 +266,7 @@
(match_operand:<RATIO1:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_mode_p (<RATIO1:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -336,7 +336,7 @@
;; - vadd.vx/vadd.vi
;; -------------------------------------------------------------------------
-(define_expand "@vec_series<mode>"
+(define_expand "vec_series<mode>"
[(match_operand:V_VLSI 0 "register_operand")
(match_operand:<VEL> 1 "reg_or_int_operand")
(match_operand:<VEL> 2 "reg_or_int_operand")]
@@ -575,10 +575,10 @@
;; -------------------------------------------------------------------------
(define_expand "vcond_mask_<mode><mode>"
- [(match_operand:VB 0 "register_operand")
- (match_operand:VB 1 "register_operand")
- (match_operand:VB 2 "register_operand")
- (match_operand:VB 3 "register_operand")]
+ [(match_operand:VB_VLS 0 "register_operand")
+ (match_operand:VB_VLS 1 "register_operand")
+ (match_operand:VB_VLS 2 "register_operand")
+ (match_operand:VB_VLS 3 "register_operand")]
"TARGET_VECTOR"
{
/* mask1 = operands[3] & operands[1]. */
@@ -974,6 +974,30 @@
}
[(set_attr "type" "vfncvtitof")])
+;; This operation can be performed in the loop vectorizer but unfortunately
+;; not applicable for now. We can remove this pattern after loop vectorizer
+;; is able to take care of INT64 to FP16 conversion.
+(define_insn_and_split "<float_cvt><mode><vnnconvert>2"
+ [(set (match_operand:<VNNCONVERT> 0 "register_operand")
+ (any_float:<VNNCONVERT>
+ (match_operand:VWWCONVERTI 1 "register_operand")))]
+ "TARGET_VECTOR && TARGET_ZVFH && can_create_pseudo_p () && !flag_trapping_math"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+ {
+ rtx single = gen_reg_rtx (<VNCONVERT>mode); /* Get vector SF mode. */
+
+ /* Step-1, INT64 => FP32. */
+ emit_insn (gen_<float_cvt><mode><vnconvert>2 (single, operands[1]));
+ /* Step-2, FP32 => FP16. */
+ emit_insn (gen_trunc<vnconvert><vnnconvert>2 (operands[0], single));
+
+ DONE;
+ }
+ [(set_attr "type" "vfncvtitof")]
+)
+
;; =========================================================================
;; == Unary arithmetic
;; =========================================================================
@@ -2205,12 +2229,20 @@
})
;; -------------------------------------------------------------------------
-;; ---- [FP] Math.h.
+;; ---- [FP] Rounding.
;; -------------------------------------------------------------------------
;; Includes:
;; - ceil/ceilf
;; - floor/floorf
;; - nearbyint/nearbyintf
+;; - rint/rintf
+;; - round/roundf
+;; - trunc/truncf
+;; - roundeven/roundevenf
+;; - lrint/lrintf
+;; - irintf
+;; - lceil/lceilf
+;; - lfloor/lfloorf
;; -------------------------------------------------------------------------
(define_expand "ceil<mode>2"
[(match_operand:V_VLSF 0 "register_operand")
@@ -2281,3 +2313,43 @@
DONE;
}
)
+
+(define_expand "lrint<mode><v_i_l_ll_convert>2"
+ [(match_operand:<V_I_L_LL_CONVERT> 0 "register_operand")
+ (match_operand:V_VLS_FCONVERT_I_L_LL 1 "register_operand")]
+ "TARGET_VECTOR && !flag_trapping_math && !flag_rounding_math"
+ {
+ riscv_vector::expand_vec_lrint (operands[0], operands[1], <MODE>mode, <V_I_L_LL_CONVERT>mode);
+ DONE;
+ }
+)
+
+(define_expand "lround<mode><v_i_l_ll_convert>2"
+ [(match_operand:<V_I_L_LL_CONVERT> 0 "register_operand")
+ (match_operand:V_VLS_FCONVERT_I_L_LL 1 "register_operand")]
+ "TARGET_VECTOR && !flag_trapping_math && !flag_rounding_math"
+ {
+ riscv_vector::expand_vec_lround (operands[0], operands[1], <MODE>mode, <V_I_L_LL_CONVERT>mode);
+ DONE;
+ }
+)
+
+(define_expand "lceil<mode><v_i_l_ll_convert>2"
+ [(match_operand:<V_I_L_LL_CONVERT> 0 "register_operand")
+ (match_operand:V_VLS_FCONVERT_I_L_LL 1 "register_operand")]
+ "TARGET_VECTOR && !flag_trapping_math && !flag_rounding_math"
+ {
+ riscv_vector::expand_vec_lceil (operands[0], operands[1], <MODE>mode, <V_I_L_LL_CONVERT>mode);
+ DONE;
+ }
+)
+
+(define_expand "lfloor<mode><v_i_l_ll_convert>2"
+ [(match_operand:<V_I_L_LL_CONVERT> 0 "register_operand")
+ (match_operand:V_VLS_FCONVERT_I_L_LL 1 "register_operand")]
+ "TARGET_VECTOR && !flag_trapping_math && !flag_rounding_math"
+ {
+ riscv_vector::expand_vec_lfloor (operands[0], operands[1], <MODE>mode, <V_I_L_LL_CONVERT>mode);
+ DONE;
+ }
+)
diff --git a/gcc/config/riscv/bitmanip.md b/gcc/config/riscv/bitmanip.md
index 0d126a8..a9c8275 100644
--- a/gcc/config/riscv/bitmanip.md
+++ b/gcc/config/riscv/bitmanip.md
@@ -215,6 +215,20 @@
[(set_attr "type" "bitmanip")
(set_attr "mode" "<X:MODE>")])
+(define_insn_and_split "*<optab>_not_const<mode>"
+ [(set (match_operand:X 0 "register_operand" "=r")
+ (bitmanip_bitwise:X (not:X (match_operand:X 1 "register_operand" "r"))
+ (match_operand:X 2 "const_arith_operand" "I")))
+ (clobber (match_scratch:X 3 "=&r"))]
+ "(TARGET_ZBB || TARGET_ZBKB) && !TARGET_ZCB
+ && !optimize_function_for_size_p (cfun)"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (bitmanip_bitwise:X (not:X (match_dup 1)) (match_dup 3)))]
+ ""
+ [(set_attr "type" "bitmanip")])
+
;; '(a >= 0) ? b : 0' is emitted branchless (from if-conversion). Without a
;; bit of extra help for combine (i.e., the below split), we end up emitting
;; not/srai/and instead of combining the not into an andn.
diff --git a/gcc/config/riscv/constraints.md b/gcc/config/riscv/constraints.md
index 964fdd4..07ee14d 100644
--- a/gcc/config/riscv/constraints.md
+++ b/gcc/config/riscv/constraints.md
@@ -151,6 +151,13 @@
(define_register_constraint "zmvr" "(TARGET_ZFA || TARGET_XTHEADFMV) ? GR_REGS : NO_REGS"
"An integer register for ZFA or XTheadFmv.")
+;; CORE-V Constraints
+(define_constraint "CVP2"
+ "Checking for CORE-V ALU clip if ival plus 1 is a power of 2"
+ (and (match_code "const_int")
+ (and (match_test "IN_RANGE (ival, 0, 1073741823)")
+ (match_test "exact_log2 (ival + 1) != -1"))))
+
;; Vector constraints.
(define_register_constraint "vr" "TARGET_VECTOR ? V_REGS : NO_REGS"
diff --git a/gcc/config/riscv/corev.def b/gcc/config/riscv/corev.def
new file mode 100644
index 0000000..17580df
--- /dev/null
+++ b/gcc/config/riscv/corev.def
@@ -0,0 +1,43 @@
+// XCVMAC
+RISCV_BUILTIN (cv_mac_mac, "cv_mac_mac", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_SI, cvmac),
+RISCV_BUILTIN (cv_mac_msu, "cv_mac_msu", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_SI, cvmac),
+RISCV_BUILTIN (cv_mac_muluN, "cv_mac_muluN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_mulhhuN, "cv_mac_mulhhuN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_mulsN, "cv_mac_mulsN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_mulhhsN, "cv_mac_mulhhsN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_muluRN, "cv_mac_muluRN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_mulhhuRN, "cv_mac_mulhhuRN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_mulsRN, "cv_mac_mulsRN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_mulhhsRN, "cv_mac_mulhhsRN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_macuN, "cv_mac_macuN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_machhuN, "cv_mac_machhuN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_macsN, "cv_mac_macsN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_machhsN, "cv_mac_machhsN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_macuRN, "cv_mac_macuRN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_machhuRN, "cv_mac_machhuRN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_USI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_macsRN, "cv_mac_macsRN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_SI_UQI, cvmac),
+RISCV_BUILTIN (cv_mac_machhsRN, "cv_mac_machhsRN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_SI_UQI, cvmac),
+
+// XCVALU
+RISCV_BUILTIN (cv_alu_slet, "cv_alu_slet", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI, cvalu),
+RISCV_BUILTIN (cv_alu_sletu, "cv_alu_sletu", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_USI_USI, cvalu),
+RISCV_BUILTIN (cv_alu_min, "cv_alu_min", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI, cvalu),
+RISCV_BUILTIN (cv_alu_minu, "cv_alu_minu", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI, cvalu),
+RISCV_BUILTIN (cv_alu_max, "cv_alu_max", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI, cvalu),
+RISCV_BUILTIN (cv_alu_maxu, "cv_alu_maxu", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI, cvalu),
+
+RISCV_BUILTIN (cv_alu_exths, "cv_alu_exths", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_HI, cvalu),
+RISCV_BUILTIN (cv_alu_exthz, "cv_alu_exthz", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_UHI, cvalu),
+RISCV_BUILTIN (cv_alu_extbs, "cv_alu_extbs", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_QI, cvalu),
+RISCV_BUILTIN (cv_alu_extbz, "cv_alu_extbz", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_UQI, cvalu),
+
+RISCV_BUILTIN (cv_alu_clip, "cv_alu_clip", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI, cvalu),
+RISCV_BUILTIN (cv_alu_clipu, "cv_alu_clipu", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI, cvalu),
+RISCV_BUILTIN (cv_alu_addN, "cv_alu_addN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_adduN, "cv_alu_adduN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_addRN, "cv_alu_addRN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_adduRN, "cv_alu_adduRN",RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_subN, "cv_alu_subN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_subuN, "cv_alu_subuN", RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_subRN, "cv_alu_subRN", RISCV_BUILTIN_DIRECT, RISCV_SI_FTYPE_SI_SI_UQI, cvalu),
+RISCV_BUILTIN (cv_alu_subuRN, "cv_alu_subuRN",RISCV_BUILTIN_DIRECT, RISCV_USI_FTYPE_USI_USI_UQI, cvalu),
diff --git a/gcc/config/riscv/corev.md b/gcc/config/riscv/corev.md
new file mode 100644
index 0000000..1350bd4
--- /dev/null
+++ b/gcc/config/riscv/corev.md
@@ -0,0 +1,693 @@
+;; Machine description for CORE-V vendor extensions.
+;; Copyright (C) 2023 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+
+ ;;CORE-V ALU
+ UNSPEC_CV_ALU_CLIP
+ UNSPEC_CV_ALU_CLIPR
+ UNSPEC_CV_ALU_CLIPU
+ UNSPEC_CV_ALU_CLIPUR
+])
+
+;; XCVMAC extension.
+
+(define_insn "riscv_cv_mac_mac"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))
+ (match_operand:SI 3 "register_operand" "0")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mac\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_msu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 3 "register_operand" "0")
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.msu\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_muluN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (mult:SI
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r"))))
+ (match_operand:QI 3 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulun\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_mulhhuN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (mult:SI
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16)))))
+ (match_operand:QI 3 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulhhun\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_mulsN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (mult:SI
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r"))))
+ (match_operand:QI 3 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulsn\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_mulhhsN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (mult:SI
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16)))))
+ (match_operand:QI 3 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulhhsn\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_muluRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (fma:SI
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r")))
+ (if_then_else
+ (ne:QI (match_operand:QI 3 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 3)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulurn\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_mulhhuRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (fma:SI
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (if_then_else
+ (ne:QI (match_operand:QI 3 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 3)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulhhurn\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_mulsRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (fma:SI
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r")))
+ (if_then_else
+ (ne:QI (match_operand:QI 3 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 3)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulsrn\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_mulhhsRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (fma:SI
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (if_then_else
+ (ne:QI (match_operand:QI 3 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 3)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.mulhhsrn\t%0,%1,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_macuN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (fma:SI
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:SI 3 "register_operand" "0"))
+ (match_operand:QI 4 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.macun\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_machhuN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (fma:SI
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (match_operand:SI 3 "register_operand" "0"))
+ (match_operand:QI 4 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.machhun\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_macsN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (fma:SI
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:SI 3 "register_operand" "0"))
+ (match_operand:QI 4 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.macsn\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_machhsN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (fma:SI
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (match_operand:SI 3 "register_operand" "0"))
+ (match_operand:QI 4 "const_csr_operand" "K")))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.machhsn\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_macuRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (plus:SI
+ (fma:SI
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:SI 3 "register_operand" "0"))
+ (if_then_else
+ (ne:QI (match_operand:QI 4 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 4)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 4)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.macurn\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_machhuRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (plus:SI
+ (fma:SI
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (zero_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (match_operand:SI 3 "register_operand" "0"))
+ (if_then_else
+ (ne:QI (match_operand:QI 4 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 4)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 4)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.machhurn\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_macsRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (plus:SI
+ (fma:SI
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "r")))
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:SI 3 "register_operand" "0"))
+ (if_then_else
+ (ne:QI (match_operand:QI 4 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 4)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 4)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.macsrn\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_mac_machhsRN"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (plus:SI
+ (fma:SI
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16))))
+ (sign_extend:SI
+ (truncate:HI
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (match_operand:SI 3 "register_operand" "0"))
+ (if_then_else
+ (ne:QI (match_operand:QI 4 "const_csr_operand" "K") (const_int 0))
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 4)
+ (const_int 1)))
+ (const_int 0)))
+ (match_dup 4)))]
+
+ "TARGET_XCVMAC && !TARGET_64BIT"
+ "cv.machhsrn\t%0,%1,%2,%4"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+;; XCVALU builtins
+
+(define_insn "riscv_cv_alu_slet"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (le:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.sle\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_sletu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (leu:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.sleu\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_min"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (smin:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.min\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_minu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (umin:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.minu\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_max"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (smax:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.max\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_maxu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (umax:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.maxu\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_exths"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI
+ (truncate:HI
+ (match_operand:HI 1 "register_operand" "r"))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.exths\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_exthz"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (truncate:HI
+ (match_operand:HI 1 "register_operand" "r"))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.exthz\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_extbs"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI
+ (truncate:QI
+ (match_operand:QI 1 "register_operand" "r"))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.extbs\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_extbz"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (truncate:QI
+ (match_operand:QI 1 "register_operand" "r"))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "cv.extbz\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_clip"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "immediate_register_operand" "CVP2,r")]
+ UNSPEC_CV_ALU_CLIP))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.clip\t%0,%1,%X2
+ cv.clipr\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_clipu"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "immediate_register_operand" "CVP2,r")]
+ UNSPEC_CV_ALU_CLIPU))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.clipu\t%0,%1,%X2
+ cv.clipur\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_addN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (and:SI (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.addn\t%0,%1,%2,%3
+ cv.addnr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_adduN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (and:SI (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.addun\t%0,%1,%2,%3
+ cv.addunr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_addRN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI
+ (plus:SI
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (if_then_else (eq (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 0))
+ (const_int 1)
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))))
+ (and:SI (match_dup 3)
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.addrn\t%0,%1,%2,%3
+ cv.addrnr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_adduRN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI
+ (plus:SI
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (if_then_else (eq (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 0))
+ (const_int 1)
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))))
+ (and:SI (match_dup 3)
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.addurn\t%0,%1,%2,%3
+ cv.addurnr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_subN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (and:SI (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.subn\t%0,%1,%2,%3
+ cv.subnr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_subuN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (and:SI (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.subun\t%0,%1,%2,%3
+ cv.subunr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_subRN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI
+ (plus:SI
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (if_then_else (eq (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 0))
+ (const_int 1)
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))))
+ (and:SI (match_dup 3)
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.subrn\t%0,%1,%2,%3
+ cv.subrnr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "riscv_cv_alu_subuRN"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI
+ (plus:SI
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "r,0")
+ (match_operand:SI 2 "register_operand" "r,r"))
+ (if_then_else (eq (match_operand:QI 3 "csr_operand" "K,r")
+ (const_int 0))
+ (const_int 1)
+ (ashift:SI (const_int 1)
+ (minus:QI (match_dup 3)
+ (const_int 1)))))
+ (and:SI (match_dup 3)
+ (const_int 31))))]
+
+ "TARGET_XCVALU && !TARGET_64BIT"
+ "@
+ cv.suburn\t%0,%1,%2,%3
+ cv.suburnr\t%0,%2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
diff --git a/gcc/config/riscv/generic-ooo.md b/gcc/config/riscv/generic-ooo.md
new file mode 100644
index 0000000..78b9e48
--- /dev/null
+++ b/gcc/config/riscv/generic-ooo.md
@@ -0,0 +1,284 @@
+;; RISC-V generic out-of-order core scheduling model.
+;; Copyright (C) 2017-2023 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "generic_ooo")
+
+;; Regarding functional units we assume a three-way split:
+;; - Integer ALU (IXU) - 4 symmetric units.
+;; - Floating-point (FXU) - 2 symmetric units.
+;; - Vector Unit (VXU) - 1 unit.
+
+;; We assume 6-wide issue:
+;; - 5-wide generic/integer issue.
+;; - 1-wide vector issue.
+
+;; For now, the only subunits are for non-pipelined integer division and
+;; vector div/mult/sqrt.
+;; No extra units for e.g. vector permutes, masking, everything is assumed to
+;; be on the same pipelined execution unit.
+
+;; Latency:
+;; - Regular integer operations take 1 cycle.
+;; - Multiplication/Division take multiple cycles.
+;; - Float operations take 4-6 cycles.
+;; - Regular vector operations take 2-6 cycles.
+;; (This assumes LMUL = 1, latency for LMUL = 2, 4, 8 is scaled accordingly
+;; by riscv_sched_adjust_cost when -madjust-lmul-cost is given)
+;; - Load/Store:
+;; - To/From IXU: 4 cycles.
+;; - To/From FXU: 6 cycles.
+;; - To/From VXU: 6 cycles.
+
+;; Integer/float issue queues.
+(define_cpu_unit "issue0,issue1,issue2,issue3,issue4" "generic_ooo")
+
+;; Separate issue queue for vector instructions.
+(define_cpu_unit "generic_ooo_vxu_issue" "generic_ooo")
+
+;; Integer/float execution units.
+(define_cpu_unit "ixu0,ixu1,ixu2,ixu3" "generic_ooo")
+(define_cpu_unit "fxu0,fxu1" "generic_ooo")
+
+;; Integer subunit for division.
+(define_cpu_unit "generic_ooo_div" "generic_ooo")
+
+;; Vector execution unit.
+(define_cpu_unit "generic_ooo_vxu_alu" "generic_ooo")
+
+;; Vector subunit that does mult/div/sqrt.
+(define_cpu_unit "generic_ooo_vxu_multicycle" "generic_ooo")
+
+;; Shortcuts
+(define_reservation "generic_ooo_issue" "issue0|issue1|issue2|issue3|issue4")
+(define_reservation "generic_ooo_ixu_alu" "ixu0|ixu1|ixu2|ixu3")
+(define_reservation "generic_ooo_fxu" "fxu0|fxu1")
+
+
+;; Integer load/store
+(define_insn_reservation "generic_ooo_int_load" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "load"))
+ "generic_ooo_issue,generic_ooo_ixu_alu")
+
+(define_insn_reservation "generic_ooo_int_store" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "store"))
+ "generic_ooo_issue,generic_ooo_ixu_alu")
+
+;; Float load/store
+(define_insn_reservation "generic_ooo_float_load" 6
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fpload"))
+ "generic_ooo_issue,generic_ooo_ixu_alu")
+
+(define_insn_reservation "generic_ooo_float_store" 6
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fpstore"))
+ "generic_ooo_issue,generic_ooo_fxu")
+
+;; Vector load/store
+(define_insn_reservation "generic_ooo_vec_load" 6
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vlde,vldm,vlds,vldux,vldox,vldff,vldr"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+(define_insn_reservation "generic_ooo_vec_store" 6
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vste,vstm,vsts,vstux,vstox,vstr"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector segment loads/stores.
+(define_insn_reservation "generic_ooo_vec_loadstore_seg" 10
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff,\
+ vssegte,vssegts,vssegtux,vssegtox"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+
+;; Generic integer instructions.
+(define_insn_reservation "generic_ooo_alu" 1
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "unknown,const,arith,shift,slt,multi,auipc,nop,logical,\
+ move,bitmanip,min,max,minu,maxu,clz,ctz"))
+ "generic_ooo_issue,generic_ooo_ixu_alu")
+
+
+;; Float move, convert and compare.
+(define_insn_reservation "generic_ooo_float_move" 3
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fmove"))
+ "generic_ooo_issue,generic_ooo_fxu")
+
+(define_insn_reservation "generic_ooo_fcvt" 3
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fcvt"))
+ "generic_ooo_issue,generic_ooo_fxu")
+
+(define_insn_reservation "generic_ooo_fcmp" 2
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fcmp"))
+ "generic_ooo_issue,generic_ooo_fxu")
+
+;; Integer multiplication.
+(define_insn_reservation "generic_ooo_imul" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "imul"))
+ "generic_ooo_issue,generic_ooo_ixu_alu,generic_ooo_ixu_alu")
+
+;; Assume integer division is not pipelined. Do not block the unit for more
+;; than three cycles so the DFA does not get too large. Similar for other
+;; non-pipelined instructions.
+(define_insn_reservation "generic_ooo_idiv" 16
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "idiv"))
+ "generic_ooo_issue,generic_ooo_ixu_alu,generic_ooo_div,generic_ooo_div*3")
+
+;; Float addition and multiplication.
+(define_insn_reservation "generic_ooo_faddmul" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fadd,fmul"))
+ "generic_ooo_issue,generic_ooo_fxu")
+
+;; Float FMA.
+(define_insn_reservation "generic_ooo_float_fma" 6
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "fmadd"))
+ "generic_ooo_issue,generic_ooo_fxu")
+
+;; Assume float division and sqrt are not pipelined.
+(define_insn_reservation "generic_ooo_float_div_single" 12
+ (and (eq_attr "tune" "generic_ooo")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "generic_ooo_issue,generic_ooo_fxu,generic_ooo_div,generic_ooo_div*3")
+
+(define_insn_reservation "generic_ooo_float_div_double" 16
+ (and (eq_attr "tune" "generic_ooo")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "generic_ooo_issue,generic_ooo_fxu,generic_ooo_div,generic_ooo_div*3")
+
+;; Popcount and clmul.
+(define_insn_reservation "generic_ooo_popcount" 2
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "cpop,clmul"))
+ "generic_ooo_issue,generic_ooo_ixu_alu")
+
+;; Regular vector operations and integer comparisons.
+(define_insn_reservation "generic_ooo_vec_alu" 3
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vialu,viwalu,vext,vicalu,vshift,vnshift,viminmax,vicmp,\
+ vimov,vsalu,vaalu,vsshift,vnclip,vmov,vfmov"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector float comparison, conversion etc.
+(define_insn_reservation "generic_ooo_vec_fcmp" 3
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vfrecp,vfminmax,vfcmp,vfsgnj,vfclass,vfcvtitof,\
+ vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
+ vfncvtftoi,vfncvtftof"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector integer multiplication.
+(define_insn_reservation "generic_ooo_vec_imul" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vimul,viwmul,vimuladd,viwmuladd,vsmul"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector float addition.
+(define_insn_reservation "generic_ooo_vec_fadd" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vfalu,vfwalu"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector float multiplication and FMA.
+(define_insn_reservation "generic_ooo_vec_fmul" 6
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vfmul,vfwmul,vfmuladd,vfwmuladd"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector crypto, assumed to be a generic operation for now.
+(define_insn_reservation "generic_ooo_crypto" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "crypto"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector permute.
+(define_insn_reservation "generic_ooo_perm" 3
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vimerge,vfmerge,vslideup,vslidedown,vislide1up,\
+ vislide1down,vfslide1up,vfslide1down,vgather,vcompress"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector reduction.
+(define_insn_reservation "generic_ooo_vec_reduction" 8
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vired,viwred,vfredu,vfwredu"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_multicycle")
+
+;; Vector ordered reduction, assume the latency number is for
+;; a 128-bit vector. It is scaled in riscv_sched_adjust_cost
+;; for larger vectors.
+(define_insn_reservation "generic_ooo_vec_ordered_reduction" 10
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vfredo,vfwredo"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_multicycle*3")
+
+;; Vector integer division, assume not pipelined.
+(define_insn_reservation "generic_ooo_vec_idiv" 16
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vidiv"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_multicycle*3")
+
+;; Vector float divisions and sqrt, assume not pipelined.
+(define_insn_reservation "generic_ooo_vec_float_divsqrt" 16
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vfdiv,vfsqrt"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_multicycle*3")
+
+;; Vector mask operations.
+(define_insn_reservation "generic_ooo_vec_mask" 2
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,\
+ vfmovvf,vfmovfv"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_alu")
+
+;; Vector vsetvl.
+(define_insn_reservation "generic_ooo_vec_vesetvl" 1
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "vsetvl,vsetvl_pre"))
+ "generic_ooo_vxu_issue")
+
+;; Vector rounding mode setters, assume pipeline barrier.
+(define_insn_reservation "generic_ooo_vec_setrm" 20
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "wrvxrm,wrfrm"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_issue*3")
+
+;; Vector read vlen/vlenb.
+(define_insn_reservation "generic_ooo_vec_readlen" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "rdvlenb,rdvl"))
+ "generic_ooo_vxu_issue,generic_ooo_vxu_issue")
+
+;; Transfer from/to coprocessor. Assume not pipelined.
+(define_insn_reservation "generic_ooo_xfer" 4
+ (and (eq_attr "tune" "generic_ooo")
+ (eq_attr "type" "mfc,mtc"))
+ "generic_ooo_issue,generic_ooo_ixu_alu,generic_ooo_ixu_alu*3")
diff --git a/gcc/config/riscv/generic.md b/gcc/config/riscv/generic.md
index 57d3c3b..8894048 100644
--- a/gcc/config/riscv/generic.md
+++ b/gcc/config/riscv/generic.md
@@ -47,7 +47,7 @@
(define_insn_reservation "generic_branch" 1
(and (eq_attr "tune" "generic")
- (eq_attr "type" "branch,jump,call"))
+ (eq_attr "type" "branch,jump,call,jalr"))
"alu")
(define_insn_reservation "generic_imul" 10
diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
index 7323ff3..8901671 100644
--- a/gcc/config/riscv/linux.h
+++ b/gcc/config/riscv/linux.h
@@ -55,9 +55,10 @@ along with GCC; see the file COPYING3. If not see
%{shared} \
%{!shared: \
%{!static: \
- %{rdynamic:-export-dynamic} \
- -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
- %{static:-static}}"
+ %{!static-pie: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} \
+ %{static:-static} %{static-pie:-static -pie --no-dynamic-linker -z text}}"
#define STARTFILE_PREFIX_SPEC \
"/lib" XLEN_SPEC "/" ABI_SPEC "/ " \
diff --git a/gcc/config/riscv/predicates.md b/gcc/config/riscv/predicates.md
index 6b72a5f..a37d035 100644
--- a/gcc/config/riscv/predicates.md
+++ b/gcc/config/riscv/predicates.md
@@ -395,6 +395,11 @@
return true;
})
+;; CORE-V Predicates:
+(define_predicate "immediate_register_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_code "const_int")))
+
;; Predicates for the V extension.
(define_special_predicate "vector_length_operand"
(ior (match_operand 0 "pmode_register_operand")
diff --git a/gcc/config/riscv/riscv-builtins.cc b/gcc/config/riscv/riscv-builtins.cc
index 3fe3a89..fc3976f 100644
--- a/gcc/config/riscv/riscv-builtins.cc
+++ b/gcc/config/riscv/riscv-builtins.cc
@@ -47,6 +47,8 @@ along with GCC; see the file COPYING3. If not see
#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
+#define RISCV_FTYPE_NAME4(A, B, C, D, E) \
+ RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
/* Classifies the prototype of a built-in function. */
enum riscv_function_type {
@@ -123,6 +125,10 @@ AVAIL (clmulr_zbc32, TARGET_ZBC && !TARGET_64BIT)
AVAIL (clmulr_zbc64, TARGET_ZBC && TARGET_64BIT)
AVAIL (hint_pause, (!0))
+// CORE-V AVAIL
+AVAIL (cvmac, TARGET_XCVMAC && !TARGET_64BIT)
+AVAIL (cvalu, TARGET_XCVALU && !TARGET_64BIT)
+
/* Construct a riscv_builtin_description from the given arguments.
INSN is the name of the associated instruction pattern, without the
@@ -158,6 +164,9 @@ AVAIL (hint_pause, (!0))
#define RISCV_ATYPE_UHI unsigned_intHI_type_node
#define RISCV_ATYPE_USI unsigned_intSI_type_node
#define RISCV_ATYPE_UDI unsigned_intDI_type_node
+#define RISCV_ATYPE_QI intQI_type_node
+#define RISCV_ATYPE_HI intHI_type_node
+#define RISCV_ATYPE_SI intSI_type_node
#define RISCV_ATYPE_VOID_PTR ptr_type_node
/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
@@ -170,10 +179,14 @@ AVAIL (hint_pause, (!0))
RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
+#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
+ RISCV_ATYPE_##E
static const struct riscv_builtin_description riscv_builtins[] = {
#include "riscv-cmo.def"
#include "riscv-scalar-crypto.def"
+ #include "corev.def"
DIRECT_BUILTIN (frflags, RISCV_USI_FTYPE, hard_float),
DIRECT_NO_TARGET_BUILTIN (fsflags, RISCV_VOID_FTYPE_USI, hard_float),
diff --git a/gcc/config/riscv/riscv-cores.def b/gcc/config/riscv/riscv-cores.def
index 7d87ab7..91deabb 100644
--- a/gcc/config/riscv/riscv-cores.def
+++ b/gcc/config/riscv/riscv-cores.def
@@ -38,6 +38,7 @@ RISCV_TUNE("sifive-3-series", generic, rocket_tune_info)
RISCV_TUNE("sifive-5-series", generic, rocket_tune_info)
RISCV_TUNE("sifive-7-series", sifive_7, sifive_7_tune_info)
RISCV_TUNE("thead-c906", generic, thead_c906_tune_info)
+RISCV_TUNE("generic-ooo", generic_ooo, generic_ooo_tune_info)
RISCV_TUNE("size", generic, optimize_size_tune_info)
#undef RISCV_TUNE
diff --git a/gcc/config/riscv/riscv-ftypes.def b/gcc/config/riscv/riscv-ftypes.def
index 33620c5..0d1e4dd 100644
--- a/gcc/config/riscv/riscv-ftypes.def
+++ b/gcc/config/riscv/riscv-ftypes.def
@@ -32,6 +32,10 @@ DEF_RISCV_FTYPE (1, (VOID, USI))
DEF_RISCV_FTYPE (1, (VOID, VOID_PTR))
DEF_RISCV_FTYPE (1, (USI, USI))
DEF_RISCV_FTYPE (1, (UDI, UDI))
+DEF_RISCV_FTYPE (1, (USI, UQI))
+DEF_RISCV_FTYPE (1, (USI, UHI))
+DEF_RISCV_FTYPE (1, (SI, QI))
+DEF_RISCV_FTYPE (1, (SI, HI))
DEF_RISCV_FTYPE (2, (USI, UQI, UQI))
DEF_RISCV_FTYPE (2, (USI, UHI, UHI))
DEF_RISCV_FTYPE (2, (USI, USI, USI))
@@ -40,4 +44,11 @@ DEF_RISCV_FTYPE (2, (UDI, UHI, UHI))
DEF_RISCV_FTYPE (2, (UDI, USI, USI))
DEF_RISCV_FTYPE (2, (UDI, UDI, USI))
DEF_RISCV_FTYPE (2, (UDI, UDI, UDI))
+DEF_RISCV_FTYPE (2, (SI, USI, USI))
+DEF_RISCV_FTYPE (2, (SI, SI, SI))
+DEF_RISCV_FTYPE (3, (USI, USI, USI, UQI))
DEF_RISCV_FTYPE (3, (USI, USI, USI, USI))
+DEF_RISCV_FTYPE (3, (SI, SI, SI, UQI))
+DEF_RISCV_FTYPE (3, (SI, SI, SI, SI))
+DEF_RISCV_FTYPE (4, (USI, USI, USI, USI, UQI))
+DEF_RISCV_FTYPE (4, (SI, SI, SI, SI, UQI))
diff --git a/gcc/config/riscv/riscv-opts.h b/gcc/config/riscv/riscv-opts.h
index a525f67..31ee42d 100644
--- a/gcc/config/riscv/riscv-opts.h
+++ b/gcc/config/riscv/riscv-opts.h
@@ -52,7 +52,8 @@ extern enum riscv_isa_spec_class riscv_isa_spec;
/* Keep this list in sync with define_attr "tune" in riscv.md. */
enum riscv_microarchitecture_type {
generic,
- sifive_7
+ sifive_7,
+ generic_ooo
};
extern enum riscv_microarchitecture_type riscv_microarchitecture;
@@ -101,191 +102,7 @@ enum riscv_entity
MAX_RISCV_ENTITIES
};
-#define MASK_ZICSR (1 << 0)
-#define MASK_ZIFENCEI (1 << 1)
-#define MASK_ZIHINTNTL (1 << 2)
-#define MASK_ZIHINTPAUSE (1 << 3)
-
-#define TARGET_ZICSR ((riscv_zi_subext & MASK_ZICSR) != 0)
-#define TARGET_ZIFENCEI ((riscv_zi_subext & MASK_ZIFENCEI) != 0)
-#define TARGET_ZIHINTNTL ((riscv_zi_subext & MASK_ZIHINTNTL) != 0)
-#define TARGET_ZIHINTPAUSE ((riscv_zi_subext & MASK_ZIHINTPAUSE) != 0)
-
-#define MASK_ZAWRS (1 << 0)
-#define TARGET_ZAWRS ((riscv_za_subext & MASK_ZAWRS) != 0)
-
-#define MASK_ZBA (1 << 0)
-#define MASK_ZBB (1 << 1)
-#define MASK_ZBC (1 << 2)
-#define MASK_ZBS (1 << 3)
-
-#define TARGET_ZBA ((riscv_zb_subext & MASK_ZBA) != 0)
-#define TARGET_ZBB ((riscv_zb_subext & MASK_ZBB) != 0)
-#define TARGET_ZBC ((riscv_zb_subext & MASK_ZBC) != 0)
-#define TARGET_ZBS ((riscv_zb_subext & MASK_ZBS) != 0)
-
-#define MASK_ZFINX (1 << 0)
-#define MASK_ZDINX (1 << 1)
-#define MASK_ZHINX (1 << 2)
-#define MASK_ZHINXMIN (1 << 3)
-
-#define TARGET_ZFINX ((riscv_zinx_subext & MASK_ZFINX) != 0)
-#define TARGET_ZDINX ((riscv_zinx_subext & MASK_ZDINX) != 0)
-#define TARGET_ZHINX ((riscv_zinx_subext & MASK_ZHINX) != 0)
-#define TARGET_ZHINXMIN ((riscv_zinx_subext & MASK_ZHINXMIN) != 0)
-
-#define MASK_ZBKB (1 << 0)
-#define MASK_ZBKC (1 << 1)
-#define MASK_ZBKX (1 << 2)
-#define MASK_ZKNE (1 << 3)
-#define MASK_ZKND (1 << 4)
-#define MASK_ZKNH (1 << 5)
-#define MASK_ZKR (1 << 6)
-#define MASK_ZKSED (1 << 7)
-#define MASK_ZKSH (1 << 8)
-#define MASK_ZKT (1 << 9)
-
-#define TARGET_ZBKB ((riscv_zk_subext & MASK_ZBKB) != 0)
-#define TARGET_ZBKC ((riscv_zk_subext & MASK_ZBKC) != 0)
-#define TARGET_ZBKX ((riscv_zk_subext & MASK_ZBKX) != 0)
-#define TARGET_ZKNE ((riscv_zk_subext & MASK_ZKNE) != 0)
-#define TARGET_ZKND ((riscv_zk_subext & MASK_ZKND) != 0)
-#define TARGET_ZKNH ((riscv_zk_subext & MASK_ZKNH) != 0)
-#define TARGET_ZKR ((riscv_zk_subext & MASK_ZKR) != 0)
-#define TARGET_ZKSED ((riscv_zk_subext & MASK_ZKSED) != 0)
-#define TARGET_ZKSH ((riscv_zk_subext & MASK_ZKSH) != 0)
-#define TARGET_ZKT ((riscv_zk_subext & MASK_ZKT) != 0)
-
-#define MASK_ZTSO (1 << 0)
-
-#define TARGET_ZTSO ((riscv_ztso_subext & MASK_ZTSO) != 0)
-
-#define MASK_VECTOR_ELEN_32 (1 << 0)
-#define MASK_VECTOR_ELEN_64 (1 << 1)
-#define MASK_VECTOR_ELEN_FP_32 (1 << 2)
-#define MASK_VECTOR_ELEN_FP_64 (1 << 3)
-/* Align the bit index to riscv-vector-builtins.h. */
-#define MASK_VECTOR_ELEN_FP_16 (1 << 6)
-
-#define TARGET_VECTOR_ELEN_32 \
- ((riscv_vector_elen_flags & MASK_VECTOR_ELEN_32) != 0)
-#define TARGET_VECTOR_ELEN_64 \
- ((riscv_vector_elen_flags & MASK_VECTOR_ELEN_64) != 0)
-#define TARGET_VECTOR_ELEN_FP_32 \
- ((riscv_vector_elen_flags & MASK_VECTOR_ELEN_FP_32) != 0)
-#define TARGET_VECTOR_ELEN_FP_64 \
- ((riscv_vector_elen_flags & MASK_VECTOR_ELEN_FP_64) != 0)
-#define TARGET_VECTOR_ELEN_FP_16 \
- ((riscv_vector_elen_flags & MASK_VECTOR_ELEN_FP_16) != 0)
-
-#define MASK_ZVBB (1 << 0)
-#define MASK_ZVBC (1 << 1)
-
-#define TARGET_ZVBB ((riscv_zvb_subext & MASK_ZVBB) != 0)
-#define TARGET_ZVBC ((riscv_zvb_subext & MASK_ZVBC) != 0)
-
-#define MASK_ZVKG (1 << 0)
-#define MASK_ZVKNED (1 << 1)
-#define MASK_ZVKNHA (1 << 2)
-#define MASK_ZVKNHB (1 << 3)
-#define MASK_ZVKSED (1 << 4)
-#define MASK_ZVKSH (1 << 5)
-#define MASK_ZVKN (1 << 6)
-#define MASK_ZVKNC (1 << 7)
-#define MASK_ZVKNG (1 << 8)
-#define MASK_ZVKS (1 << 9)
-#define MASK_ZVKSC (1 << 10)
-#define MASK_ZVKSG (1 << 11)
-#define MASK_ZVKT (1 << 12)
-
-#define TARGET_ZVKG ((riscv_zvk_subext & MASK_ZVKG) != 0)
-#define TARGET_ZVKNED ((riscv_zvk_subext & MASK_ZVKNED) != 0)
-#define TARGET_ZVKNHA ((riscv_zvk_subext & MASK_ZVKNHA) != 0)
-#define TARGET_ZVKNHB ((riscv_zvk_subext & MASK_ZVKNHB) != 0)
-#define TARGET_ZVKSED ((riscv_zvk_subext & MASK_ZVKSED) != 0)
-#define TARGET_ZVKSH ((riscv_zvk_subext & MASK_ZVKSH) != 0)
-#define TARGET_ZVKN ((riscv_zvk_subext & MASK_ZVKN) != 0)
-#define TARGET_ZVKNC ((riscv_zvk_subext & MASK_ZVKNC) != 0)
-#define TARGET_ZVKNG ((riscv_zvk_subext & MASK_ZVKNG) != 0)
-#define TARGET_ZVKS ((riscv_zvk_subext & MASK_ZVKS) != 0)
-#define TARGET_ZVKSC ((riscv_zvk_subext & MASK_ZVKSC) != 0)
-#define TARGET_ZVKSG ((riscv_zvk_subext & MASK_ZVKSG) != 0)
-#define TARGET_ZVKT ((riscv_zvk_subext & MASK_ZVKT) != 0)
-
-#define MASK_ZVL32B (1 << 0)
-#define MASK_ZVL64B (1 << 1)
-#define MASK_ZVL128B (1 << 2)
-#define MASK_ZVL256B (1 << 3)
-#define MASK_ZVL512B (1 << 4)
-#define MASK_ZVL1024B (1 << 5)
-#define MASK_ZVL2048B (1 << 6)
-#define MASK_ZVL4096B (1 << 7)
-#define MASK_ZVL8192B (1 << 8)
-#define MASK_ZVL16384B (1 << 9)
-#define MASK_ZVL32768B (1 << 10)
-#define MASK_ZVL65536B (1 << 11)
-
-#define TARGET_ZVL32B ((riscv_zvl_flags & MASK_ZVL32B) != 0)
-#define TARGET_ZVL64B ((riscv_zvl_flags & MASK_ZVL64B) != 0)
-#define TARGET_ZVL128B ((riscv_zvl_flags & MASK_ZVL128B) != 0)
-#define TARGET_ZVL256B ((riscv_zvl_flags & MASK_ZVL256B) != 0)
-#define TARGET_ZVL512B ((riscv_zvl_flags & MASK_ZVL512B) != 0)
-#define TARGET_ZVL1024B ((riscv_zvl_flags & MASK_ZVL1024B) != 0)
-#define TARGET_ZVL2048B ((riscv_zvl_flags & MASK_ZVL2048B) != 0)
-#define TARGET_ZVL4096B ((riscv_zvl_flags & MASK_ZVL4096B) != 0)
-#define TARGET_ZVL8192B ((riscv_zvl_flags & MASK_ZVL8192B) != 0)
-#define TARGET_ZVL16384B ((riscv_zvl_flags & MASK_ZVL16384B) != 0)
-#define TARGET_ZVL32768B ((riscv_zvl_flags & MASK_ZVL32768B) != 0)
-#define TARGET_ZVL65536B ((riscv_zvl_flags & MASK_ZVL65536B) != 0)
-
-#define MASK_ZICBOZ (1 << 0)
-#define MASK_ZICBOM (1 << 1)
-#define MASK_ZICBOP (1 << 2)
-
-#define TARGET_ZICBOZ ((riscv_zicmo_subext & MASK_ZICBOZ) != 0)
-#define TARGET_ZICBOM ((riscv_zicmo_subext & MASK_ZICBOM) != 0)
-#define TARGET_ZICBOP ((riscv_zicmo_subext & MASK_ZICBOP) != 0)
-
-#define MASK_ZICOND (1 << 2)
-#define TARGET_ZICOND ((riscv_zi_subext & MASK_ZICOND) != 0)
-
-#define MASK_ZFA (1 << 0)
-#define TARGET_ZFA ((riscv_zfa_subext & MASK_ZFA) != 0)
-
-#define MASK_ZFHMIN (1 << 0)
-#define MASK_ZFH (1 << 1)
-#define MASK_ZVFHMIN (1 << 2)
-#define MASK_ZVFH (1 << 3)
-
-#define TARGET_ZFHMIN ((riscv_zf_subext & MASK_ZFHMIN) != 0)
-#define TARGET_ZFH ((riscv_zf_subext & MASK_ZFH) != 0)
-#define TARGET_ZVFHMIN ((riscv_zf_subext & MASK_ZVFHMIN) != 0)
-#define TARGET_ZVFH ((riscv_zf_subext & MASK_ZVFH) != 0)
-
-#define MASK_ZMMUL (1 << 0)
-#define TARGET_ZMMUL ((riscv_zm_subext & MASK_ZMMUL) != 0)
-
-#define MASK_ZCA (1 << 0)
-#define MASK_ZCB (1 << 1)
-#define MASK_ZCE (1 << 2)
-#define MASK_ZCF (1 << 3)
-#define MASK_ZCD (1 << 4)
-#define MASK_ZCMP (1 << 5)
-#define MASK_ZCMT (1 << 6)
-
-#define TARGET_ZCA ((riscv_zc_subext & MASK_ZCA) != 0)
-#define TARGET_ZCB ((riscv_zc_subext & MASK_ZCB) != 0)
-#define TARGET_ZCE ((riscv_zc_subext & MASK_ZCE) != 0)
-#define TARGET_ZCF ((riscv_zc_subext & MASK_ZCF) != 0)
-#define TARGET_ZCD ((riscv_zc_subext & MASK_ZCD) != 0)
-#define TARGET_ZCMP ((riscv_zc_subext & MASK_ZCMP) != 0)
-#define TARGET_ZCMT ((riscv_zc_subext & MASK_ZCMT) != 0)
-
-#define MASK_SVINVAL (1 << 0)
-#define MASK_SVNAPOT (1 << 1)
-
-#define TARGET_SVINVAL ((riscv_sv_subext & MASK_SVINVAL) != 0)
-#define TARGET_SVNAPOT ((riscv_sv_subext & MASK_SVNAPOT) != 0)
+#define TARGET_ZICOND_LIKE (TARGET_ZICOND || (TARGET_XVENTANACONDOPS && TARGET_64BIT))
 /* Bit of riscv_zvl_flags will be set continuously; the N-1 bit will be set if the N-bit is
set, e.g. MASK_ZVL64B has set then MASK_ZVL32B is set, so we can use
@@ -295,41 +112,18 @@ enum riscv_entity
? 0 \
: 32 << (__builtin_popcount (riscv_zvl_flags) - 1))
-#define MASK_XTHEADBA (1 << 0)
-#define MASK_XTHEADBB (1 << 1)
-#define MASK_XTHEADBS (1 << 2)
-#define MASK_XTHEADCMO (1 << 3)
-#define MASK_XTHEADCONDMOV (1 << 4)
-#define MASK_XTHEADFMEMIDX (1 << 5)
-#define MASK_XTHEADFMV (1 << 6)
-#define MASK_XTHEADINT (1 << 7)
-#define MASK_XTHEADMAC (1 << 8)
-#define MASK_XTHEADMEMIDX (1 << 9)
-#define MASK_XTHEADMEMPAIR (1 << 10)
-#define MASK_XTHEADSYNC (1 << 11)
-
-#define TARGET_XTHEADBA ((riscv_xthead_subext & MASK_XTHEADBA) != 0)
-#define TARGET_XTHEADBB ((riscv_xthead_subext & MASK_XTHEADBB) != 0)
-#define TARGET_XTHEADBS ((riscv_xthead_subext & MASK_XTHEADBS) != 0)
-#define TARGET_XTHEADCMO ((riscv_xthead_subext & MASK_XTHEADCMO) != 0)
-#define TARGET_XTHEADCONDMOV ((riscv_xthead_subext & MASK_XTHEADCONDMOV) != 0)
-#define TARGET_XTHEADFMEMIDX ((riscv_xthead_subext & MASK_XTHEADFMEMIDX) != 0)
-#define TARGET_XTHEADFMV ((riscv_xthead_subext & MASK_XTHEADFMV) != 0)
-#define TARGET_XTHEADINT ((riscv_xthead_subext & MASK_XTHEADINT) != 0)
-#define TARGET_XTHEADMAC ((riscv_xthead_subext & MASK_XTHEADMAC) != 0)
-#define TARGET_XTHEADMEMIDX ((riscv_xthead_subext & MASK_XTHEADMEMIDX) != 0)
-#define TARGET_XTHEADMEMPAIR ((riscv_xthead_subext & MASK_XTHEADMEMPAIR) != 0)
-#define TARGET_XTHEADSYNC ((riscv_xthead_subext & MASK_XTHEADSYNC) != 0)
-
-#define MASK_XVENTANACONDOPS (1 << 0)
-
-#define TARGET_XVENTANACONDOPS ((riscv_xventana_subext & MASK_XVENTANACONDOPS) != 0)
-
-#define TARGET_ZICOND_LIKE (TARGET_ZICOND || (TARGET_XVENTANACONDOPS && TARGET_64BIT))
+/* Same as TARGET_MIN_VLEN, but take an OPTS as gcc_options. */
+#define TARGET_MIN_VLEN_OPTS(opts) \
+ ((opts->x_riscv_zvl_flags == 0) \
+ ? 0 \
+ : 32 << (__builtin_popcount (opts->x_riscv_zvl_flags) - 1))
/* We only enable VLS modes for VLA vectorization since fixed length VLMAX mode
is the highest priority choice and should not conflict with VLS modes. */
#define TARGET_VECTOR_VLS \
(TARGET_VECTOR && riscv_autovec_preference == RVV_SCALABLE)
+/* TODO: Enable RVV movmisalign by default for now. */
+#define TARGET_VECTOR_MISALIGN_SUPPORTED 1
+
#endif /* ! GCC_RISCV_OPTS_H */
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index 368982a..6190faa 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -117,7 +117,6 @@ extern rtx riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y);
extern bool riscv_expand_conditional_move (rtx, rtx, rtx, rtx);
extern rtx riscv_legitimize_call_address (rtx);
extern void riscv_set_return_address (rtx, rtx);
-extern bool riscv_expand_block_move (rtx, rtx, rtx);
extern rtx riscv_return_addr (int, rtx);
extern poly_int64 riscv_initial_elimination_offset (int, int);
extern void riscv_expand_prologue (void);
@@ -125,7 +124,6 @@ extern void riscv_expand_epilogue (int);
extern bool riscv_epilogue_uses (unsigned int);
extern bool riscv_can_use_return_insn (void);
extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
-extern bool riscv_expand_block_move (rtx, rtx, rtx);
extern bool riscv_store_data_bypass_p (rtx_insn *, rtx_insn *);
extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info *);
extern bool riscv_gpr_save_operation_p (rtx);
@@ -160,6 +158,9 @@ extern bool riscv_hard_regno_rename_ok (unsigned, unsigned);
rtl_opt_pass * make_pass_shorten_memrefs (gcc::context *ctxt);
rtl_opt_pass * make_pass_vsetvl (gcc::context *ctxt);
+/* Routines implemented in riscv-string.c. */
+extern bool riscv_expand_block_move (rtx, rtx, rtx);
+
/* Information about one CPU we know about. */
struct riscv_cpu_info {
/* This CPU's canonical name. */
@@ -302,6 +303,9 @@ enum insn_type : unsigned int
UNARY_OP_TAMA = __MASK_OP_TAMA | UNARY_OP_P,
UNARY_OP_TAMU = __MASK_OP_TAMU | UNARY_OP_P,
UNARY_OP_FRM_DYN = UNARY_OP | FRM_DYN_P,
+ UNARY_OP_FRM_RMM = UNARY_OP | FRM_RMM_P,
+ UNARY_OP_FRM_RUP = UNARY_OP | FRM_RUP_P,
+ UNARY_OP_FRM_RDN = UNARY_OP | FRM_RDN_P,
UNARY_OP_TAMU_FRM_DYN = UNARY_OP_TAMU | FRM_DYN_P,
UNARY_OP_TAMU_FRM_RUP = UNARY_OP_TAMU | FRM_RUP_P,
UNARY_OP_TAMU_FRM_RDN = UNARY_OP_TAMU | FRM_RDN_P,
@@ -421,7 +425,7 @@ rtx expand_builtin (unsigned int, tree, rtx);
bool check_builtin_call (location_t, vec<location_t>, unsigned int,
tree, unsigned int, tree *);
bool const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-bool legitimize_move (rtx, rtx);
+bool legitimize_move (rtx, rtx *);
void emit_vlmax_vsetvl (machine_mode, rtx);
void emit_hard_vlmax_vsetvl (machine_mode, rtx);
void emit_vlmax_insn (unsigned, unsigned, rtx *);
@@ -474,6 +478,10 @@ void expand_vec_rint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_round (rtx, rtx, machine_mode, machine_mode);
void expand_vec_trunc (rtx, rtx, machine_mode, machine_mode);
void expand_vec_roundeven (rtx, rtx, machine_mode, machine_mode);
+void expand_vec_lrint (rtx, rtx, machine_mode, machine_mode);
+void expand_vec_lround (rtx, rtx, machine_mode, machine_mode);
+void expand_vec_lceil (rtx, rtx, machine_mode, machine_mode);
+void expand_vec_lfloor (rtx, rtx, machine_mode, machine_mode);
#endif
bool sew64_scalar_helper (rtx *, rtx *, rtx, machine_mode,
bool, void (*)(rtx *, rtx));
@@ -492,6 +500,7 @@ bool slide1_sew64_helper (int, machine_mode, machine_mode,
machine_mode, rtx *);
rtx gen_avl_for_scalar_move (rtx);
void expand_tuple_move (rtx *);
+bool expand_block_move (rtx, rtx, rtx);
machine_mode preferred_simd_mode (scalar_mode);
machine_mode get_mask_mode (machine_mode);
void expand_vec_series (rtx, rtx, rtx);
@@ -542,6 +551,7 @@ opt_machine_mode vectorize_related_mode (machine_mode, scalar_mode,
unsigned int autovectorize_vector_modes (vec<machine_mode> *, bool);
bool cmp_lmul_le_one (machine_mode);
bool cmp_lmul_gt_one (machine_mode);
+bool gather_scatter_valid_offset_mode_p (machine_mode);
}
/* We classify builtin types into two classes:
diff --git a/gcc/config/riscv/riscv-string.cc b/gcc/config/riscv/riscv-string.cc
index 2bdff03..0b4606a 100644
--- a/gcc/config/riscv/riscv-string.cc
+++ b/gcc/config/riscv/riscv-string.cc
@@ -592,3 +592,158 @@ riscv_expand_strlen (rtx result, rtx src, rtx search_char, rtx align)
return false;
}
+
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+ Assume that the areas do not overlap. */
+
+static void
+riscv_block_move_straight (rtx dest, rtx src, unsigned HOST_WIDE_INT length)
+{
+ unsigned HOST_WIDE_INT offset, delta;
+ unsigned HOST_WIDE_INT bits;
+ int i;
+ enum machine_mode mode;
+ rtx *regs;
+
+ bits = MAX (BITS_PER_UNIT,
+ MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
+
+ mode = mode_for_size (bits, MODE_INT, 0).require ();
+ delta = bits / BITS_PER_UNIT;
+
+ /* Allocate a buffer for the temporary registers. */
+ regs = XALLOCAVEC (rtx, length / delta);
+
+ /* Load as many BITS-sized chunks as possible. Use a normal load if
+ the source has enough alignment, otherwise use left/right pairs. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ {
+ regs[i] = gen_reg_rtx (mode);
+ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
+ }
+
+ /* Copy the chunks to the destination. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
+
+ /* Mop up any left-over bytes. */
+ if (offset < length)
+ {
+ src = adjust_address (src, BLKmode, offset);
+ dest = adjust_address (dest, BLKmode, offset);
+ move_by_pieces (dest, src, length - offset,
+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), RETURN_BEGIN);
+ }
+}
+
+/* Helper function for doing a loop-based block operation on memory
+ reference MEM. Each iteration of the loop will operate on LENGTH
+ bytes of MEM.
+
+ Create a new base register for use within the loop and point it to
+ the start of MEM. Create a new memory reference that uses this
+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
+
+static void
+riscv_adjust_block_mem (rtx mem, unsigned HOST_WIDE_INT length,
+ rtx *loop_reg, rtx *loop_mem)
+{
+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
+
+ /* Although the new mem does not refer to a known location,
+ it does keep up to LENGTH bytes of alignment. */
+ *loop_mem = change_address (mem, BLKmode, *loop_reg);
+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
+}
+
+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
+ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
+ the memory regions do not overlap. */
+
+static void
+riscv_block_move_loop (rtx dest, rtx src, unsigned HOST_WIDE_INT length,
+ unsigned HOST_WIDE_INT bytes_per_iter)
+{
+ rtx label, src_reg, dest_reg, final_src, test;
+ unsigned HOST_WIDE_INT leftover;
+
+ leftover = length % bytes_per_iter;
+ length -= leftover;
+
+ /* Create registers and memory references for use within the loop. */
+ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
+ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
+
+ /* Calculate the value that SRC_REG should have after the last iteration
+ of the loop. */
+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
+ 0, 0, OPTAB_WIDEN);
+
+ /* Emit the start of the loop. */
+ label = gen_label_rtx ();
+ emit_label (label);
+
+ /* Emit the loop body. */
+ riscv_block_move_straight (dest, src, bytes_per_iter);
+
+ /* Move on to the next block. */
+ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
+ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
+
+ /* Emit the loop condition. */
+ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
+ emit_jump_insn (gen_cbranch4 (Pmode, test, src_reg, final_src, label));
+
+ /* Mop up any left-over bytes. */
+ if (leftover)
+ riscv_block_move_straight (dest, src, leftover);
+ else
+ emit_insn(gen_nop ());
+}
+
+/* Expand a cpymemsi instruction, which copies LENGTH bytes from
+ memory reference SRC to memory reference DEST. */
+
+bool
+riscv_expand_block_move (rtx dest, rtx src, rtx length)
+{
+ if (CONST_INT_P (length))
+ {
+ unsigned HOST_WIDE_INT hwi_length = UINTVAL (length);
+ unsigned HOST_WIDE_INT factor, align;
+
+ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
+ factor = BITS_PER_WORD / align;
+
+ if (optimize_function_for_size_p (cfun)
+ && hwi_length * factor * UNITS_PER_WORD > MOVE_RATIO (false))
+ return false;
+
+ if (hwi_length <= (RISCV_MAX_MOVE_BYTES_STRAIGHT / factor))
+ {
+ riscv_block_move_straight (dest, src, INTVAL (length));
+ return true;
+ }
+ else if (optimize && align >= BITS_PER_WORD)
+ {
+ unsigned min_iter_words
+ = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
+ unsigned iter_words = min_iter_words;
+ unsigned HOST_WIDE_INT bytes = hwi_length;
+ unsigned HOST_WIDE_INT words = bytes / UNITS_PER_WORD;
+
+ /* Lengthen the loop body if it shortens the tail. */
+ for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
+ {
+ unsigned cur_cost = iter_words + words % iter_words;
+ unsigned new_cost = i + words % i;
+ if (new_cost <= cur_cost)
+ iter_words = i;
+ }
+
+ riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/gcc/config/riscv/riscv-subset.h b/gcc/config/riscv/riscv-subset.h
index dca0728..d2a4bd2 100644
--- a/gcc/config/riscv/riscv-subset.h
+++ b/gcc/config/riscv/riscv-subset.h
@@ -69,8 +69,12 @@ private:
const char *parse_std_ext (const char *);
+ const char *parse_single_std_ext (const char *);
+
const char *parse_multiletter_ext (const char *, const char *,
const char *);
+ const char *parse_single_multiletter_ext (const char *, const char *,
+ const char *);
void handle_implied_ext (const char *);
bool check_implied_ext ();
@@ -91,14 +95,21 @@ public:
unsigned xlen () const {return m_xlen;};
+ riscv_subset_list *clone () const;
+
static riscv_subset_list *parse (const char *, location_t);
+ const char *parse_single_ext (const char *);
const riscv_subset_t *begin () const {return m_head;};
const riscv_subset_t *end () const {return NULL;};
int match_score (riscv_subset_list *) const;
+
+ void set_loc (location_t);
};
extern const riscv_subset_list *riscv_current_subset_list (void);
+extern void
+riscv_set_arch_by_subset_list (riscv_subset_list *, struct gcc_options *);
#endif /* ! GCC_RISCV_SUBSET_H */
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 26700cf..895c11d 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -49,6 +49,7 @@
#include "tm-constrs.h"
#include "rtx-vector-builder.h"
#include "targhooks.h"
+#include "predict.h"
using namespace riscv_vector;
@@ -1013,7 +1014,7 @@ expand_const_vector (rtx target, rtx src)
rtx base, step;
if (const_vec_series_p (src, &base, &step))
{
- emit_insn (gen_vec_series (mode, target, base, step));
+ expand_vec_series (target, base, step);
return;
}
@@ -1170,7 +1171,7 @@ expand_const_vector (rtx target, rtx src)
rtx step = CONST_VECTOR_ELT (src, 2);
/* Step 1 - { base1, base1 + step, base1 + step * 2, ... } */
rtx tmp = gen_reg_rtx (mode);
- emit_insn (gen_vec_series (mode, tmp, base1, step));
+ expand_vec_series (tmp, base1, step);
/* Step 2 - { base0, base1, base1 + step, base1 + step * 2, ... } */
scalar_mode elem_mode = GET_MODE_INNER (mode);
if (!rtx_equal_p (base0, const0_rtx))
@@ -1217,10 +1218,12 @@ get_frm_mode (rtx operand)
}
/* Expand a pre-RA RVV data move from SRC to DEST.
- It expands move for RVV fractional vector modes. */
+ It expands move for RVV fractional vector modes.
+ Return true if the move has already been emitted.
bool
-legitimize_move (rtx dest, rtx src)
+legitimize_move (rtx dest, rtx *srcp)
{
+ rtx src = *srcp;
machine_mode mode = GET_MODE (dest);
if (CONST_VECTOR_P (src))
{
@@ -1238,7 +1241,7 @@ legitimize_move (rtx dest, rtx src)
{
/* Need to force register if mem <- !reg. */
if (MEM_P (dest) && !REG_P (src))
- src = force_reg (mode, src);
+ *srcp = force_reg (mode, src);
return false;
}
@@ -1269,7 +1272,7 @@ legitimize_move (rtx dest, rtx src)
{
/* Need to force register if mem <- !reg. */
if (MEM_P (dest) && !REG_P (src))
- src = force_reg (mode, src);
+ *srcp = force_reg (mode, src);
return false;
}
@@ -1989,6 +1992,206 @@ expand_tuple_move (rtx *ops)
}
}
+/* Used by cpymemsi in riscv.md . */
+
+bool
+expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
+{
+ /*
+ memcpy:
+ mv a3, a0 # Copy destination
+ loop:
+ vsetvli t0, a2, e8, m8, ta, ma # Vectors of 8b
+ vle8.v v0, (a1) # Load bytes
+ add a1, a1, t0 # Bump pointer
+ sub a2, a2, t0 # Decrement count
+ vse8.v v0, (a3) # Store bytes
+ add a3, a3, t0 # Bump pointer
+ bnez a2, loop # Any more?
+ ret # Return
+ */
+ if (!TARGET_VECTOR)
+ return false;
+ HOST_WIDE_INT potential_ew
+ = (MIN (MIN (MEM_ALIGN (src_in), MEM_ALIGN (dst_in)), BITS_PER_WORD)
+ / BITS_PER_UNIT);
+ machine_mode vmode = VOIDmode;
+ bool need_loop = true;
+ bool size_p = optimize_function_for_size_p (cfun);
+ rtx src, dst;
+ rtx end = gen_reg_rtx (Pmode);
+ rtx vec;
+ rtx length_rtx = length_in;
+
+ if (CONST_INT_P (length_in))
+ {
+ HOST_WIDE_INT length = INTVAL (length_in);
+
+ /* By using LMUL=8, we can copy as many bytes in one go as there
+ are bits in a vector register. If the entire block thus fits,
+ we don't need a loop. */
+ if (length <= TARGET_MIN_VLEN)
+ {
+ need_loop = false;
+
+ /* If a single scalar load / store pair can do the job, leave it
+ to the scalar code to do that. */
+ /* ??? If fast unaligned access is supported, the scalar code could
+ use suitably sized scalars irrespective of alignment. If that
+ gets fixed, we have to adjust the test here. */
+
+ if (pow2p_hwi (length) && length <= potential_ew)
+ return false;
+ }
+
+ /* Find the vector mode to use. Using the largest possible element
+ size is likely to give smaller constants, and thus potentially
+ reducing code size. However, if we need a loop, we need to update
+ the pointers, and that is more complicated with a larger element
+ size, unless we use an immediate, which prevents us from dynamically
+ using the targets transfer size that the hart supports. And then,
+ unless we know the *exact* vector size of the hart, we'd need
+ multiple vsetvli / branch statements, so it's not even a size win.
+ If, in the future, we find a RISC-V implementation that is slower
+ for small element widths, we might allow larger element widths for
+ loops too. */
+ if (need_loop)
+ potential_ew = 1;
+ for (; potential_ew; potential_ew >>= 1)
+ {
+ scalar_int_mode elem_mode;
+ unsigned HOST_WIDE_INT bits = potential_ew * BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT per_iter;
+ HOST_WIDE_INT nunits;
+
+ if (need_loop)
+ per_iter = TARGET_MIN_VLEN;
+ else
+ per_iter = length;
+ nunits = per_iter / potential_ew;
+
+ /* Unless we get an implementation that's slow for small element
+ size / non-word-aligned accesses, we assume that the hardware
+ handles this well, and we don't want to complicate the code
+ with shifting word contents around or handling extra bytes at
+ the start and/or end. So we want the total transfer size and
+ alignment to fit with the element size. */
+ if (length % potential_ew != 0
+ || !int_mode_for_size (bits, 0).exists (&elem_mode))
+ continue;
+ /* Find the mode to use for the copy inside the loop - or the
+ sole copy, if there is no loop. */
+ if (!need_loop)
+ {
+ /* Try if we have an exact mode for the copy. */
+ if (get_vector_mode (elem_mode, nunits).exists (&vmode))
+ break;
+ /* Since we don't have a mode that exactly matches the transfer
+ size, we'll need to use pred_store, which is not available
+ for all vector modes, but only iE_RVV_M* modes, hence trying
+ to find a vector mode for a merely rounded-up size is
+ pointless.
+ Still, by choosing a lower LMUL factor that still allows
+ an entire transfer, we can reduce register pressure. */
+ for (unsigned lmul = 1; lmul <= 4; lmul <<= 1)
+ if (TARGET_MIN_VLEN * lmul <= nunits * BITS_PER_UNIT
+ /* Avoid losing the option of using vsetivli . */
+ && (nunits <= 31 * lmul || nunits > 31 * 8)
+ && (get_vector_mode
+ (elem_mode,
+ exact_div (BYTES_PER_RISCV_VECTOR * lmul,
+ potential_ew)
+ ).exists (&vmode)))
+ break;
+ }
+
+ /* The RVVM8?I modes are notionally 8 * BYTES_PER_RISCV_VECTOR bytes
+ wide. BYTES_PER_RISCV_VECTOR can't be evenly divided by
+ the sizes of larger element types; the LMUL factor of 8 can at
+ the moment be divided by the SEW, with SEW of up to 8 bytes,
+ but there are reserved encodings so there might be larger
+ SEW in the future. */
+ if (get_vector_mode (elem_mode,
+ exact_div (BYTES_PER_RISCV_VECTOR * 8,
+ potential_ew)).exists (&vmode))
+ break;
+
+ /* We may get here if we tried an element size that's larger than
+ the hardware supports, but we should at least find a suitable
+ byte vector mode. */
+ gcc_assert (potential_ew > 1);
+ }
+ if (potential_ew > 1)
+ length_rtx = GEN_INT (length / potential_ew);
+ }
+ else
+ {
+ vmode = E_RVVM8QImode;
+ }
+
+ /* A memcpy libcall in the worst case takes 3 instructions to prepare the
+ arguments + 1 for the call. When RVV should take 7 instructions and
+ we're optimizing for size a libcall may be preferable. */
+ if (size_p && need_loop)
+ return false;
+
+ /* length_rtx holds the (remaining) length of the required copy.
+ cnt holds the length we copy with the current load/store pair. */
+ rtx cnt = length_rtx;
+ rtx label = NULL_RTX;
+ rtx dst_addr = copy_addr_to_reg (XEXP (dst_in, 0));
+ rtx src_addr = copy_addr_to_reg (XEXP (src_in, 0));
+
+ if (need_loop)
+ {
+ length_rtx = copy_to_mode_reg (Pmode, length_rtx);
+ cnt = gen_reg_rtx (Pmode);
+ label = gen_label_rtx ();
+
+ emit_label (label);
+ emit_insn (gen_no_side_effects_vsetvl_rtx (vmode, cnt, length_rtx));
+ }
+
+ vec = gen_reg_rtx (vmode);
+ src = change_address (src_in, vmode, src_addr);
+ dst = change_address (dst_in, vmode, dst_addr);
+
+ /* If we don't need a loop and have a suitable mode to describe the size,
+ just do a load / store pair and leave it up to the later lazy code
+ motion pass to insert the appropriate vsetvli. */
+ if (!need_loop && known_eq (GET_MODE_SIZE (vmode), INTVAL (length_in)))
+ {
+ emit_move_insn (vec, src);
+ emit_move_insn (dst, vec);
+ }
+ else
+ {
+ machine_mode mask_mode = get_vector_mode (BImode, GET_MODE_NUNITS (vmode)).require ();
+ rtx mask = CONSTM1_RTX (mask_mode);
+ if (!satisfies_constraint_K (cnt))
+ cnt= force_reg (Pmode, cnt);
+ rtx m_ops[] = {vec, mask, src};
+ emit_nonvlmax_insn (code_for_pred_mov (vmode), UNARY_OP_TAMA,
+ m_ops, cnt);
+ emit_insn (gen_pred_store (vmode, dst, mask, vec, cnt,
+ get_avl_type_rtx (NONVLMAX)));
+ }
+
+ if (need_loop)
+ {
+ emit_insn (gen_rtx_SET (src_addr, gen_rtx_PLUS (Pmode, src_addr, cnt)));
+ emit_insn (gen_rtx_SET (dst_addr, gen_rtx_PLUS (Pmode, dst_addr, cnt)));
+ emit_insn (gen_rtx_SET (length_rtx, gen_rtx_MINUS (Pmode, length_rtx, cnt)));
+
+ /* Emit the loop condition. */
+ rtx test = gen_rtx_NE (VOIDmode, end, const0_rtx);
+ emit_jump_insn (gen_cbranch4 (Pmode, test, length_rtx, const0_rtx, label));
+ emit_insn (gen_nop ());
+ }
+
+ return true;
+}
+
/* Return the vectorization machine mode for RVV according to LMUL. */
machine_mode
preferred_simd_mode (scalar_mode mode)
@@ -2619,6 +2822,89 @@ shuffle_merge_patterns (struct expand_vec_perm_d *d)
return true;
}
+/* Recognize the consecutive index that we can use a single
+ vrgather.v[x|i] to shuffle the vectors.
+
+ e.g. short[8] = VEC_PERM_EXPR <a, a, {0,1,0,1,0,1,0,1}>
+ Use SEW = 32, index = 1 vrgather.vi to get the result. */
+static bool
+shuffle_consecutive_patterns (struct expand_vec_perm_d *d)
+{
+ machine_mode vmode = d->vmode;
+ scalar_mode smode = GET_MODE_INNER (vmode);
+ poly_int64 vec_len = d->perm.length ();
+ HOST_WIDE_INT elt;
+
+ if (!vec_len.is_constant () || !d->perm[0].is_constant (&elt))
+ return false;
+ int vlen = vec_len.to_constant ();
+
+ /* Compute the last element index of consecutive pattern from the leading
+ consecutive elements. */
+ int last_consecutive_idx = -1;
+ int consecutive_num = -1;
+ for (int i = 1; i < vlen; i++)
+ {
+ if (maybe_ne (d->perm[i], d->perm[i - 1] + 1))
+ break;
+ last_consecutive_idx = i;
+ consecutive_num = last_consecutive_idx + 1;
+ }
+
+ int new_vlen = vlen / consecutive_num;
+ if (last_consecutive_idx < 0 || consecutive_num == vlen
+ || !pow2p_hwi (consecutive_num) || !pow2p_hwi (new_vlen))
+ return false;
+ /* VEC_PERM <..., (index, index + 1, ... index + consecutive_num - 1)>.
+ All elements of index, index + 1, ... index + consecutive_num - 1 should
+ locate at the same vector. */
+ if (maybe_ge (d->perm[0], vec_len)
+ != maybe_ge (d->perm[last_consecutive_idx], vec_len))
+ return false;
+ /* If a vector has 8 elements. We allow optimizations on consecutive
+ patterns e.g. <0, 1, 2, 3, 0, 1, 2, 3> or <4, 5, 6, 7, 4, 5, 6, 7>.
+ Other patterns like <2, 3, 4, 5, 2, 3, 4, 5> are not feasible patterns
+ to be optimized. */
+ if (d->perm[0].to_constant () % consecutive_num != 0)
+ return false;
+ unsigned int container_bits = consecutive_num * GET_MODE_BITSIZE (smode);
+ if (container_bits > 64)
+ return false;
+ else if (container_bits == 64)
+ {
+ if (!TARGET_VECTOR_ELEN_64)
+ return false;
+ else if (FLOAT_MODE_P (smode) && !TARGET_VECTOR_ELEN_FP_64)
+ return false;
+ }
+
+ /* Check the rest of elements are the same consecutive pattern. */
+ for (int i = consecutive_num; i < vlen; i++)
+ if (maybe_ne (d->perm[i], d->perm[i % consecutive_num]))
+ return false;
+
+ if (FLOAT_MODE_P (smode))
+ smode = float_mode_for_size (container_bits).require ();
+ else
+ smode = int_mode_for_size (container_bits, 0).require ();
+ if (!get_vector_mode (smode, new_vlen).exists (&vmode))
+ return false;
+ machine_mode sel_mode = related_int_vector_mode (vmode).require ();
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ int index = elt / consecutive_num;
+ if (index >= new_vlen)
+ index = index - new_vlen;
+ rtx sel = gen_const_vector_dup (sel_mode, index);
+ rtx op = elt >= vlen ? d->op1 : d->op0;
+ emit_vlmax_gather_insn (gen_lowpart (vmode, d->target),
+ gen_lowpart (vmode, op), sel);
+ return true;
+}
+
/* Recognize the patterns that we can use compress operation to shuffle the
vectors. The perm selector of compress pattern is divided into 2 part:
The first part is the random index number < NUNITS.
@@ -2817,7 +3103,7 @@ shuffle_decompress_patterns (struct expand_vec_perm_d *d)
/* Generate { 0, 1, .... } mask. */
rtx vid = gen_reg_rtx (sel_mode);
rtx vid_repeat = gen_reg_rtx (sel_mode);
- emit_insn (gen_vec_series (sel_mode, vid, const0_rtx, const1_rtx));
+ expand_vec_series (vid, const0_rtx, const1_rtx);
rtx and_ops[] = {vid_repeat, vid, const1_rtx};
emit_vlmax_insn (code_for_pred_scalar (AND, sel_mode), BINARY_OP, and_ops);
rtx const_vec = gen_const_vector_dup (sel_mode, 1);
@@ -2827,6 +3113,95 @@ shuffle_decompress_patterns (struct expand_vec_perm_d *d)
return true;
}
+static bool
+shuffle_bswap_pattern (struct expand_vec_perm_d *d)
+{
+ HOST_WIDE_INT diff;
+ unsigned i, size, step;
+
+ if (!d->one_vector_p || !d->perm[0].is_constant (&diff) || !diff)
+ return false;
+
+ step = diff + 1;
+ size = step * GET_MODE_UNIT_BITSIZE (d->vmode);
+
+ switch (size)
+ {
+ case 16:
+ break;
+ case 32:
+ case 64:
+ /* We will have VEC_PERM_EXPR after rtl expand when invoking
+ __builtin_bswap. It will generate about 9 instructions in
+ a loop as below, regardless of whether it is bswap16, bswap32 or bswap64.
+ .L2:
+ 1 vle16.v v4,0(a0)
+ 2 vmv.v.x v2,a7
+ 3 vand.vv v2,v6,v2
+ 4 slli a2,a5,1
+ 5 vrgatherei16.vv v1,v4,v2
+ 6 sub a4,a4,a5
+ 7 vse16.v v1,0(a3)
+ 8 add a0,a0,a2
+ 9 add a3,a3,a2
+ bne a4,zero,.L2
+
+ But for bswap16 we may have an even simpler code gen, which
+ has only 7 instructions in loop as below.
+ .L5
+ 1 vle8.v v2,0(a5)
+ 2 addi a5,a5,32
+ 3 vsrl.vi v4,v2,8
+ 4 vsll.vi v2,v2,8
+ 5 vor.vv v4,v4,v2
+ 6 vse8.v v4,0(a4)
+ 7 addi a4,a4,32
+ bne a5,a6,.L5
+
+ Unfortunately, the instructions in loop will grow to 13 and 24
+ for bswap32 and bswap64. Thus, we will leverage vrgather (9 insn)
+ for both the bswap64 and bswap32, but take shift and or (7 insn)
+ for bswap16.
+ */
+ default:
+ return false;
+ }
+
+ for (i = 0; i < step; i++)
+ if (!d->perm.series_p (i, step, diff - i, step))
+ return false;
+
+ if (d->testing_p)
+ return true;
+
+ machine_mode vhi_mode;
+ poly_uint64 vhi_nunits = exact_div (GET_MODE_NUNITS (d->vmode), 2);
+
+ if (!get_vector_mode (HImode, vhi_nunits).exists (&vhi_mode))
+ return false;
+
+ /* Step-1: Move op0 to src with VHI mode. */
+ rtx src = gen_reg_rtx (vhi_mode);
+ emit_move_insn (src, gen_lowpart (vhi_mode, d->op0));
+
+ /* Step-2: Shift right 8 bits to dest. */
+ rtx dest = expand_binop (vhi_mode, lshr_optab, src, gen_int_mode (8, Pmode),
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ /* Step-3: Shift left 8 bits to src. */
+ src = expand_binop (vhi_mode, ashl_optab, src, gen_int_mode (8, Pmode),
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ /* Step-4: Logic Or dest and src to dest. */
+ dest = expand_binop (vhi_mode, ior_optab, dest, src,
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ /* Step-5: Move src to target with VQI mode. */
+ emit_move_insn (d->target, gen_lowpart (d->vmode, dest));
+
+ return true;
+}
+
/* Recognize the pattern that can be shuffled by generic approach. */
static bool
@@ -2882,10 +3257,14 @@ expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
if (shuffle_merge_patterns (d))
return true;
+ if (shuffle_consecutive_patterns (d))
+ return true;
if (shuffle_compress_patterns (d))
return true;
if (shuffle_decompress_patterns (d))
return true;
+ if (shuffle_bswap_pattern (d))
+ return true;
if (shuffle_generic_patterns (d))
return true;
return false;
@@ -3197,15 +3576,14 @@ expand_gather_scatter (rtx *ops, bool is_load)
machine_mode vec_mode = GET_MODE (vec_reg);
machine_mode idx_mode = GET_MODE (vec_offset);
- scalar_mode inner_vec_mode = GET_MODE_INNER (vec_mode);
scalar_mode inner_idx_mode = GET_MODE_INNER (idx_mode);
- unsigned inner_vsize = GET_MODE_BITSIZE (inner_vec_mode);
unsigned inner_offsize = GET_MODE_BITSIZE (inner_idx_mode);
poly_int64 nunits = GET_MODE_NUNITS (vec_mode);
poly_int64 value;
bool is_vlmax = poly_int_rtx_p (len, &value) && known_eq (value, nunits);
- if (inner_offsize < inner_vsize)
+ /* Extend the offset element to address width. */
+ if (inner_offsize < BITS_PER_WORD)
{
/* 7.2. Vector Load/Store Addressing Modes.
If the vector offset elements are narrower than XLEN, they are
@@ -3502,6 +3880,14 @@ cmp_lmul_gt_one (machine_mode mode)
return false;
}
+/* Return true if the gather/scatter offset mode is valid. */
+bool
+gather_scatter_valid_offset_mode_p (machine_mode mode)
+{
+ machine_mode new_mode;
+ return get_vector_mode (Pmode, GET_MODE_NUNITS (mode)).exists (&new_mode);
+}
+
/* We don't have to convert the floating point to integer when the
mantissa is zero. Thus, ther will be a limitation for both the
single and double precision floating point. There will be no
@@ -3618,6 +4004,16 @@ emit_vec_cvt_x_f (rtx op_dest, rtx op_src, rtx mask,
}
static void
+emit_vec_cvt_x_f (rtx op_dest, rtx op_src, insn_type type,
+ machine_mode vec_mode)
+{
+ rtx ops[] = {op_dest, op_src};
+ insn_code icode = code_for_pred_fcvt_x_f (UNSPEC_VFCVT, vec_mode);
+
+ emit_vlmax_insn (icode, type, ops);
+}
+
+static void
emit_vec_cvt_f_x (rtx op_dest, rtx op_src, rtx mask,
insn_type type, machine_mode vec_mode)
{
@@ -3801,4 +4197,44 @@ expand_vec_roundeven (rtx op_0, rtx op_1, machine_mode vec_fp_mode,
emit_vec_copysign (op_0, op_0, op_1, vec_fp_mode);
}
+void
+expand_vec_lrint (rtx op_0, rtx op_1, machine_mode vec_fp_mode,
+ machine_mode vec_long_mode)
+{
+ gcc_assert (known_eq (GET_MODE_SIZE (vec_fp_mode),
+ GET_MODE_SIZE (vec_long_mode)));
+
+ emit_vec_cvt_x_f (op_0, op_1, UNARY_OP_FRM_DYN, vec_fp_mode);
+}
+
+void
+expand_vec_lround (rtx op_0, rtx op_1, machine_mode vec_fp_mode,
+ machine_mode vec_long_mode)
+{
+ gcc_assert (known_eq (GET_MODE_SIZE (vec_fp_mode),
+ GET_MODE_SIZE (vec_long_mode)));
+
+ emit_vec_cvt_x_f (op_0, op_1, UNARY_OP_FRM_RMM, vec_fp_mode);
+}
+
+void
+expand_vec_lceil (rtx op_0, rtx op_1, machine_mode vec_fp_mode,
+ machine_mode vec_long_mode)
+{
+ gcc_assert (known_eq (GET_MODE_SIZE (vec_fp_mode),
+ GET_MODE_SIZE (vec_long_mode)));
+
+ emit_vec_cvt_x_f (op_0, op_1, UNARY_OP_FRM_RUP, vec_fp_mode);
+}
+
+void
+expand_vec_lfloor (rtx op_0, rtx op_1, machine_mode vec_fp_mode,
+ machine_mode vec_long_mode)
+{
+ gcc_assert (known_eq (GET_MODE_SIZE (vec_fp_mode),
+ GET_MODE_SIZE (vec_long_mode)));
+
+ emit_vec_cvt_x_f (op_0, op_1, UNARY_OP_FRM_RDN, vec_fp_mode);
+}
+
} // namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-costs.cc b/gcc/config/riscv/riscv-vector-costs.cc
index 878f72c..af87388 100644
--- a/gcc/config/riscv/riscv-vector-costs.cc
+++ b/gcc/config/riscv/riscv-vector-costs.cc
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see
#include "bitmap.h"
#include "ssa.h"
#include "backend.h"
+#include "tree-data-ref.h"
/* This file should be included last. */
#include "riscv-vector-costs.h"
@@ -135,8 +136,9 @@ compute_local_program_points (
|| is_gimple_call (gsi_stmt (si))))
continue;
stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
- if (STMT_VINFO_TYPE (vect_stmt_to_vectorize (stmt_info))
- != undef_vec_info_type)
+ enum stmt_vec_info_type type
+ = STMT_VINFO_TYPE (vect_stmt_to_vectorize (stmt_info));
+ if (type != undef_vec_info_type)
{
stmt_point info = {point, gsi_stmt (si)};
program_points.safe_push (info);
@@ -152,6 +154,14 @@ compute_local_program_points (
}
}
+static machine_mode
+get_biggest_mode (machine_mode mode1, machine_mode mode2)
+{
+ unsigned int mode1_size = GET_MODE_BITSIZE (mode1).to_constant ();
+ unsigned int mode2_size = GET_MODE_BITSIZE (mode2).to_constant ();
+ return mode1_size >= mode2_size ? mode1 : mode2;
+}
+
/* Compute local live ranges of each vectorized variable.
Note that we only compute local live ranges (within a block) since
local live ranges information is accurate enough for us to determine
@@ -199,12 +209,12 @@ compute_local_live_ranges (
{
unsigned int point = program_point.point;
gimple *stmt = program_point.stmt;
- machine_mode mode = biggest_mode;
tree lhs = gimple_get_lhs (stmt);
if (lhs != NULL_TREE && is_gimple_reg (lhs)
&& !POINTER_TYPE_P (TREE_TYPE (lhs)))
{
- mode = TYPE_MODE (TREE_TYPE (lhs));
+ biggest_mode = get_biggest_mode (biggest_mode,
+ TYPE_MODE (TREE_TYPE (lhs)));
bool existed_p = false;
pair &live_range
= live_ranges->get_or_insert (lhs, &existed_p);
@@ -223,7 +233,9 @@ compute_local_live_ranges (
the future. */
if (is_gimple_val (var) && !POINTER_TYPE_P (TREE_TYPE (var)))
{
- mode = TYPE_MODE (TREE_TYPE (var));
+ biggest_mode
+ = get_biggest_mode (biggest_mode,
+ TYPE_MODE (TREE_TYPE (var)));
bool existed_p = false;
pair &live_range
= live_ranges->get_or_insert (var, &existed_p);
@@ -236,9 +248,6 @@ compute_local_live_ranges (
live_range = pair (0, point);
}
}
- if (GET_MODE_SIZE (mode).to_constant ()
- > GET_MODE_SIZE (biggest_mode).to_constant ())
- biggest_mode = mode;
}
if (dump_enabled_p ())
for (hash_map<tree, pair>::iterator iter = live_ranges->begin ();
@@ -289,9 +298,7 @@ max_number_of_live_regs (const basic_block bb,
unsigned int i;
unsigned int live_point = 0;
auto_vec<unsigned int> live_vars_vec;
- live_vars_vec.safe_grow (max_point + 1, true);
- for (i = 0; i < live_vars_vec.length (); ++i)
- live_vars_vec[i] = 0;
+ live_vars_vec.safe_grow_cleared (max_point + 1, true);
for (hash_map<tree, pair>::iterator iter = live_ranges.begin ();
iter != live_ranges.end (); ++iter)
{
@@ -360,6 +367,31 @@ get_current_lmul (class loop *loop)
return loop_autovec_infos.get (loop)->current_lmul;
}
+/* Get STORE value. */
+static tree
+get_store_value (gimple *stmt)
+{
+ if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
+ {
+ if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ return gimple_call_arg (stmt, 3);
+ else
+ gcc_unreachable ();
+ }
+ else
+ return gimple_assign_rhs1 (stmt);
+}
+
+/* Return true if it is non-contiguous load/store. */
+static bool
+non_contiguous_memory_access_p (stmt_vec_info stmt_info)
+{
+ enum stmt_vec_info_type type
+ = STMT_VINFO_TYPE (vect_stmt_to_vectorize (stmt_info));
+ return ((type == load_vec_info_type || type == store_vec_info_type)
+ && !adjacent_dr_p (STMT_VINFO_DATA_REF (stmt_info)));
+}
+
/* Update the live ranges according PHI.
Loop:
@@ -395,13 +427,15 @@ update_local_live_ranges (
unsigned int nbbs = loop->num_nodes;
unsigned int i, j;
gphi_iterator psi;
+ gimple_stmt_iterator si;
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
- "Update local program points for bb %d:\n", bb->index);
- for (psi = gsi_start_phis (bbs[i]); !gsi_end_p (psi); gsi_next (&psi))
+ "Update local program points for bb %d:\n",
+ bbs[i]->index);
+ for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
{
gphi *phi = psi.phi ();
stmt_vec_info stmt_info = vinfo->lookup_stmt (phi);
@@ -413,12 +447,23 @@ update_local_live_ranges (
{
edge e = gimple_phi_arg_edge (phi, j);
tree def = gimple_phi_arg_def (phi, j);
- auto *live_ranges = live_ranges_per_bb.get (e->src);
+ auto *live_ranges = live_ranges_per_bb.get (bb);
+ auto *live_range = live_ranges->get (def);
+ if (live_range && flow_bb_inside_loop_p (loop, e->src))
+ {
+ unsigned int start = (*live_range).first;
+ (*live_range).first = 0;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Update %T start point from %d to %d:\n",
+ def, start, (*live_range).first);
+ }
+ live_ranges = live_ranges_per_bb.get (e->src);
if (!program_points_per_bb.get (e->src))
continue;
unsigned int max_point
= (*program_points_per_bb.get (e->src)).length () - 1;
- auto *live_range = live_ranges->get (def);
+ live_range = live_ranges->get (def);
if (!live_range)
continue;
@@ -430,6 +475,43 @@ update_local_live_ranges (
end, (*live_range).second);
}
}
+ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
+ {
+ if (!(is_gimple_assign (gsi_stmt (si))
+ || is_gimple_call (gsi_stmt (si))))
+ continue;
+ stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
+ enum stmt_vec_info_type type
+ = STMT_VINFO_TYPE (vect_stmt_to_vectorize (stmt_info));
+ if (non_contiguous_memory_access_p (stmt_info))
+ {
+ /* For non-adjacent load/store STMT, we will potentially
+ convert it into:
+
+ 1. MASK_LEN_GATHER_LOAD (..., perm indices).
+ 2. Contiguous load/store + VEC_PERM (..., perm indices)
+
+ We will likely be using one more vector variable. */
+ unsigned int max_point
+ = (*program_points_per_bb.get (bb)).length () - 1;
+ auto *live_ranges = live_ranges_per_bb.get (bb);
+ bool existed_p = false;
+ tree var = type == load_vec_info_type
+ ? gimple_get_lhs (gsi_stmt (si))
+ : get_store_value (gsi_stmt (si));
+ tree sel_type = build_nonstandard_integer_type (
+ TYPE_PRECISION (TREE_TYPE (var)), 1);
+ tree sel = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+ get_identifier ("vect_perm"), sel_type);
+ pair &live_range = live_ranges->get_or_insert (sel, &existed_p);
+ gcc_assert (!existed_p);
+ live_range = pair (0, max_point);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Add perm indice %T, start = 0, end = %d\n",
+ sel, max_point);
+ }
+ }
}
}
@@ -446,10 +528,6 @@ costs::preferred_new_lmul_p (const vector_costs *uncast_other) const
auto other_loop_vinfo = as_a<loop_vec_info> (other->m_vinfo);
class loop *loop = LOOP_VINFO_LOOP (this_loop_vinfo);
- if (!LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (this_loop_vinfo)
- && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (other_loop_vinfo))
- return false;
-
if (loop_autovec_infos.get (loop) && loop_autovec_infos.get (loop)->end_p)
return false;
else if (loop_autovec_infos.get (loop))
@@ -483,6 +561,15 @@ costs::preferred_new_lmul_p (const vector_costs *uncast_other) const
machine_mode biggest_mode
= compute_local_live_ranges (program_points_per_bb, live_ranges_per_bb);
+ /* If we can use simple VLS modes to handle NITERS element.
+ We don't need to use VLA modes with partial vector auto-vectorization. */
+ if (LOOP_VINFO_NITERS_KNOWN_P (this_loop_vinfo)
+ && known_le (tree_to_poly_int64 (LOOP_VINFO_NITERS (this_loop_vinfo))
+ * GET_MODE_SIZE (biggest_mode).to_constant (),
+ (int) RVV_M8 * BYTES_PER_RISCV_VECTOR)
+ && pow2p_hwi (LOOP_VINFO_INT_NITERS (this_loop_vinfo)))
+ return vector_costs::better_main_loop_than_p (other);
+
/* Update live ranges according to PHI. */
update_local_live_ranges (other->m_vinfo, program_points_per_bb,
live_ranges_per_bb);
diff --git a/gcc/config/riscv/riscv-vsetvl.cc b/gcc/config/riscv/riscv-vsetvl.cc
index af8c31d..4b06d93 100644
--- a/gcc/config/riscv/riscv-vsetvl.cc
+++ b/gcc/config/riscv/riscv-vsetvl.cc
@@ -2417,8 +2417,8 @@ vector_infos_manager::vector_infos_manager ()
vector_antin = nullptr;
vector_antout = nullptr;
vector_earliest = nullptr;
- vector_insn_infos.safe_grow (get_max_uid ());
- vector_block_infos.safe_grow (last_basic_block_for_fn (cfun));
+ vector_insn_infos.safe_grow_cleared (get_max_uid ());
+ vector_block_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
if (!optimize)
{
basic_block cfg_bb;
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 6e7a719..f2dcb0db 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -183,6 +183,9 @@ struct GTY(()) machine_function {
/* True if attributes on current function have been checked. */
bool attributes_checked_p;
+ /* True if RA must be saved because of a far jump. */
+ bool far_jump_used;
+
/* The current frame information, calculated by riscv_compute_frame_info. */
struct riscv_frame_info frame;
@@ -382,6 +385,21 @@ static const struct riscv_tune_param thead_c906_tune_info = {
false /* use_divmod_expansion */
};
+/* Costs to use when optimizing for a generic ooo profile. */
+static const struct riscv_tune_param generic_ooo_tune_info = {
+ {COSTS_N_INSNS (2), COSTS_N_INSNS (2)}, /* fp_add */
+ {COSTS_N_INSNS (5), COSTS_N_INSNS (6)}, /* fp_mul */
+ {COSTS_N_INSNS (7), COSTS_N_INSNS (8)}, /* fp_div */
+ {COSTS_N_INSNS (2), COSTS_N_INSNS (2)}, /* int_mul */
+ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
+ 1, /* issue_rate */
+ 3, /* branch_cost */
+ 4, /* memory_cost */
+ 4, /* fmv_cost */
+ false, /* slow_unaligned_access */
+ false, /* use_divmod_expansion */
+};
+
/* Costs to use when optimizing for size. */
static const struct riscv_tune_param optimize_size_tune_info = {
{COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
@@ -2042,7 +2060,7 @@ riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
{
rtx index = XEXP (base, 0);
rtx fp = XEXP (base, 1);
- if (REGNO (fp) == VIRTUAL_STACK_VARS_REGNUM)
+ if (REG_P (fp) && REGNO (fp) == VIRTUAL_STACK_VARS_REGNUM)
{
/* If we were given a MULT, we must fix the constant
@@ -2386,9 +2404,8 @@ riscv_legitimize_poly_move (machine_mode mode, rtx dest, rtx tmp, rtx src)
}
else
{
- /* FIXME: We currently DON'T support TARGET_MIN_VLEN > 4096. */
- int max_power = exact_log2 (4096 / 128);
- for (int i = 0; i < max_power; i++)
+ int max_power = exact_log2 (MAX_POLY_VARIANT);
+ for (int i = 0; i <= max_power; i++)
{
int possible_div_factor = 1 << i;
if (factor % (vlenb / possible_div_factor) == 0)
@@ -2768,6 +2785,19 @@ riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UN
switch (GET_CODE (x))
{
+ case SET:
+ /* If we are called for an INSN that's a simple set of a register,
+ then cost based on the SET_SRC alone. */
+ if (outer_code == INSN && REG_P (SET_DEST (x)))
+ {
+ riscv_rtx_costs (SET_SRC (x), mode, outer_code, opno, total, speed);
+ return true;
+ }
+
+ /* Otherwise return FALSE indicating we should recurse into both the
+ SET_DEST and SET_SRC combining the cost of both. */
+ return false;
+
case CONST_INT:
/* trivial constants checked using OUTER_CODE in case they are
encodable in insn itself w/o need for additional insn(s). */
@@ -5117,161 +5147,6 @@ riscv_legitimize_call_address (rtx addr)
return addr;
}
-/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
- Assume that the areas do not overlap. */
-
-static void
-riscv_block_move_straight (rtx dest, rtx src, unsigned HOST_WIDE_INT length)
-{
- unsigned HOST_WIDE_INT offset, delta;
- unsigned HOST_WIDE_INT bits;
- int i;
- enum machine_mode mode;
- rtx *regs;
-
- bits = MAX (BITS_PER_UNIT,
- MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
-
- mode = mode_for_size (bits, MODE_INT, 0).require ();
- delta = bits / BITS_PER_UNIT;
-
- /* Allocate a buffer for the temporary registers. */
- regs = XALLOCAVEC (rtx, length / delta);
-
- /* Load as many BITS-sized chunks as possible. Use a normal load if
- the source has enough alignment, otherwise use left/right pairs. */
- for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
- {
- regs[i] = gen_reg_rtx (mode);
- riscv_emit_move (regs[i], adjust_address (src, mode, offset));
- }
-
- /* Copy the chunks to the destination. */
- for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
- riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
-
- /* Mop up any left-over bytes. */
- if (offset < length)
- {
- src = adjust_address (src, BLKmode, offset);
- dest = adjust_address (dest, BLKmode, offset);
- move_by_pieces (dest, src, length - offset,
- MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), RETURN_BEGIN);
- }
-}
-
-/* Helper function for doing a loop-based block operation on memory
- reference MEM. Each iteration of the loop will operate on LENGTH
- bytes of MEM.
-
- Create a new base register for use within the loop and point it to
- the start of MEM. Create a new memory reference that uses this
- register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
-
-static void
-riscv_adjust_block_mem (rtx mem, unsigned HOST_WIDE_INT length,
- rtx *loop_reg, rtx *loop_mem)
-{
- *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
-
- /* Although the new mem does not refer to a known location,
- it does keep up to LENGTH bytes of alignment. */
- *loop_mem = change_address (mem, BLKmode, *loop_reg);
- set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
-}
-
-/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
- bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
- the memory regions do not overlap. */
-
-static void
-riscv_block_move_loop (rtx dest, rtx src, unsigned HOST_WIDE_INT length,
- unsigned HOST_WIDE_INT bytes_per_iter)
-{
- rtx label, src_reg, dest_reg, final_src, test;
- unsigned HOST_WIDE_INT leftover;
-
- leftover = length % bytes_per_iter;
- length -= leftover;
-
- /* Create registers and memory references for use within the loop. */
- riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
- riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
-
- /* Calculate the value that SRC_REG should have after the last iteration
- of the loop. */
- final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
- 0, 0, OPTAB_WIDEN);
-
- /* Emit the start of the loop. */
- label = gen_label_rtx ();
- emit_label (label);
-
- /* Emit the loop body. */
- riscv_block_move_straight (dest, src, bytes_per_iter);
-
- /* Move on to the next block. */
- riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
- riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
-
- /* Emit the loop condition. */
- test = gen_rtx_NE (VOIDmode, src_reg, final_src);
- emit_jump_insn (gen_cbranch4 (Pmode, test, src_reg, final_src, label));
-
- /* Mop up any left-over bytes. */
- if (leftover)
- riscv_block_move_straight (dest, src, leftover);
- else
- emit_insn(gen_nop ());
-}
-
-/* Expand a cpymemsi instruction, which copies LENGTH bytes from
- memory reference SRC to memory reference DEST. */
-
-bool
-riscv_expand_block_move (rtx dest, rtx src, rtx length)
-{
- if (CONST_INT_P (length))
- {
- unsigned HOST_WIDE_INT hwi_length = UINTVAL (length);
- unsigned HOST_WIDE_INT factor, align;
-
- align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
- factor = BITS_PER_WORD / align;
-
- if (optimize_function_for_size_p (cfun)
- && hwi_length * factor * UNITS_PER_WORD > MOVE_RATIO (false))
- return false;
-
- if (hwi_length <= (RISCV_MAX_MOVE_BYTES_STRAIGHT / factor))
- {
- riscv_block_move_straight (dest, src, INTVAL (length));
- return true;
- }
- else if (optimize && align >= BITS_PER_WORD)
- {
- unsigned min_iter_words
- = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
- unsigned iter_words = min_iter_words;
- unsigned HOST_WIDE_INT bytes = hwi_length;
- unsigned HOST_WIDE_INT words = bytes / UNITS_PER_WORD;
-
- /* Lengthen the loop body if it shortens the tail. */
- for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
- {
- unsigned cur_cost = iter_words + words % iter_words;
- unsigned new_cost = i + words % i;
- if (new_cost <= cur_cost)
- iter_words = i;
- }
-
- riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
- return true;
- }
- }
- return false;
-}
-
/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
in context CONTEXT. HI_RELOC indicates a high-part reloc. */
@@ -5420,6 +5295,7 @@ riscv_get_v_regno_alignment (machine_mode mode)
any outermost HIGH.
'R' Print the low-part relocation associated with OP.
'C' Print the integer branch condition for comparison OP.
+ 'N' Print the inverse of the integer branch condition for comparison OP.
'A' Print the atomic operation suffix for memory model OP.
'I' Print the LR suffix for memory model OP.
'J' Print the SC suffix for memory model OP.
@@ -5576,6 +5452,11 @@ riscv_print_operand (FILE *file, rtx op, int letter)
fputs (GET_RTX_NAME (code), file);
break;
+ case 'N':
+ /* The RTL names match the instruction names. */
+ fputs (GET_RTX_NAME (reverse_condition (code)), file);
+ break;
+
case 'A': {
const enum memmodel model = memmodel_base (INTVAL (op));
if (riscv_memmodel_needs_amo_acquire (model)
@@ -5633,6 +5514,13 @@ riscv_print_operand (FILE *file, rtx op, int letter)
output_addr_const (file, newop);
break;
}
+ case 'X':
+ {
+ int ival = INTVAL (op) + 1;
+ rtx newop = GEN_INT (ctz_hwi (ival) + 1);
+ output_addr_const (file, newop);
+ break;
+ }
default:
switch (code)
{
@@ -5845,6 +5733,64 @@ riscv_frame_set (rtx mem, rtx reg)
return set;
}
+/* Returns true if the current function might contain a far jump. */
+
+static bool
+riscv_far_jump_used_p ()
+{
+ size_t func_size = 0;
+
+ if (cfun->machine->far_jump_used)
+ return true;
+
+ /* We can't change far_jump_used during or after reload, as there is
+ no chance to change stack frame layout. So we must rely on the
+ conservative heuristic below having done the right thing. */
+ if (reload_in_progress || reload_completed)
+ return false;
+
+ /* Estimate the function length. */
+ for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ func_size += get_attr_length (insn);
+
+ /* Conservatively determine whether some jump might exceed 1 MiB
+ displacement. */
+ if (func_size * 2 >= 0x100000)
+ cfun->machine->far_jump_used = true;
+
+ return cfun->machine->far_jump_used;
+}
+
+/* Return true, if the current function must save the incoming return
+ address. */
+
+static bool
+riscv_save_return_addr_reg_p (void)
+{
+ /* The $ra register is call-clobbered: if this is not a leaf function,
+ save it. */
+ if (!crtl->is_leaf)
+ return true;
+
+ /* We need to save the incoming return address if __builtin_eh_return
+ is being used to set a different return address. */
+ if (crtl->calls_eh_return)
+ return true;
+
+ /* Far jumps/branches use $ra as a temporary to set up the target jump
+ location (clobbering the incoming return address). */
+ if (riscv_far_jump_used_p ())
+ return true;
+
+ /* When the frame pointer is needed for a leaf function, save ra
+ too, unless -momit-leaf-frame-pointer is in effect. */
+ if (frame_pointer_needed && crtl->is_leaf
+ && !TARGET_OMIT_LEAF_FRAME_POINTER)
+ return true;
+
+ return false;
+}
+
/* Return true if the current function must save register REGNO. */
static bool
@@ -5865,11 +5811,7 @@ riscv_save_reg_p (unsigned int regno)
if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
return true;
- /* Need not to use ra for leaf when frame pointer is turned off by option
- whatever the omit-leaf-frame's value. */
- bool keep_leaf_ra = frame_pointer_needed && crtl->is_leaf
- && !TARGET_OMIT_LEAF_FRAME_POINTER;
- if (regno == RETURN_ADDR_REGNUM && (crtl->calls_eh_return || keep_leaf_ra))
+ if (regno == RETURN_ADDR_REGNUM && riscv_save_return_addr_reg_p ())
return true;
/* If this is an interrupt handler, then must save extra registers. */
@@ -7792,6 +7734,75 @@ riscv_sched_variable_issue (FILE *, int, rtx_insn *insn, int more)
return more - 1;
}
+/* Adjust the cost/latency of instructions for scheduling.
+ For now this is just used to change the latency of vector instructions
+ according to their LMUL. We assume that an insn with LMUL == 8 requires
+ eight times more execution cycles than the same insn with LMUL == 1.
+ As this may cause very high latencies which lead to scheduling artifacts
+ we currently only perform the adjustment when -madjust-lmul-cost is given.
+ */
+static int
+riscv_sched_adjust_cost (rtx_insn *, int, rtx_insn *insn, int cost,
+ unsigned int)
+{
+ /* Only do adjustments for the generic out-of-order scheduling model. */
+ if (!TARGET_VECTOR || riscv_microarchitecture != generic_ooo)
+ return cost;
+
+ if (recog_memoized (insn) < 0)
+ return cost;
+
+ enum attr_type type = get_attr_type (insn);
+
+ if (type == TYPE_VFREDO || type == TYPE_VFWREDO)
+ {
+ /* TODO: For ordered reductions scale the base cost relative to the
+ number of units. */
+ ;
+ }
+
+ /* Don't do any LMUL-based latency adjustment unless explicitly asked to. */
+ if (!TARGET_ADJUST_LMUL_COST)
+ return cost;
+
+ /* vsetvl has a vlmul attribute but its latency does not depend on it. */
+ if (type == TYPE_VSETVL || type == TYPE_VSETVL_PRE)
+ return cost;
+
+ enum riscv_vector::vlmul_type lmul =
+ (riscv_vector::vlmul_type)get_attr_vlmul (insn);
+
+ double factor = 1;
+ switch (lmul)
+ {
+ case riscv_vector::LMUL_2:
+ factor = 2;
+ break;
+ case riscv_vector::LMUL_4:
+ factor = 4;
+ break;
+ case riscv_vector::LMUL_8:
+ factor = 8;
+ break;
+ case riscv_vector::LMUL_F2:
+ factor = 0.5;
+ break;
+ case riscv_vector::LMUL_F4:
+ factor = 0.25;
+ break;
+ case riscv_vector::LMUL_F8:
+ factor = 0.125;
+ break;
+ default:
+ factor = 1;
+ }
+
+ /* If the latency was nonzero, keep it that way. */
+ int new_cost = MAX (cost > 0 ? 1 : 0, cost * factor);
+
+ return new_cost;
+}
+
/* Auxiliary function to emit RISC-V ELF attribute. */
static void
riscv_emit_attribute ()
@@ -7969,10 +7980,11 @@ riscv_init_machine_status (void)
/* Return the VLEN value associated with -march.
TODO: So far we only support length-agnostic value. */
static poly_uint16
-riscv_convert_vector_bits (void)
+riscv_convert_vector_bits (struct gcc_options *opts)
{
int chunk_num;
- if (TARGET_MIN_VLEN > 32)
+ int min_vlen = TARGET_MIN_VLEN_OPTS (opts);
+ if (min_vlen > 32)
{
/* When targetting minimum VLEN > 32, we should use 64-bit chunk size.
Otherwise we can not include SEW = 64bits.
@@ -7990,7 +8002,7 @@ riscv_convert_vector_bits (void)
- TARGET_MIN_VLEN = 2048bit: [256,256]
- TARGET_MIN_VLEN = 4096bit: [512,512]
FIXME: We currently DON'T support TARGET_MIN_VLEN > 4096bit. */
- chunk_num = TARGET_MIN_VLEN / 64;
+ chunk_num = min_vlen / 64;
}
else
{
@@ -8009,10 +8021,10 @@ riscv_convert_vector_bits (void)
to set RVV mode size. The RVV machine modes size are run-time constant if
TARGET_VECTOR is enabled. The RVV machine modes size remains default
compile-time constant if TARGET_VECTOR is disabled. */
- if (TARGET_VECTOR)
+ if (TARGET_VECTOR_OPTS_P (opts))
{
- if (riscv_autovec_preference == RVV_FIXED_VLMAX)
- return (int) TARGET_MIN_VLEN / (riscv_bytes_per_vector_chunk * 8);
+ if (opts->x_riscv_autovec_preference == RVV_FIXED_VLMAX)
+ return (int) min_vlen / (riscv_bytes_per_vector_chunk * 8);
else
return poly_uint16 (chunk_num, chunk_num);
}
@@ -8020,40 +8032,33 @@ riscv_convert_vector_bits (void)
return 1;
}
-/* Implement TARGET_OPTION_OVERRIDE. */
-
-static void
-riscv_option_override (void)
+/* 'Unpack' the internal tuning structs and update the options
+ in OPTS. The caller must have set up selected_tune and selected_arch
+ as all the other target-specific codegen decisions are
+ derived from them. */
+void
+riscv_override_options_internal (struct gcc_options *opts)
{
const struct riscv_tune_info *cpu;
-#ifdef SUBTARGET_OVERRIDE_OPTIONS
- SUBTARGET_OVERRIDE_OPTIONS;
-#endif
-
- flag_pcc_struct_return = 0;
-
- if (flag_pic)
- g_switch_value = 0;
-
/* The presence of the M extension implies that division instructions
are present, so include them unless explicitly disabled. */
- if (TARGET_MUL && (target_flags_explicit & MASK_DIV) == 0)
- target_flags |= MASK_DIV;
- else if (!TARGET_MUL && TARGET_DIV)
+ if (TARGET_MUL_OPTS_P (opts) && (target_flags_explicit & MASK_DIV) == 0)
+ opts->x_target_flags |= MASK_DIV;
+ else if (!TARGET_MUL_OPTS_P (opts) && TARGET_DIV_OPTS_P (opts))
error ("%<-mdiv%> requires %<-march%> to subsume the %<M%> extension");
/* Likewise floating-point division and square root. */
if ((TARGET_HARD_FLOAT || TARGET_ZFINX) && (target_flags_explicit & MASK_FDIV) == 0)
- target_flags |= MASK_FDIV;
+ opts->x_target_flags |= MASK_FDIV;
/* Handle -mtune, use -mcpu if -mtune is not given, and use default -mtune
if both -mtune and -mcpu are not given. */
- cpu = riscv_parse_tune (riscv_tune_string ? riscv_tune_string :
- (riscv_cpu_string ? riscv_cpu_string :
+ cpu = riscv_parse_tune (opts->x_riscv_tune_string ? opts->x_riscv_tune_string :
+ (opts->x_riscv_cpu_string ? opts->x_riscv_cpu_string :
RISCV_TUNE_STRING_DEFAULT));
riscv_microarchitecture = cpu->microarchitecture;
- tune_param = optimize_size ? &optimize_size_tune_info : cpu->tune_param;
+ tune_param = opts->x_optimize_size ? &optimize_size_tune_info : cpu->tune_param;
/* Use -mtune's setting for slow_unaligned_access, even when optimizing
for size. For architectures that trap and emulate unaligned accesses,
@@ -8069,15 +8074,38 @@ riscv_option_override (void)
if ((target_flags_explicit & MASK_STRICT_ALIGN) == 0
&& cpu->tune_param->slow_unaligned_access)
- target_flags |= MASK_STRICT_ALIGN;
+ opts->x_target_flags |= MASK_STRICT_ALIGN;
/* If the user hasn't specified a branch cost, use the processor's
default. */
- if (riscv_branch_cost == 0)
- riscv_branch_cost = tune_param->branch_cost;
+ if (opts->x_riscv_branch_cost == 0)
+ opts->x_riscv_branch_cost = tune_param->branch_cost;
- /* Function to allocate machine-dependent function status. */
- init_machine_status = &riscv_init_machine_status;
+ /* FIXME: We don't allow TARGET_MIN_VLEN > 4096 since the datatypes of
+ both GET_MODE_SIZE and GET_MODE_BITSIZE are poly_uint16.
+
+ We can only allow TARGET_MIN_VLEN * 8 (LMUL) < 65535. */
+ if (TARGET_MIN_VLEN_OPTS (opts) > 4096)
+ sorry ("Current RISC-V GCC cannot support VLEN greater than 4096bit for "
+ "'V' Extension");
+
+ /* Convert -march to a chunks count. */
+ riscv_vector_chunks = riscv_convert_vector_bits (opts);
+}
+
+/* Implement TARGET_OPTION_OVERRIDE. */
+
+static void
+riscv_option_override (void)
+{
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+ flag_pcc_struct_return = 0;
+
+ if (flag_pic)
+ g_switch_value = 0;
if (flag_pic)
riscv_cmodel = CM_PIC;
@@ -8192,20 +8220,14 @@ riscv_option_override (void)
riscv_stack_protector_guard_offset = offs;
}
- /* FIXME: We don't allow TARGET_MIN_VLEN > 4096 since the datatypes of
- both GET_MODE_SIZE and GET_MODE_BITSIZE are poly_uint16.
-
- We can only allow TARGET_MIN_VLEN * 8 (LMUL) < 65535. */
- if (TARGET_MIN_VLEN > 4096)
- sorry (
- "Current RISC-V GCC cannot support VLEN greater than 4096bit for 'V' Extension");
-
SET_OPTION_IF_UNSET (&global_options, &global_options_set,
param_sched_pressure_algorithm,
SCHED_PRESSURE_MODEL);
- /* Convert -march to a chunks count. */
- riscv_vector_chunks = riscv_convert_vector_bits ();
+ /* Function to allocate machine-dependent function status. */
+ init_machine_status = &riscv_init_machine_status;
+
+ riscv_override_options_internal (&global_options);
}
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
@@ -9008,18 +9030,7 @@ riscv_support_vector_misalignment (machine_mode mode,
int misalignment,
bool is_packed ATTRIBUTE_UNUSED)
{
- /* Only enable misalign data movements for VLS modes. */
- if (TARGET_VECTOR_VLS && STRICT_ALIGNMENT)
- {
- /* Return if movmisalign pattern is not supported for this mode. */
- if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
- return false;
-
- /* Misalignment factor is unknown at compile time. */
- if (misalignment == -1)
- return false;
- }
- /* Disable movmisalign for VLA auto-vectorization. */
+ /* Depend on movmisalign pattern. */
return default_builtin_support_vector_misalignment (mode, type, misalignment,
is_packed);
}
@@ -9618,6 +9629,9 @@ riscv_preferred_else_value (unsigned ifn, tree vectype, unsigned int nops,
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE riscv_sched_variable_issue
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST riscv_sched_adjust_cost
+
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
diff --git a/gcc/config/riscv/riscv.h b/gcc/config/riscv/riscv.h
index 7ac7884..0164514 100644
--- a/gcc/config/riscv/riscv.h
+++ b/gcc/config/riscv/riscv.h
@@ -310,7 +310,7 @@ ASM_MISA_SPEC
#define FIXED_REGISTERS \
{ /* General registers. */ \
- 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
/* Floating-point registers. */ \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
@@ -328,7 +328,7 @@ ASM_MISA_SPEC
#define CALL_USED_REGISTERS \
{ /* General registers. */ \
- 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
/* Floating-point registers. */ \
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
@@ -1191,4 +1191,9 @@ extern void riscv_remove_unneeded_save_restore_calls (void);
#define OPTIMIZE_MODE_SWITCHING(ENTITY) (TARGET_VECTOR)
#define NUM_MODES_FOR_MODE_SWITCHING {VXRM_MODE_NONE, riscv_vector::FRM_NONE}
+
+/* The size difference between different RVV modes can be up to 64 times.
+ e.g. RVVMF64BI vs RVVMF1BI on zvl512b, which is [1, 1] vs [64, 64]. */
+#define MAX_POLY_VARIANT 64
+
#endif /* ! GCC_RISCV_H */
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index e00b8ee..23d9133 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -282,7 +282,9 @@
;; Classification of each insn.
;; branch conditional branch
-;; jump unconditional jump
+;; jump unconditional direct jump
+;; jalr unconditional indirect jump
+;; ret various returns, no arguments
;; call unconditional call
;; load load instruction(s)
;; fpload floating point load
@@ -427,12 +429,12 @@
;; vmov whole vector register move
;; vector unknown vector instruction
(define_attr "type"
- "unknown,branch,jump,call,load,fpload,store,fpstore,
+ "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
fmadd,fdiv,fcmp,fcvt,fsqrt,multi,auipc,sfb_alu,nop,trap,ghost,bitmanip,
rotate,clmul,min,max,minu,maxu,clz,ctz,cpop,
atomic,condmove,cbo,crypto,pushpop,mvpair,zicond,rdvlenb,rdvl,wrvxrm,wrfrm,
- rdfrm,vsetvl,vlde,vste,vldm,vstm,vlds,vsts,
+ rdfrm,vsetvl,vsetvl_pre,vlde,vste,vldm,vstm,vlds,vsts,
vldux,vldox,vstux,vstox,vldff,vldr,vstr,
vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
@@ -513,11 +515,22 @@
;; Length of instruction in bytes.
(define_attr "length" ""
(cond [
+ ;; Branches further than +/- 1 MiB require three instructions.
;; Branches further than +/- 4 KiB require two instructions.
(eq_attr "type" "branch")
(if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
(le (minus (pc) (match_dup 0)) (const_int 4092)))
(const_int 4)
+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 1048568))
+ (le (minus (pc) (match_dup 0)) (const_int 1048572)))
+ (const_int 8)
+ (const_int 12)))
+
+ ;; Jumps further than +/- 1 MiB require two instructions.
+ (eq_attr "type" "jump")
+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 1048568))
+ (le (minus (pc) (match_dup 0)) (const_int 1048572)))
+ (const_int 4)
(const_int 8))
;; Conservatively assume calls take two instructions (AUIPC + JALR).
@@ -559,7 +572,7 @@
;; Microarchitectures we know how to tune for.
;; Keep this in sync with enum riscv_microarchitecture.
(define_attr "tune"
- "generic,sifive_7"
+ "generic,sifive_7,generic_ooo"
(const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
;; Describe a user's asm statement.
@@ -1997,13 +2010,16 @@
;; Pretend to have the ability to load complex const_int in order to get
;; better code generation around them.
-;;
;; But avoid constants that are special cased elsewhere.
+;;
+;; Hide it from IRA register equiv recog* () to elide potential undoing of split
+;;
(define_insn_and_split "*mvconst_internal"
[(set (match_operand:GPR 0 "register_operand" "=r")
(match_operand:GPR 1 "splittable_const_int_operand" "i"))]
- "!(p2m1_shift_operand (operands[1], <MODE>mode)
- || high_mask_shift_operand (operands[1], <MODE>mode))"
+ "!ira_in_progress
+ && !(p2m1_shift_operand (operands[1], <MODE>mode)
+ || high_mask_shift_operand (operands[1], <MODE>mode))"
"#"
"&& 1"
[(const_int 0)]
@@ -2271,14 +2287,16 @@
DONE;
})
-(define_expand "cpymemsi"
+(define_expand "cpymem<mode>"
[(parallel [(set (match_operand:BLK 0 "general_operand")
(match_operand:BLK 1 "general_operand"))
- (use (match_operand:SI 2 ""))
+ (use (match_operand:P 2 ""))
(use (match_operand:SI 3 "const_int_operand"))])]
""
{
- if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
+ if (riscv_vector::expand_block_move (operands[0], operands[1], operands[2]))
+ DONE;
+ else if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
DONE;
else
FAIL;
@@ -2610,7 +2628,12 @@
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "b%C1\t%2,%z3,%0"
+{
+ if (get_attr_length (insn) == 12)
+ return "b%N1\t%2,%z3,1f; jump\t%l0,ra; 1:";
+
+ return "b%C1\t%2,%z3,%l0";
+}
[(set_attr "type" "branch")
(set_attr "mode" "none")])
@@ -2895,10 +2918,16 @@
;; Unconditional branches.
(define_insn "jump"
- [(set (pc)
- (label_ref (match_operand 0 "" "")))]
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
""
- "j\t%l0"
+{
+ /* Hopefully this does not happen often as this is going
+ to clobber $ra and muck up the return stack predictors. */
+ if (get_attr_length (insn) == 8)
+ return "jump\t%l0,ra";
+
+ return "j\t%l0";
+}
[(set_attr "type" "jump")
(set_attr "mode" "none")])
@@ -2918,7 +2947,7 @@
[(set (pc) (match_operand:P 0 "register_operand" "l"))]
""
"jr\t%0"
- [(set_attr "type" "jump")
+ [(set_attr "type" "jalr")
(set_attr "mode" "none")])
(define_expand "tablejump"
@@ -2943,7 +2972,7 @@
(use (label_ref (match_operand 1 "" "")))]
""
"jr\t%0"
- [(set_attr "type" "jump")
+ [(set_attr "type" "jalr")
(set_attr "mode" "none")])
;;
@@ -3003,7 +3032,7 @@
{
return riscv_output_return ();
}
- [(set_attr "type" "jump")
+ [(set_attr "type" "jalr")
(set_attr "mode" "none")])
;; Normal return.
@@ -3013,7 +3042,7 @@
(use (match_operand 0 "pmode_register_operand" ""))]
""
"jr\t%0"
- [(set_attr "type" "jump")
+ [(set_attr "type" "jalr")
(set_attr "mode" "none")])
;; This is used in compiling the unwind routines.
@@ -3067,7 +3096,7 @@
"epilogue_completed"
[(const_int 0)]
"riscv_expand_epilogue (EXCEPTION_RETURN); DONE;"
- [(set_attr "type" "jump")])
+ [(set_attr "type" "ret")])
;;
;; ....................
@@ -3250,7 +3279,7 @@
(const_int 0)]
""
""
- [(set_attr "type" "jump")])
+ [(set_attr "type" "ret")])
(define_insn "riscv_frcsr"
[(set (match_operand:SI 0 "register_operand" "=r")
@@ -3292,21 +3321,21 @@
(unspec_volatile [(const_int 0)] UNSPECV_MRET)]
""
"mret"
- [(set_attr "type" "jump")])
+ [(set_attr "type" "ret")])
(define_insn "riscv_sret"
[(return)
(unspec_volatile [(const_int 0)] UNSPECV_SRET)]
""
"sret"
- [(set_attr "type" "jump")])
+ [(set_attr "type" "ret")])
(define_insn "riscv_uret"
[(return)
(unspec_volatile [(const_int 0)] UNSPECV_URET)]
""
"uret"
- [(set_attr "type" "jump")])
+ [(set_attr "type" "ret")])
(define_insn "stack_tie<mode>"
[(set (mem:BLK (scratch))
@@ -3587,6 +3616,8 @@
(include "generic.md")
(include "sifive-7.md")
(include "thead.md")
+(include "generic-ooo.md")
(include "vector.md")
(include "zicond.md")
(include "zc.md")
+(include "corev.md")
diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt
index 21d0060..70d7815 100644
--- a/gcc/config/riscv/riscv.opt
+++ b/gcc/config/riscv/riscv.opt
@@ -209,57 +209,240 @@ long riscv_stack_protector_guard_offset = 0
TargetVariable
int riscv_zi_subext
+Mask(ZICSR) Var(riscv_zi_subext)
+
+Mask(ZIFENCEI) Var(riscv_zi_subext)
+
+Mask(ZIHINTNTL) Var(riscv_zi_subext)
+
+Mask(ZIHINTPAUSE) Var(riscv_zi_subext)
+
+Mask(ZICOND) Var(riscv_zi_subext)
+
TargetVariable
int riscv_za_subext
+Mask(ZAWRS) Var(riscv_za_subext)
+
TargetVariable
int riscv_zb_subext
+Mask(ZBA) Var(riscv_zb_subext)
+
+Mask(ZBB) Var(riscv_zb_subext)
+
+Mask(ZBC) Var(riscv_zb_subext)
+
+Mask(ZBS) Var(riscv_zb_subext)
+
TargetVariable
int riscv_zinx_subext
+Mask(ZFINX) Var(riscv_zinx_subext)
+
+Mask(ZDINX) Var(riscv_zinx_subext)
+
+Mask(ZHINX) Var(riscv_zinx_subext)
+
+Mask(ZHINXMIN) Var(riscv_zinx_subext)
+
TargetVariable
int riscv_zk_subext
+Mask(ZBKB) Var(riscv_zk_subext)
+
+Mask(ZBKC) Var(riscv_zk_subext)
+
+Mask(ZBKX) Var(riscv_zk_subext)
+
+Mask(ZKNE) Var(riscv_zk_subext)
+
+Mask(ZKND) Var(riscv_zk_subext)
+
+Mask(ZKNH) Var(riscv_zk_subext)
+
+Mask(ZKR) Var(riscv_zk_subext)
+
+Mask(ZKSED) Var(riscv_zk_subext)
+
+Mask(ZKSH) Var(riscv_zk_subext)
+
+Mask(ZKT) Var(riscv_zk_subext)
+
TargetVariable
int riscv_vector_elen_flags
+Mask(VECTOR_ELEN_32) Var(riscv_vector_elen_flags)
+
+Mask(VECTOR_ELEN_64) Var(riscv_vector_elen_flags)
+
+Mask(VECTOR_ELEN_FP_32) Var(riscv_vector_elen_flags)
+
+Mask(VECTOR_ELEN_FP_64) Var(riscv_vector_elen_flags)
+
+Mask(VECTOR_ELEN_FP_16) Var(riscv_vector_elen_flags)
+
TargetVariable
int riscv_zvl_flags
+Mask(ZVL32B) Var(riscv_zvl_flags)
+
+Mask(ZVL64B) Var(riscv_zvl_flags)
+
+Mask(ZVL128B) Var(riscv_zvl_flags)
+
+Mask(ZVL256B) Var(riscv_zvl_flags)
+
+Mask(ZVL512B) Var(riscv_zvl_flags)
+
+Mask(ZVL1024B) Var(riscv_zvl_flags)
+
+Mask(ZVL2048B) Var(riscv_zvl_flags)
+
+Mask(ZVL4096B) Var(riscv_zvl_flags)
+
+Mask(ZVL8192B) Var(riscv_zvl_flags)
+
+Mask(ZVL16384B) Var(riscv_zvl_flags)
+
+Mask(ZVL32768B) Var(riscv_zvl_flags)
+
+Mask(ZVL65536B) Var(riscv_zvl_flags)
+
TargetVariable
int riscv_zvb_subext
+Mask(ZVBB) Var(riscv_zvb_subext)
+
+Mask(ZVBC) Var(riscv_zvb_subext)
+
TargetVariable
int riscv_zvk_subext
+Mask(ZVKG) Var(riscv_zvk_subext)
+
+Mask(ZVKNED) Var(riscv_zvk_subext)
+
+Mask(ZVKNHA) Var(riscv_zvk_subext)
+
+Mask(ZVKNHB) Var(riscv_zvk_subext)
+
+Mask(ZVKSED) Var(riscv_zvk_subext)
+
+Mask(ZVKSH) Var(riscv_zvk_subext)
+
+Mask(ZVKN) Var(riscv_zvk_subext)
+
+Mask(ZVKNC) Var(riscv_zvk_subext)
+
+Mask(ZVKNG) Var(riscv_zvk_subext)
+
+Mask(ZVKS) Var(riscv_zvk_subext)
+
+Mask(ZVKSC) Var(riscv_zvk_subext)
+
+Mask(ZVKSG) Var(riscv_zvk_subext)
+
+Mask(ZVKT) Var(riscv_zvk_subext)
+
TargetVariable
int riscv_zicmo_subext
+Mask(ZICBOZ) Var(riscv_zicmo_subext)
+
+Mask(ZICBOM) Var(riscv_zicmo_subext)
+
+Mask(ZICBOP) Var(riscv_zicmo_subext)
+
TargetVariable
int riscv_zf_subext
+Mask(ZFHMIN) Var(riscv_zf_subext)
+
+Mask(ZFH) Var(riscv_zf_subext)
+
+Mask(ZVFHMIN) Var(riscv_zf_subext)
+
+Mask(ZVFH) Var(riscv_zf_subext)
+
TargetVariable
int riscv_zfa_subext
+Mask(ZFA) Var(riscv_zfa_subext)
+
TargetVariable
int riscv_zm_subext
+Mask(ZMMUL) Var(riscv_zm_subext)
+
TargetVariable
int riscv_zc_subext
+Mask(ZCA) Var(riscv_zc_subext)
+
+Mask(ZCB) Var(riscv_zc_subext)
+
+Mask(ZCE) Var(riscv_zc_subext)
+
+Mask(ZCF) Var(riscv_zc_subext)
+
+Mask(ZCD) Var(riscv_zc_subext)
+
+Mask(ZCMP) Var(riscv_zc_subext)
+
+Mask(ZCMT) Var(riscv_zc_subext)
+
TargetVariable
int riscv_sv_subext
+Mask(SVINVAL) Var(riscv_sv_subext)
+
+Mask(SVNAPOT) Var(riscv_sv_subext)
+
TargetVariable
int riscv_ztso_subext
+Mask(ZTSO) Var(riscv_ztso_subext)
+
+TargetVariable
+int riscv_xcv_subext
+
+Mask(XCVMAC) Var(riscv_xcv_subext)
+
+Mask(XCVALU) Var(riscv_xcv_subext)
+
TargetVariable
int riscv_xthead_subext
+Mask(XTHEADBA) Var(riscv_xthead_subext)
+
+Mask(XTHEADBB) Var(riscv_xthead_subext)
+
+Mask(XTHEADBS) Var(riscv_xthead_subext)
+
+Mask(XTHEADCMO) Var(riscv_xthead_subext)
+
+Mask(XTHEADCONDMOV) Var(riscv_xthead_subext)
+
+Mask(XTHEADFMEMIDX) Var(riscv_xthead_subext)
+
+Mask(XTHEADFMV) Var(riscv_xthead_subext)
+
+Mask(XTHEADINT) Var(riscv_xthead_subext)
+
+Mask(XTHEADMAC) Var(riscv_xthead_subext)
+
+Mask(XTHEADMEMIDX) Var(riscv_xthead_subext)
+
+Mask(XTHEADMEMPAIR) Var(riscv_xthead_subext)
+
+Mask(XTHEADSYNC) Var(riscv_xthead_subext)
+
TargetVariable
int riscv_xventana_subext
+Mask(XVENTANACONDOPS) Var(riscv_xventana_subext)
+
Enum
Name(isa_spec_class) Type(enum riscv_isa_spec_class)
Supported ISA specs (for use with the -misa-spec= option):
@@ -337,6 +520,9 @@ Enum(riscv_autovec_lmul) String(dynamic) Value(RVV_DYNAMIC)
Target RejectNegative Joined Enum(riscv_autovec_lmul) Var(riscv_autovec_lmul) Init(RVV_M1)
-param=riscv-autovec-lmul=<string> Set the RVV LMUL of auto-vectorization in the RISC-V port.
+madjust-lmul-cost
+Target Var(TARGET_ADJUST_LMUL_COST) Init(0)
+
-param=riscv-vector-abi
Target Undocumented Bool Var(riscv_vector_abi) Init(0)
Enable the use of vector registers for function arguments and return value.
diff --git a/gcc/config/riscv/sifive-7.md b/gcc/config/riscv/sifive-7.md
index 526278e..a63394c 100644
--- a/gcc/config/riscv/sifive-7.md
+++ b/gcc/config/riscv/sifive-7.md
@@ -44,7 +44,7 @@
(define_insn_reservation "sifive_7_jump" 1
(and (eq_attr "tune" "sifive_7")
- (eq_attr "type" "jump,call"))
+ (eq_attr "type" "jump,call,jalr"))
"sifive_7_B")
(define_insn_reservation "sifive_7_mul" 3
diff --git a/gcc/config/riscv/thead.cc b/gcc/config/riscv/thead.cc
index 507c912..be0cd7c 100644
--- a/gcc/config/riscv/thead.cc
+++ b/gcc/config/riscv/thead.cc
@@ -366,14 +366,15 @@ th_mempair_save_regs (rtx operands[4])
{
rtx set1 = gen_rtx_SET (operands[0], operands[1]);
rtx set2 = gen_rtx_SET (operands[2], operands[3]);
+ rtx dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
rtx insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set1, set2)));
RTX_FRAME_RELATED_P (insn) = 1;
- REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- copy_rtx (set1), REG_NOTES (insn));
-
- REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- copy_rtx (set2), REG_NOTES (insn));
+ XVECEXP (dwarf, 0, 0) = copy_rtx (set1);
+ XVECEXP (dwarf, 0, 1) = copy_rtx (set2);
+ RTX_FRAME_RELATED_P (XVECEXP (dwarf, 0, 0)) = 1;
+ RTX_FRAME_RELATED_P (XVECEXP (dwarf, 0, 1)) = 1;
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
}
/* Similar like riscv_restore_reg, but restores two registers from memory
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index b6cd872..0850475 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -295,83 +295,6 @@
RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
])
-(define_mode_iterator VLMULEXT2 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
-
- (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
-
- RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
-
- (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-
- (RVVM4DI "TARGET_VECTOR_ELEN_64") (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
-
- (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
-])
-
-(define_mode_iterator VLMULEXT4 [
- RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-
- RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
-
- (RVVM2HF "TARGET_VECTOR_ELEN_FP_16") (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
-
- RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
-
- (RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-
- (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
-
- (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
-])
-
-(define_mode_iterator VLMULEXT8 [
- RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-
- RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
-
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
-
- RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
-
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-
- (RVVM1DI "TARGET_VECTOR_ELEN_64")
-
- (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
-])
-
-(define_mode_iterator VLMULEXT16 [
- RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-
- RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
-
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
-
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
-
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VLMULEXT32 [
- RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
-
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VLMULEXT64 [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
-])
-
(define_mode_iterator VEI16 [
RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
@@ -1247,6 +1170,24 @@
(V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096")
])
+(define_mode_iterator VWWCONVERTI [
+ (RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+ (RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+ (RVVM2DI "TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+ (RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+
+ (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+ (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+ (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH")
+ (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 64")
+ (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 128")
+ (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 256")
+ (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 512")
+ (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024")
+ (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048")
+ (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096")
+])
+
(define_mode_iterator VQEXTI [
RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
@@ -1561,39 +1502,6 @@
RVVM4x2QI
])
-(define_mode_iterator VQI [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VHI [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VSI [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VDI [
- (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
- (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
-])
-
-(define_mode_iterator VHF [
- (RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VSF [
- (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-])
-
-(define_mode_iterator VDF [
- (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
- (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
-])
-
(define_mode_attr V_LMUL1 [
(RVVM8QI "RVVM1QI") (RVVM4QI "RVVM1QI") (RVVM2QI "RVVM1QI") (RVVM1QI "RVVM1QI") (RVVMF2QI "RVVM1QI") (RVVMF4QI "RVVM1QI") (RVVMF8QI "RVVM1QI")
@@ -2322,7 +2230,7 @@
(RVVM1x5QI "rvvm1qi") (RVVMF2x5QI "rvvmf2qi") (RVVMF4x5QI "rvvmf4qi") (RVVMF8x5QI "rvvmf8qi")
(RVVM2x4QI "rvvm2qi") (RVVM1x4QI "rvvm1qi") (RVVMF2x4QI "rvvmf2qi") (RVVMF4x4QI "rvvmf4qi") (RVVMF8x4QI "rvvmf8qi")
(RVVM2x3QI "rvvm2qi") (RVVM1x3QI "rvvm1qi") (RVVMF2x3QI "rvvmf2qi") (RVVMF4x3QI "rvvmf4qi") (RVVMF8x3QI "rvvmf8qi")
- (RVVM4x2QI "rvvm4qi") (RVVM2x2QI "rvvm1qi") (RVVM1x2QI "rvvm1qi") (RVVMF2x2QI "rvvmf2qi") (RVVMF4x2QI "rvvmf4qi") (RVVMF8x2QI "rvvmf8qi")
+ (RVVM4x2QI "rvvm4qi") (RVVM2x2QI "rvvm2qi") (RVVM1x2QI "rvvm1qi") (RVVMF2x2QI "rvvmf2qi") (RVVMF4x2QI "rvvmf4qi") (RVVMF8x2QI "rvvmf8qi")
(RVVM1x8HI "rvvm1hi") (RVVMF2x8HI "rvvmf2hi") (RVVMF4x8HI "rvvmf4hi")
(RVVM1x7HI "rvvm1hi") (RVVMF2x7HI "rvvmf2hi") (RVVMF4x7HI "rvvmf4hi")
@@ -3243,6 +3151,95 @@
(V512DF "v512si")
])
+;; NN indicates narrow twice
+(define_mode_attr VNNCONVERT [
+ (RVVM8DI "RVVM2HF") (RVVM4DI "RVVM1HF") (RVVM2DI "RVVMF2HF")
+ (RVVM1DI "RVVMF4HF")
+
+ (V1DI "V1HF") (V2DI "V2HF") (V4DI "V4HF") (V8DI "V8HF") (V16DI "V16HF")
+ (V32DI "V32HF") (V64DI "V64HF") (V128DI "V128HF") (V256DI "V256HF")
+ (V512DI "V512HF")
+])
+
+;; nn indicates narrow twice
+(define_mode_attr vnnconvert [
+ (RVVM8DI "rvvm2hf") (RVVM4DI "rvvm1hf") (RVVM2DI "rvvmf2hf")
+ (RVVM1DI "rvvmf4hf")
+
+ (V1DI "v1hf") (V2DI "v2hf") (V4DI "v4hf") (V8DI "v8hf") (V16DI "v16hf")
+ (V32DI "v32hf") (V64DI "v64hf") (V128DI "v128hf") (V256DI "v256hf")
+ (V512DI "v512hf")
+])
+
+;; Convert to int, long and long long
+(define_mode_attr V_I_L_LL_CONVERT [
+ (RVVM8SF "RVVM8SI") (RVVM4SF "RVVM4SI") (RVVM2SF "RVVM2SI")
+ (RVVM1SF "RVVM1SI") (RVVMF2SF "RVVMF2SI")
+
+ (RVVM8DF "RVVM8DI") (RVVM4DF "RVVM4DI") (RVVM2DF "RVVM2DI")
+ (RVVM1DF "RVVM1DI")
+
+ (V1SF "V1SI") (V2SF "V2SI") (V4SF "V4SI") (V8SF "V8SI") (V16SF "V16SI")
+ (V32SF "V32SI") (V64SF "V64SI") (V128SF "V128SI") (V256SF "V256SI")
+ (V512SF "V512SI") (V1024SF "V1024SI")
+
+ (V1DF "V1DI") (V2DF "V2DI") (V4DF "V4DI") (V8DF "V8DI") (V16DF "V16DI")
+ (V32DF "V32DI") (V64DF "V64DI") (V128DF "V128DI") (V256DF "V256DI")
+ (V512DF "V512DI")
+])
+
+(define_mode_attr v_i_l_ll_convert [
+ (RVVM8SF "rvvm8si") (RVVM4SF "rvvm4si") (RVVM2SF "rvvm2si")
+ (RVVM1SF "rvvm1si") (RVVMF2SF "rvvmf2si")
+
+ (RVVM8DF "rvvm8di") (RVVM4DF "rvvm4di") (RVVM2DF "rvvm2di")
+ (RVVM1DF "rvvm1di")
+
+ (V1SF "v1si") (V2SF "v2si") (V4SF "v4si") (V8SF "v8si") (V16SF "v16si")
+ (V32SF "v32si") (V64SF "v64si") (V128SF "v128si") (V256SF "v256si")
+ (V512SF "v512si") (V1024SF "v1024si")
+
+ (V1DF "v1di") (V2DF "v2di") (V4DF "v4di") (V8DF "v8di") (V16DF "v16di")
+ (V32DF "v32di") (V64DF "v64di") (V128DF "v128di") (V256DF "v256di")
+ (V512DF "v512di")
+])
+
+(define_mode_iterator V_VLS_FCONVERT_I_L_LL [
+ (RVVM8SF "TARGET_VECTOR_ELEN_FP_32")
+ (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
+ (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+
+ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64")
+ (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
+ (RVVM2DF "TARGET_VECTOR_ELEN_FP_64")
+ (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
+
+ (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32")
+ (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32")
+ (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32")
+ (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32")
+ (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256")
+ (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512")
+ (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024")
+ (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048")
+ (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096")
+
+ (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64")
+ (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64")
+ (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64")
+ (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64")
+ (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+ (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256")
+ (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512")
+ (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024")
+ (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048")
+ (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096")
+])
+
(define_mode_attr VDEMOTE [
(RVVM8DI "RVVM8SI") (RVVM4DI "RVVM4SI") (RVVM2DI "RVVM2SI") (RVVM1DI "RVVM1SI")
(V1DI "V1SI")
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index d5300a3..32955fb 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -939,7 +939,8 @@
(unspec:P [(match_operand:P 1 "const_int_operand" "i")] UNSPEC_VLMAX))]
"TARGET_VECTOR"
""
- [(set_attr "type" "vector")])
+ [(set_attr "type" "vsetvl_pre")]
+ )
;; Set VXRM
(define_insn "vxrmsi"
@@ -1037,7 +1038,7 @@
before spilling. The clobber scratch is used by spilling fractional
registers in IRA/LRA so it's too early. */
- if (riscv_vector::legitimize_move (operands[0], operands[1]))
+ if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
@@ -1093,7 +1094,7 @@
(match_operand:VB 1 "general_operand"))]
"TARGET_VECTOR"
{
- if (riscv_vector::legitimize_move (operands[0], operands[1]))
+ if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
@@ -1218,47 +1219,10 @@
(match_operand:VLS_AVL_IMM 1 "general_operand"))]
"TARGET_VECTOR"
{
- if (riscv_vector::legitimize_move (operands[0], operands[1]))
+ if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
-(define_insn_and_split "*mov<mode>_mem_to_mem"
- [(set (match_operand:VLS_AVL_IMM 0 "memory_operand")
- (match_operand:VLS_AVL_IMM 1 "memory_operand"))]
- "TARGET_VECTOR && can_create_pseudo_p ()"
- "#"
- "&& 1"
- [(const_int 0)]
- {
- if (GET_MODE_BITSIZE (<MODE>mode).to_constant () <= MAX_BITS_PER_WORD)
- {
- /* Opitmize the following case:
-
- typedef int8_t v2qi __attribute__ ((vector_size (2)));
- v2qi v = *(v2qi*)in;
- *(v2qi*)out = v;
-
- We prefer scalar load/store instead of vle.v/vse.v when
- the VLS modes size is smaller scalar mode. */
- machine_mode mode;
- unsigned size = GET_MODE_BITSIZE (<MODE>mode).to_constant ();
- if (FLOAT_MODE_P (<MODE>mode))
- mode = mode_for_size (size, MODE_FLOAT, 0).require ();
- else
- mode = mode_for_size (size, MODE_INT, 0).require ();
- emit_move_insn (gen_lowpart (mode, operands[0]),
- gen_lowpart (mode, operands[1]));
- }
- else
- {
- operands[1] = force_reg (<MODE>mode, operands[1]);
- emit_move_insn (operands[0], operands[1]);
- }
- DONE;
- }
- [(set_attr "type" "vmov")]
-)
-
(define_insn_and_split "*mov<mode>"
[(set (match_operand:VLS_AVL_IMM 0 "reg_or_mem_operand" "=vr, m, vr")
(match_operand:VLS_AVL_IMM 1 "reg_or_mem_operand" " m,vr, vr"))]
@@ -1274,7 +1238,7 @@
|| !register_operand (operands[1], <MODE>mode))"
[(const_int 0)]
{
- bool ok_p = riscv_vector::legitimize_move (operands[0], operands[1]);
+ bool ok_p = riscv_vector::legitimize_move (operands[0], &operands[1]);
gcc_assert (ok_p);
DONE;
}
@@ -1286,7 +1250,7 @@
(match_operand:VLS_AVL_REG 1 "general_operand"))]
"TARGET_VECTOR"
{
- bool ok_p = riscv_vector::legitimize_move (operands[0], operands[1]);
+ bool ok_p = riscv_vector::legitimize_move (operands[0], &operands[1]);
gcc_assert (ok_p);
DONE;
})
@@ -1363,6 +1327,19 @@
}
)
+;; According to RVV ISA:
+;; If an element accessed by a vector memory instruction is not naturally aligned to the size of the element,
+;; either the element is transferred successfully or an address misaligned exception is raised on that element.
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:V 0 "nonimmediate_operand")
+ (match_operand:V 1 "general_operand"))]
+ "TARGET_VECTOR && TARGET_VECTOR_MISALIGN_SUPPORTED"
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+)
+
;; -----------------------------------------------------------------
;; ---- Duplicate Operations
;; -----------------------------------------------------------------
diff --git a/gcc/config/rs6000/predicates.md b/gcc/config/rs6000/predicates.md
index 925f69c..ef7d3f2 100644
--- a/gcc/config/rs6000/predicates.md
+++ b/gcc/config/rs6000/predicates.md
@@ -2098,3 +2098,8 @@
else
return false;
})
+
+(define_predicate "lowpart_subreg_operator"
+ (and (match_code "subreg")
+ (match_test "subreg_lowpart_offset (mode, GET_MODE (SUBREG_REG (op)))
+ == SUBREG_BYTE (op)")))
diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
index cc9253b..cc24dd5 100644
--- a/gcc/config/rs6000/rs6000.cc
+++ b/gcc/config/rs6000/rs6000.cc
@@ -1904,7 +1904,7 @@ rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
if(GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
return 1;
- if (TARGET_P8_VECTOR && (mode == SImode))
+ if (TARGET_POPCNTD && mode == SImode)
return 1;
if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
@@ -10299,6 +10299,199 @@ rs6000_emit_set_const (rtx dest, rtx source)
return true;
}
+/* Check if C can be rotated to a negative value which 'lis' instruction is
+ able to load: 1..1xx0..0. If so, set *ROT to the number by which C is
+ rotated, and return true. Return false otherwise. */
+
+static bool
+can_be_rotated_to_negative_lis (HOST_WIDE_INT c, int *rot)
+{
+ /* case a. 1..1xxx0..01..1: up to 15 x's, at least 16 0's. */
+ int leading_ones = clz_hwi (~c);
+ int tailing_ones = ctz_hwi (~c);
+ int middle_zeros = ctz_hwi (c >> tailing_ones);
+ if (middle_zeros >= 16 && leading_ones + tailing_ones >= 33)
+ {
+ *rot = HOST_BITS_PER_WIDE_INT - tailing_ones;
+ return true;
+ }
+
+ /* case b. xx0..01..1xx: some of 15 x's (and some of 16 0's) are
+ rotated over the highest bit. */
+ int pos_one = clz_hwi ((c << 16) >> 16);
+ middle_zeros = ctz_hwi (c >> (HOST_BITS_PER_WIDE_INT - pos_one));
+ int middle_ones = clz_hwi (~(c << pos_one));
+ if (middle_zeros >= 16 && middle_ones >= 33)
+ {
+ *rot = pos_one;
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if value C can be built by 2 instructions: one is 'li or lis',
+ another is rotldi.
+
+ If so, *SHIFT is set to the shift operand of rotldi(rldicl), and *MASK
+ is set to the mask operand of rotldi(rldicl), and return true.
+ Return false otherwise. */
+
+static bool
+can_be_built_by_li_lis_and_rotldi (HOST_WIDE_INT c, int *shift,
+ HOST_WIDE_INT *mask)
+{
+ /* If C or ~C contains at least 49 successive zeros, then C can be rotated
+ to/from a positive or negative value that 'li' is able to load. */
+ int n;
+ if (can_be_rotated_to_lowbits (c, 15, &n)
+ || can_be_rotated_to_lowbits (~c, 15, &n)
+ || can_be_rotated_to_negative_lis (c, &n))
+ {
+ *mask = HOST_WIDE_INT_M1;
+ *shift = HOST_BITS_PER_WIDE_INT - n;
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if value C can be built by 2 instructions: one is 'li or lis',
+ another is rldicl.
+
+ If so, *SHIFT is set to the shift operand of rldicl, and *MASK is set to
+ the mask operand of rldicl, and return true.
+ Return false otherwise. */
+
+static bool
+can_be_built_by_li_lis_and_rldicl (HOST_WIDE_INT c, int *shift,
+ HOST_WIDE_INT *mask)
+{
+ /* Leading zeros may be cleaned by rldicl with a mask. Change leading zeros
+ to ones and then recheck it. */
+ int lz = clz_hwi (c);
+
+ /* If lz == 0, the left shift is undefined. */
+ if (!lz)
+ return false;
+
+ HOST_WIDE_INT unmask_c
+ = c | (HOST_WIDE_INT_M1U << (HOST_BITS_PER_WIDE_INT - lz));
+ int n;
+ if (can_be_rotated_to_lowbits (~unmask_c, 15, &n)
+ || can_be_rotated_to_negative_lis (unmask_c, &n))
+ {
+ *mask = HOST_WIDE_INT_M1U >> lz;
+ *shift = n == 0 ? 0 : HOST_BITS_PER_WIDE_INT - n;
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if value C can be built by 2 instructions: one is 'li or lis',
+ another is rldicr.
+
+ If so, *SHIFT is set to the shift operand of rldicr, and *MASK is set to
+ the mask operand of rldicr, and return true.
+ Return false otherwise. */
+
+static bool
+can_be_built_by_li_lis_and_rldicr (HOST_WIDE_INT c, int *shift,
+ HOST_WIDE_INT *mask)
+{
+  /* Trailing zeros may be cleaned by rldicr with a mask.  Change trailing zeros
+ to ones and then recheck it. */
+ int tz = ctz_hwi (c);
+
+ /* If tz == HOST_BITS_PER_WIDE_INT, the left shift is undefined. */
+ if (tz >= HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ HOST_WIDE_INT unmask_c = c | ((HOST_WIDE_INT_1U << tz) - 1);
+ int n;
+ if (can_be_rotated_to_lowbits (~unmask_c, 15, &n)
+ || can_be_rotated_to_negative_lis (unmask_c, &n))
+ {
+ *mask = HOST_WIDE_INT_M1U << tz;
+ *shift = HOST_BITS_PER_WIDE_INT - n;
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if value C can be built by 2 instructions: one is 'li', another is
+ rldic.
+
+ If so, *SHIFT is set to the 'shift' operand of rldic; and *MASK is set
+ to the mask value about the 'mb' operand of rldic; and return true.
+ Return false otherwise. */
+
+static bool
+can_be_built_by_li_and_rldic (HOST_WIDE_INT c, int *shift, HOST_WIDE_INT *mask)
+{
+ /* There are 49 successive ones in the negative value of 'li'. */
+ int ones = 49;
+
+ /* 1..1xx1..1: negative value of li --> 0..01..1xx0..0:
+ right bits are shifted as 0's, and left 1's(and x's) are cleaned. */
+ int tz = ctz_hwi (c);
+ int lz = clz_hwi (c);
+
+ /* If lz == HOST_BITS_PER_WIDE_INT, the left shift is undefined. */
+ if (lz >= HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ int middle_ones = clz_hwi (~(c << lz));
+ if (tz + lz + middle_ones >= ones
+ && (tz - lz) < HOST_BITS_PER_WIDE_INT
+ && tz < HOST_BITS_PER_WIDE_INT)
+ {
+ *mask = ((1LL << (HOST_BITS_PER_WIDE_INT - tz - lz)) - 1LL) << tz;
+ *shift = tz;
+ return true;
+ }
+
+ /* 1..1xx1..1 --> 1..1xx0..01..1: some 1's(following x's) are cleaned. */
+ int leading_ones = clz_hwi (~c);
+ int tailing_ones = ctz_hwi (~c);
+ int middle_zeros = ctz_hwi (c >> tailing_ones);
+ if (leading_ones + tailing_ones + middle_zeros >= ones
+ && middle_zeros < HOST_BITS_PER_WIDE_INT)
+ {
+ *mask = ~(((1ULL << middle_zeros) - 1ULL) << tailing_ones);
+ *shift = tailing_ones + middle_zeros;
+ return true;
+ }
+
+  /* xx1..1xx: --> xx0..01..1xx: some 1's (following x's) are cleaned.  */
+ /* Get the position for the first bit of successive 1.
+ The 24th bit would be in successive 0 or 1. */
+ HOST_WIDE_INT low_mask = (HOST_WIDE_INT_1U << 24) - HOST_WIDE_INT_1U;
+ int pos_first_1 = ((c & (low_mask + 1)) == 0)
+ ? clz_hwi (c & low_mask)
+ : HOST_BITS_PER_WIDE_INT - ctz_hwi (~(c | low_mask));
+
+ /* Make sure the left and right shifts are defined. */
+ if (!IN_RANGE (pos_first_1, 1, HOST_BITS_PER_WIDE_INT-1))
+ return false;
+
+ middle_ones = clz_hwi (~c << pos_first_1);
+ middle_zeros = ctz_hwi (c >> (HOST_BITS_PER_WIDE_INT - pos_first_1));
+ if (pos_first_1 < HOST_BITS_PER_WIDE_INT
+ && middle_ones + middle_zeros < HOST_BITS_PER_WIDE_INT
+ && middle_ones + middle_zeros >= ones)
+ {
+ *mask = ~(((1ULL << middle_zeros) - 1LL)
+ << (HOST_BITS_PER_WIDE_INT - pos_first_1));
+ *shift = HOST_BITS_PER_WIDE_INT - pos_first_1 + middle_zeros;
+ return true;
+ }
+
+ return false;
+}
+
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
Output insns to set DEST equal to the constant C as a series of
lis, ori and shl instructions. */
@@ -10307,15 +10500,14 @@ static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
rtx temp;
+ int shift;
+ HOST_WIDE_INT mask;
HOST_WIDE_INT ud1, ud2, ud3, ud4;
ud1 = c & 0xffff;
- c = c >> 16;
- ud2 = c & 0xffff;
- c = c >> 16;
- ud3 = c & 0xffff;
- c = c >> 16;
- ud4 = c & 0xffff;
+ ud2 = (c >> 16) & 0xffff;
+ ud3 = (c >> 32) & 0xffff;
+ ud4 = (c >> 48) & 0xffff;
if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
@@ -10346,6 +10538,22 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
emit_move_insn (dest, gen_rtx_XOR (DImode, temp,
GEN_INT ((ud2 ^ 0xffff) << 16)));
}
+ else if (can_be_built_by_li_lis_and_rotldi (c, &shift, &mask)
+ || can_be_built_by_li_lis_and_rldicl (c, &shift, &mask)
+ || can_be_built_by_li_lis_and_rldicr (c, &shift, &mask)
+ || can_be_built_by_li_and_rldic (c, &shift, &mask))
+ {
+ temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
+ unsigned HOST_WIDE_INT imm = (c | ~mask);
+ imm = (imm >> shift) | (imm << (HOST_BITS_PER_WIDE_INT - shift));
+
+ emit_move_insn (temp, GEN_INT (imm));
+ if (shift != 0)
+ temp = gen_rtx_ROTATE (DImode, temp, GEN_INT (shift));
+ if (mask != HOST_WIDE_INT_M1)
+ temp = gen_rtx_AND (DImode, temp, GEN_INT (mask));
+ emit_move_insn (dest, temp);
+ }
else if (ud3 == 0 && ud4 == 0)
{
temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 7b583d7..2a1b5ec 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -108,7 +108,6 @@
UNSPEC_TOCREL
UNSPEC_MACHOPIC_OFFSET
UNSPEC_BPERM
- UNSPEC_COPYSIGN
UNSPEC_PARITY
UNSPEC_CMPB
UNSPEC_FCTIW
@@ -643,6 +642,9 @@
(define_code_iterator any_fix [fix unsigned_fix])
(define_code_iterator any_float [float unsigned_float])
+; Shift right.
+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
+
(define_code_attr u [(sign_extend "")
(zero_extend "u")
(fix "")
@@ -5379,13 +5381,10 @@
operands[5] = CONST0_RTX (<MODE>mode);
})
-;; Use an unspec rather providing an if-then-else in RTL, to prevent the
-;; compiler from optimizing -0.0
(define_insn "copysign<mode>3_fcpsgn"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d,wa")
- (unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "d,wa")
- (match_operand:SFDF 2 "gpc_reg_operand" "d,wa")]
- UNSPEC_COPYSIGN))]
+ (copysign:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "d,wa")
+ (match_operand:SFDF 2 "gpc_reg_operand" "d,wa")))]
"TARGET_HARD_FLOAT && (TARGET_CMPB || VECTOR_UNIT_VSX_P (<MODE>mode))"
"@
fcpsgn %0,%2,%1
@@ -6719,10 +6718,18 @@
[(set (match_operand:DI 0 "gpc_reg_operand" "=d")
(unspec:DI [(match_operand:SFDF 1 "gpc_reg_operand" "<rreg2>")]
UNSPEC_FCTID))]
- "TARGET_HARD_FLOAT && TARGET_FPRND"
+ "TARGET_HARD_FLOAT"
"fctid %0,%1"
[(set_attr "type" "fp")])
+(define_insn "lrint<mode>si2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=d")
+ (unspec:SI [(match_operand:SFDF 1 "gpc_reg_operand" "<rreg2>")]
+ UNSPEC_FCTIW))]
+ "TARGET_HARD_FLOAT && TARGET_POPCNTD"
+ "fctiw %0,%1"
+ [(set_attr "type" "fp")])
+
(define_insn "btrunc<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d,wa")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "d,wa")]
@@ -7630,7 +7637,7 @@
(define_insn "*movsi_internal1"
[(set (match_operand:SI 0 "nonimmediate_operand"
- "=r, r,
+ "=r, r, d,
r, d, v,
m, ?Z, ?Z,
r, r, r, r,
@@ -7639,7 +7646,7 @@
wa, r,
r, *h, *h")
(match_operand:SI 1 "input_operand"
- "r, U,
+ "r, U, d,
m, ?Z, ?Z,
r, d, v,
I, L, eI, n,
@@ -7652,6 +7659,7 @@
"@
mr %0,%1
la %0,%a1
+ fmr %0,%1
lwz%U1%X1 %0,%1
lfiwzx %0,%y1
lxsiwzx %x0,%y1
@@ -7675,7 +7683,7 @@
mt%0 %1
nop"
[(set_attr "type"
- "*, *,
+ "*, *, fpsimple,
load, fpload, fpload,
store, fpstore, fpstore,
*, *, *, *,
@@ -7684,7 +7692,7 @@
mtvsr, mfvsr,
*, *, *")
(set_attr "length"
- "*, *,
+ "*, *, *,
*, *, *,
*, *, *,
*, *, *, 8,
@@ -7693,9 +7701,9 @@
*, *,
*, *, *")
(set_attr "isa"
- "*, *,
- *, p8v, p8v,
- *, p8v, p8v,
+ "*, *, *,
+ *, p7, p8v,
+ *, *, p8v,
*, *, p10, *,
p8v, p9v, p9v, p8v,
p9v, p8v, p9v,
@@ -8310,13 +8318,26 @@
{
rtx op0 = operands[0];
rtx op1 = operands[1];
- rtx op2 = operands[2];
- rtx op1_di = gen_rtx_REG (DImode, REGNO (op1));
- /* Move SF value to upper 32-bits for xscvspdpn. */
- emit_insn (gen_ashldi3 (op2, op1_di, GEN_INT (32)));
- emit_insn (gen_p8_mtvsrd_sf (op0, op2));
- emit_insn (gen_vsx_xscvspdpn_directmove (op0, op0));
+ /* Move lowpart 32-bits from register for SFmode. */
+ if (TARGET_P9_VECTOR)
+ {
+ /* Using mtvsrws;xscvspdpn. */
+ rtx op0_v = gen_rtx_REG (V4SImode, REGNO (op0));
+ emit_insn (gen_vsx_splat_v4si (op0_v, op1));
+ emit_insn (gen_vsx_xscvspdpn_directmove (op0, op0));
+ }
+ else
+ {
+ rtx op2 = operands[2];
+ rtx op1_di = gen_rtx_REG (DImode, REGNO (op1));
+
+ /* Using sldi;mtvsrd;xscvspdpn. */
+ emit_insn (gen_ashldi3 (op2, op1_di, GEN_INT (32)));
+ emit_insn (gen_p8_mtvsrd_sf (op0, op2));
+ emit_insn (gen_vsx_xscvspdpn_directmove (op0, op0));
+ }
+
DONE;
}
[(set_attr "length"
@@ -8333,14 +8354,13 @@
;; {%1:SF=unspec[r122:DI>>0x20#0] 86;clobber scratch;}
;; split it before reload with "and mask" to avoid generating shift right
;; 32 bit then shift left 32 bit.
-(define_insn_and_split "movsf_from_si2"
+(define_insn_and_split "movsf_from_si2_<code>"
[(set (match_operand:SF 0 "gpc_reg_operand" "=wa")
(unspec:SF
- [(subreg:SI
- (ashiftrt:DI
+ [(match_operator:SI 3 "lowpart_subreg_operator"
+ [(any_shiftrt:DI
(match_operand:DI 1 "input_operand" "r")
- (const_int 32))
- 0)]
+ (const_int 32))])]
UNSPEC_SF_FROM_SI))
(clobber (match_scratch:DI 2 "=r"))]
"TARGET_NO_SF_SUBREG"
@@ -12419,33 +12439,26 @@
DONE;
})
-(define_insn "stack_protect_setsi"
- [(set (match_operand:SI 0 "memory_operand" "=m")
- (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
- (set (match_scratch:SI 2 "=&r") (const_int 0))]
- "TARGET_32BIT"
- "lwz%U1%X1 %2,%1\;stw%U0%X0 %2,%0\;li %2,0"
- [(set_attr "type" "three")
- (set_attr "length" "12")])
-
;; We can't use the prefixed attribute here because there are two memory
;; instructions. We can't split the insn due to the fact that this operation
;; needs to be done in one piece.
-(define_insn "stack_protect_setdi"
- [(set (match_operand:DI 0 "memory_operand" "=Y")
- (unspec:DI [(match_operand:DI 1 "memory_operand" "Y")] UNSPEC_SP_SET))
- (set (match_scratch:DI 2 "=&r") (const_int 0))]
- "TARGET_64BIT"
+(define_insn "stack_protect_set<mode>"
+ [(set (match_operand:P 0 "memory_operand" "=YZ")
+ (unspec:P [(match_operand:P 1 "memory_operand" "YZ")] UNSPEC_SP_SET))
+ (set (match_scratch:P 2 "=&r") (const_int 0))]
+ ""
{
- if (prefixed_memory (operands[1], DImode))
- output_asm_insn ("pld %2,%1", operands);
+ if (prefixed_memory (operands[1], <MODE>mode))
+ /* Prefixed load only supports D-form but no update and X-form. */
+ output_asm_insn ("p<ptrload> %2,%1", operands);
else
- output_asm_insn ("ld%U1%X1 %2,%1", operands);
+ output_asm_insn ("<ptrload>%U1%X1 %2,%1", operands);
- if (prefixed_memory (operands[0], DImode))
- output_asm_insn ("pstd %2,%0", operands);
+ if (prefixed_memory (operands[0], <MODE>mode))
+ /* Prefixed store only supports D-form but no update and X-form. */
+ output_asm_insn ("pst<wd> %2,%0", operands);
else
- output_asm_insn ("std%U0%X0 %2,%0", operands);
+ output_asm_insn ("st<wd>%U0%X0 %2,%0", operands);
return "li %2,0";
}
@@ -12491,45 +12504,33 @@
DONE;
})
-(define_insn "stack_protect_testsi"
- [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
- (unspec:CCEQ [(match_operand:SI 1 "memory_operand" "m,m")
- (match_operand:SI 2 "memory_operand" "m,m")]
- UNSPEC_SP_TEST))
- (set (match_scratch:SI 4 "=r,r") (const_int 0))
- (clobber (match_scratch:SI 3 "=&r,&r"))]
- "TARGET_32BIT"
- "@
- lwz%U1%X1 %3,%1\;lwz%U2%X2 %4,%2\;xor. %3,%3,%4\;li %4,0
- lwz%U1%X1 %3,%1\;lwz%U2%X2 %4,%2\;cmplw %0,%3,%4\;li %3,0\;li %4,0"
- [(set_attr "length" "16,20")])
-
;; We can't use the prefixed attribute here because there are two memory
;; instructions. We can't split the insn due to the fact that this operation
;; needs to be done in one piece.
-(define_insn "stack_protect_testdi"
+(define_insn "stack_protect_test<mode>"
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
- (unspec:CCEQ [(match_operand:DI 1 "memory_operand" "Y,Y")
- (match_operand:DI 2 "memory_operand" "Y,Y")]
+ (unspec:CCEQ [(match_operand:P 1 "memory_operand" "YZ,YZ")
+ (match_operand:P 2 "memory_operand" "YZ,YZ")]
UNSPEC_SP_TEST))
- (set (match_scratch:DI 4 "=r,r") (const_int 0))
- (clobber (match_scratch:DI 3 "=&r,&r"))]
- "TARGET_64BIT"
+ (set (match_scratch:P 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:P 3 "=&r,&r"))]
+ ""
{
- if (prefixed_memory (operands[1], DImode))
- output_asm_insn ("pld %3,%1", operands);
+ if (prefixed_memory (operands[1], <MODE>mode))
+ /* Prefixed load only supports D-form but no update and X-form. */
+ output_asm_insn ("p<ptrload> %3,%1", operands);
else
- output_asm_insn ("ld%U1%X1 %3,%1", operands);
+ output_asm_insn ("<ptrload>%U1%X1 %3,%1", operands);
- if (prefixed_memory (operands[2], DImode))
- output_asm_insn ("pld %4,%2", operands);
+ if (prefixed_memory (operands[2], <MODE>mode))
+ output_asm_insn ("p<ptrload> %4,%2", operands);
else
- output_asm_insn ("ld%U2%X2 %4,%2", operands);
+ output_asm_insn ("<ptrload>%U2%X2 %4,%2", operands);
if (which_alternative == 0)
output_asm_insn ("xor. %3,%3,%4", operands);
else
- output_asm_insn ("cmpld %0,%3,%4\;li %3,0", operands);
+ output_asm_insn ("cmpl<wd> %0,%3,%4\;li %3,0", operands);
return "li %4,0";
}
@@ -14984,10 +14985,9 @@
(define_insn "copysign<mode>3_hard"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
- (unspec:IEEE128
- [(match_operand:IEEE128 1 "altivec_register_operand" "v")
- (match_operand:IEEE128 2 "altivec_register_operand" "v")]
- UNSPEC_COPYSIGN))]
+ (copysign:IEEE128
+ (match_operand:IEEE128 1 "altivec_register_operand" "v")
+ (match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscpsgnqp %0,%2,%1"
[(set_attr "type" "vecmove")
@@ -14995,10 +14995,9 @@
(define_insn "copysign<mode>3_soft"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
- (unspec:IEEE128
- [(match_operand:IEEE128 1 "altivec_register_operand" "v")
- (match_operand:IEEE128 2 "altivec_register_operand" "v")]
- UNSPEC_COPYSIGN))
+ (copysign:IEEE128
+ (match_operand:IEEE128 1 "altivec_register_operand" "v")
+ (match_operand:IEEE128 2 "altivec_register_operand" "v")))
(clobber (match_scratch:IEEE128 3 "=&v"))]
"!TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscpsgndp %x3,%x2,%x1\;xxpermdi %x0,%x3,%x1,1"
diff --git a/gcc/config/rs6000/vector.md b/gcc/config/rs6000/vector.md
index 1ae04c8..f4fc620 100644
--- a/gcc/config/rs6000/vector.md
+++ b/gcc/config/rs6000/vector.md
@@ -332,8 +332,8 @@
(define_expand "vector_copysign<mode>3"
[(set (match_operand:VEC_F 0 "vfloat_operand")
- (unspec:VEC_F [(match_operand:VEC_F 1 "vfloat_operand")
- (match_operand:VEC_F 2 "vfloat_operand")] UNSPEC_COPYSIGN))]
+ (copysign:VEC_F (match_operand:VEC_F 1 "vfloat_operand")
+ (match_operand:VEC_F 2 "vfloat_operand")))]
"VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index 4de41e7..f3b4022 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -2233,10 +2233,9 @@
;; Copy sign
(define_insn "vsx_copysign<mode>3"
[(set (match_operand:VSX_F 0 "vsx_register_operand" "=wa")
- (unspec:VSX_F
- [(match_operand:VSX_F 1 "vsx_register_operand" "wa")
- (match_operand:VSX_F 2 "vsx_register_operand" "wa")]
- UNSPEC_COPYSIGN))]
+ (copysign:VSX_F
+ (match_operand:VSX_F 1 "vsx_register_operand" "wa")
+ (match_operand:VSX_F 2 "vsx_register_operand" "wa")))]
"VECTOR_UNIT_VSX_P (<MODE>mode)"
"xvcpsgn<sd>p %x0,%x2,%x1"
[(set_attr "type" "<VStype_simple>")])
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index 9631b2a..3f29ba2 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -124,7 +124,6 @@
; Byte-wise Population Count
UNSPEC_POPCNT
- UNSPEC_COPYSIGN
; Load FP Integer
UNSPEC_FPINT_FLOOR
@@ -11918,9 +11917,8 @@
(define_insn "copysign<mode>3<tf_fpr>"
[(set (match_operand:FP 0 "register_operand" "=f")
- (unspec:FP [(match_operand:FP 1 "register_operand" "<fT0>")
- (match_operand:FP 2 "register_operand" "f")]
- UNSPEC_COPYSIGN))]
+ (copysign:FP (match_operand:FP 1 "register_operand" "<fT0>")
+ (match_operand:FP 2 "register_operand" "f")))]
"TARGET_Z196"
"cpsdr\t%0,%2,%1"
[(set_attr "op_type" "RRF")
diff --git a/gcc/config/s390/vector.md b/gcc/config/s390/vector.md
index f0e9ed3..7d1eb36 100644
--- a/gcc/config/s390/vector.md
+++ b/gcc/config/s390/vector.md
@@ -1154,14 +1154,14 @@
(plus:V16QI (match_dup 2) (match_dup 3)))
; Generate mask for the odd numbered byte elements
(set (match_dup 3)
- (const_vector:V16QI [(const_int 0) (const_int 255)
- (const_int 0) (const_int 255)
- (const_int 0) (const_int 255)
- (const_int 0) (const_int 255)
- (const_int 0) (const_int 255)
- (const_int 0) (const_int 255)
- (const_int 0) (const_int 255)
- (const_int 0) (const_int 255)]))
+ (const_vector:V16QI [(const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)
+ (const_int 0) (const_int -1)]))
; Zero out the even indexed bytes
(set (match_operand:V8HI 0 "register_operand" "=v")
(and:V8HI (subreg:V8HI (match_dup 2) 0)
diff --git a/gcc/configure b/gcc/configure
index c43bde8..9f5b708 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -26361,6 +26361,39 @@ $as_echo "#define HAVE_AS_MMACOSX_VERSION_MIN_OPTION 1" >>confdefs.h
fi
+ if test x$gcc_cv_as_mmacosx_version_min = "xyes"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .build_version" >&5
+$as_echo_n "checking assembler for .build_version... " >&6; }
+if ${gcc_cv_as_darwin_build_version+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ gcc_cv_as_darwin_build_version=no
+ if test x$gcc_cv_as != x; then
+ $as_echo ' .build_version macos, 10, 14 sdk_version 10, 14' > conftest.s
+ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -mmacosx-version-min=10.14 -o conftest.o conftest.s >&5'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }
+ then
+ gcc_cv_as_darwin_build_version=yes
+ else
+ echo "configure: failed program was" >&5
+ cat conftest.s >&5
+ fi
+ rm -f conftest.o conftest.s
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_darwin_build_version" >&5
+$as_echo "$gcc_cv_as_darwin_build_version" >&6; }
+if test $gcc_cv_as_darwin_build_version = yes; then
+
+$as_echo "#define HAVE_AS_MACOS_BUILD_VERSION 1" >>confdefs.h
+
+fi
+
+ fi
;;
esac
diff --git a/gcc/configure.ac b/gcc/configure.ac
index fb8e32f..c10e007 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -4374,6 +4374,14 @@ case "$target_os" in
[-mmacosx-version-min=10.1], [.text],,
[AC_DEFINE(HAVE_AS_MMACOSX_VERSION_MIN_OPTION, 1,
[Define if your macOS assembler supports the -mmacos-version-min option.])])
+ if test x$gcc_cv_as_mmacosx_version_min = "xyes"; then
+ gcc_GAS_CHECK_FEATURE([.build_version],
+ gcc_cv_as_darwin_build_version,
+ [-mmacosx-version-min=10.14],
+ [ .build_version macos, 10, 14 sdk_version 10, 14],,
+ [AC_DEFINE(HAVE_AS_MACOS_BUILD_VERSION, 1,
+ [Define if your macOS assembler supports .build_version directives])])
+ fi
;;
esac
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 34f20b2..702402f 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,83 @@
+2023-10-17 Marek Polacek <polacek@redhat.com>
+
+ PR c++/111840
+ * parser.cc (cp_parser_simple_declaration): Do cp_parser_error
+ for FUNCTION_DECLs.
+
+2023-10-17 Marek Polacek <polacek@redhat.com>
+
+ PR c++/111660
+ * cp-gimplify.cc (cp_fold_immediate_r) <case COND_EXPR>: Don't
+ handle it here.
+ (cp_fold_r): Handle COND_EXPR here.
+
+2023-10-17 Jason Merrill <jason@redhat.com>
+
+ * mangle.cc (abi_check): New.
+ (write_prefix, write_unqualified_name, write_discriminator)
+ (write_type, write_member_name, write_expression)
+ (write_template_arg, write_template_param): Use it.
+ (start_mangling): Assign from {}.
+ * cp-tree.h: Update comment.
+
+2023-10-17 Nathaniel Shead <nathanieloshead@gmail.com>
+
+ * constexpr.cc (cxx_eval_dynamic_cast_fn): Add missing
+ auto_diagnostic_group.
+ (cxx_eval_call_expression): Likewise.
+ (diag_array_subscript): Likewise.
+ (outside_lifetime_error): Likewise.
+ (potential_constant_expression_1): Likewise.
+
+2023-10-16 Jason Merrill <jason@redhat.com>
+
+ * parser.cc (cp_parser_fold_expression): Track location range.
+ * semantics.cc (finish_unary_fold_expr)
+ (finish_left_unary_fold_expr, finish_right_unary_fold_expr)
+ (finish_binary_fold_expr): Add location parm.
+ * constraint.cc (finish_shorthand_constraint): Pass it.
+ * pt.cc (convert_generic_types_to_packs): Likewise.
+ * cp-tree.h: Adjust.
+
+2023-10-16 Marek Polacek <polacek@redhat.com>
+
+ PR c++/111272
+ * constexpr.cc (explain_invalid_constexpr_fn): Also check the body of
+ a constructor in C++14 and up.
+
+2023-10-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/102989
+ * module.cc (trees_out::start, trees_in::start): Remove
+ TREE_INT_CST_OFFSET_NUNITS handling.
+
+2023-10-10 Jason Merrill <jason@redhat.com>
+
+ PR c++/109422
+ * mangle.cc (write_template_param): Also mangle level.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ * module.cc (module_state::read_location): Update for renaming of
+ get_combined_adhoc_loc.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * error.cc (print_instantiation_partial_context_line): Call
+ diagnostic_show_locus.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * error.cc: Update for "m_" prefixes to text_info fields.
+
+2023-09-30 Eugene Rozenfeld <erozen@microsoft.com>
+
+ * Make-lang.in: Make create_fdas_for_cc1plus target not .PHONY
+
+2023-09-28 Richard Sandiford <richard.sandiford@arm.com>
+
+ * constexpr.cc (cxx_fold_indirect_ref): Remove unused variables.
+
2023-09-22 Jason Merrill <jason@redhat.com>
PR c++/111357
diff --git a/gcc/cp/Make-lang.in b/gcc/cp/Make-lang.in
index ba5e876..2727fb7 100644
--- a/gcc/cp/Make-lang.in
+++ b/gcc/cp/Make-lang.in
@@ -189,8 +189,6 @@ cp/name-lookup.o: $(srcdir)/cp/std-name-hint.h
components_in_prev = "bfd opcodes binutils fixincludes gas gcc gmp mpfr mpc isl gold intl ld libbacktrace libcpp libcody libdecnumber libiberty libiberty-linker-plugin libiconv zlib lto-plugin libctf libsframe"
components_in_prev_target = "libstdc++-v3 libsanitizer libvtv libgcc libbacktrace libphobos zlib libgomp libatomic"
-.PHONY: create_fdas_for_cc1plus
-
cc1plus.fda: create_fdas_for_cc1plus
$(PROFILE_MERGER) $(shell ls -ha cc1plus_*.fda) --output_file cc1plus.fda -gcov_version 2
@@ -214,6 +212,8 @@ create_fdas_for_cc1plus: ../stage1-gcc/cc1plus$(exeext) ../prev-gcc/$(PERF_DATA)
$(CREATE_GCOV) -binary ../prev-gcc/cc1plus$(exeext) -gcov $$profile_name -profile $$perf_path -gcov_version 2; \
fi; \
done;
+
+ $(STAMP) $@
#
# Build hooks:
diff --git a/gcc/cp/constexpr.cc b/gcc/cp/constexpr.cc
index 0f948db..7c8f2cc 100644
--- a/gcc/cp/constexpr.cc
+++ b/gcc/cp/constexpr.cc
@@ -1098,7 +1098,15 @@ explain_invalid_constexpr_fn (tree fun)
body = massage_constexpr_body (fun, body);
require_potential_rvalue_constant_expression (body);
if (DECL_CONSTRUCTOR_P (fun))
- cx_check_missing_mem_inits (DECL_CONTEXT (fun), body, true);
+ {
+ cx_check_missing_mem_inits (DECL_CONTEXT (fun), body, true);
+ if (cxx_dialect > cxx11)
+ {
+ /* Also check the body, not just the ctor-initializer. */
+ body = DECL_SAVED_TREE (fun);
+ require_potential_rvalue_constant_expression (body);
+ }
+ }
}
}
}
@@ -2429,6 +2437,7 @@ cxx_eval_dynamic_cast_fn (const constexpr_ctx *ctx, tree call,
{
if (!ctx->quiet)
{
+ auto_diagnostic_group d;
error_at (loc, "reference %<dynamic_cast%> failed");
inform (loc, "dynamic type %qT of its operand does "
"not have a base class of type %qT",
@@ -2484,6 +2493,7 @@ cxx_eval_dynamic_cast_fn (const constexpr_ctx *ctx, tree call,
{
if (!ctx->quiet)
{
+ auto_diagnostic_group d;
error_at (loc, "reference %<dynamic_cast%> failed");
inform (loc, "static type %qT of its operand is a "
"non-public base class of dynamic type %qT",
@@ -2516,6 +2526,7 @@ cxx_eval_dynamic_cast_fn (const constexpr_ctx *ctx, tree call,
{
if (!ctx->quiet)
{
+ auto_diagnostic_group d;
error_at (loc, "reference %<dynamic_cast%> failed");
inform (loc, "static type %qT of its operand is a non-public"
" base class of dynamic type %qT", objtype, mdtype);
@@ -2537,6 +2548,7 @@ cxx_eval_dynamic_cast_fn (const constexpr_ctx *ctx, tree call,
{
if (!ctx->quiet)
{
+ auto_diagnostic_group d;
error_at (loc, "reference %<dynamic_cast%> failed");
if (b_kind == bk_ambig)
inform (loc, "%qT is an ambiguous base class of dynamic "
@@ -2814,6 +2826,7 @@ cxx_eval_call_expression (const constexpr_ctx *ctx, tree t,
{
if (!ctx->quiet)
{
+ auto_diagnostic_group d;
error_at (loc, "array deallocation of object "
"allocated with non-array "
"allocation");
@@ -2836,6 +2849,7 @@ cxx_eval_call_expression (const constexpr_ctx *ctx, tree t,
{
if (!ctx->quiet)
{
+ auto_diagnostic_group d;
error_at (loc, "non-array deallocation of "
"object allocated with array "
"allocation");
@@ -4185,6 +4199,7 @@ diag_array_subscript (location_t loc, const constexpr_ctx *ctx, tree array, tree
STRIP_ANY_LOCATION_WRAPPER (array);
if (DECL_P (array))
{
+ auto_diagnostic_group d;
if (TYPE_DOMAIN (arraytype))
error_at (loc, "array subscript value %qE is outside the bounds "
"of array %qD of type %qT", sidx, array, arraytype);
@@ -5830,6 +5845,7 @@ cxx_eval_indirect_ref (const constexpr_ctx *ctx, tree t,
static void
outside_lifetime_error (location_t loc, tree r)
{
+ auto_diagnostic_group d;
if (DECL_NAME (r) == heap_deleted_identifier)
{
/* Provide a more accurate message for deleted variables. */
@@ -9452,6 +9468,7 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
if (flags & tf_error)
{
tree cap = DECL_CAPTURED_VARIABLE (t);
+ auto_diagnostic_group d;
if (constexpr_error (input_location, fundef_p,
"lambda capture of %qE is not a "
"constant expression", cap)
diff --git a/gcc/cp/constraint.cc b/gcc/cp/constraint.cc
index c9e4e70..64b64e1 100644
--- a/gcc/cp/constraint.cc
+++ b/gcc/cp/constraint.cc
@@ -1607,7 +1607,8 @@ finish_shorthand_constraint (tree decl, tree constr)
/* Make the check a fold-expression if needed. */
if (apply_to_each_p && declared_pack_p)
- check = finish_left_unary_fold_expr (check, TRUTH_ANDIF_EXPR);
+ check = finish_left_unary_fold_expr (DECL_SOURCE_LOCATION (decl),
+ check, TRUTH_ANDIF_EXPR);
return check;
}
diff --git a/gcc/cp/cp-gimplify.cc b/gcc/cp/cp-gimplify.cc
index bdf6e5f..a282c39 100644
--- a/gcc/cp/cp-gimplify.cc
+++ b/gcc/cp/cp-gimplify.cc
@@ -1052,27 +1052,6 @@ cp_fold_immediate_r (tree *stmt_p, int *walk_subtrees, void *data_)
switch (TREE_CODE (stmt))
{
- /* Unfortunately we must handle code like
- false ? bar () : 42
- where we have to check bar too. The cp_fold call in cp_fold_r could
- fold the ?: into a constant before we see it here. */
- case COND_EXPR:
- /* If we are called from cp_fold_immediate, we don't need to worry about
- cp_fold folding away the COND_EXPR. */
- if (data->flags & ff_fold_immediate)
- break;
- if (TREE_OPERAND (stmt, 1)
- && cp_walk_tree (&TREE_OPERAND (stmt, 1), cp_fold_immediate_r, data,
- nullptr))
- return error_mark_node;
- if (TREE_OPERAND (stmt, 2)
- && cp_walk_tree (&TREE_OPERAND (stmt, 2), cp_fold_immediate_r, data,
- nullptr))
- return error_mark_node;
- /* We're done here. Don't clear *walk_subtrees here though: we're called
- from cp_fold_r and we must let it recurse on the expression with
- cp_fold. */
- break;
case PTRMEM_CST:
if (TREE_CODE (PTRMEM_CST_MEMBER (stmt)) == FUNCTION_DECL
&& DECL_IMMEDIATE_FUNCTION_P (PTRMEM_CST_MEMBER (stmt)))
@@ -1162,8 +1141,35 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
tree stmt = *stmt_p;
enum tree_code code = TREE_CODE (stmt);
- if (cxx_dialect > cxx17)
- cp_fold_immediate_r (stmt_p, walk_subtrees, data);
+ if (cxx_dialect >= cxx20)
+ {
+ /* Unfortunately we must handle code like
+ false ? bar () : 42
+ where we have to check bar too. The cp_fold call below could
+ fold the ?: into a constant before we've checked it. */
+ if (code == COND_EXPR)
+ {
+ auto then_fn = cp_fold_r, else_fn = cp_fold_r;
+ /* See if we can figure out if either of the branches is dead. If it
+ is, we don't need to do everything that cp_fold_r does. */
+ tree cond = maybe_constant_value (TREE_OPERAND (stmt, 0));
+ if (integer_zerop (cond))
+ then_fn = cp_fold_immediate_r;
+ else if (TREE_CODE (cond) == INTEGER_CST)
+ else_fn = cp_fold_immediate_r;
+
+ cp_walk_tree (&TREE_OPERAND (stmt, 0), cp_fold_r, data, nullptr);
+ if (TREE_OPERAND (stmt, 1))
+ cp_walk_tree (&TREE_OPERAND (stmt, 1), then_fn, data,
+ nullptr);
+ if (TREE_OPERAND (stmt, 2))
+ cp_walk_tree (&TREE_OPERAND (stmt, 2), else_fn, data,
+ nullptr);
+ *walk_subtrees = 0;
+ /* Don't return yet, still need the cp_fold below. */
+ }
+ cp_fold_immediate_r (stmt_p, walk_subtrees, data);
+ }
*stmt_p = stmt = cp_fold (*stmt_p, data->flags);
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 6e34952..1d7df62 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -4969,12 +4969,14 @@ get_vec_init_expr (tree t)
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is a INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
- parameters. The TREE_VALUE is a vector, whose elements are the
+ parameters. The TREE_TYPE is TEMPLATE_PARMS_CONSTRAINTS.
+ The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter) or a TEMPLATE_DECL (if the parameter is a template
- parameter). The TREE_PURPOSE is the default value, if any. The
+ parameter). The TREE_PURPOSE is the default value, if any.
+ The TREE_TYPE is TEMPLATE_PARM_CONSTRAINTS. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL).
@@ -8209,9 +8211,9 @@ extern void maybe_warn_about_useless_cast (location_t, tree, tree,
tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
-extern tree finish_left_unary_fold_expr (tree, int);
-extern tree finish_right_unary_fold_expr (tree, int);
-extern tree finish_binary_fold_expr (tree, tree, int);
+extern tree finish_left_unary_fold_expr (location_t, tree, int);
+extern tree finish_right_unary_fold_expr (location_t, tree, int);
+extern tree finish_binary_fold_expr (location_t, tree, tree, int);
extern tree treat_lvalue_as_rvalue_p (tree, bool);
extern bool decl_in_std_namespace_p (tree);
extern void maybe_warn_pessimizing_move (tree, tree, bool);
diff --git a/gcc/cp/error.cc b/gcc/cp/error.cc
index 8a5219a..767478cf 100644
--- a/gcc/cp/error.cc
+++ b/gcc/cp/error.cc
@@ -3774,6 +3774,8 @@ print_instantiation_partial_context_line (diagnostic_context *context,
? _("recursively required from here\n")
: _("required from here\n"));
}
+ gcc_rich_location rich_loc (loc);
+ diagnostic_show_locus (context, &rich_loc, DK_NOTE);
}
/* Same as print_instantiation_full_context but less verbose. */
@@ -4454,10 +4456,10 @@ cp_printer (pretty_printer *pp, text_info *text, const char *spec,
const char *result;
tree t = NULL;
-#define next_tree (t = va_arg (*text->args_ptr, tree))
-#define next_tcode ((enum tree_code) va_arg (*text->args_ptr, int))
-#define next_lang ((enum languages) va_arg (*text->args_ptr, int))
-#define next_int va_arg (*text->args_ptr, int)
+#define next_tree (t = va_arg (*text->m_args_ptr, tree))
+#define next_tcode ((enum tree_code) va_arg (*text->m_args_ptr, int))
+#define next_lang ((enum languages) va_arg (*text->m_args_ptr, int))
+#define next_int va_arg (*text->m_args_ptr, int)
if (precision != 0 || wide)
return false;
diff --git a/gcc/cp/mangle.cc b/gcc/cp/mangle.cc
index d88c779..afa68da 100644
--- a/gcc/cp/mangle.cc
+++ b/gcc/cp/mangle.cc
@@ -275,6 +275,17 @@ static tree mangle_special_for_type (const tree, const char *);
#define write_unsigned_number(NUMBER) \
write_number ((NUMBER), /*unsigned_p=*/1, 10)
+/* Check for -fabi-version dependent mangling and also set the need_abi_warning
+ flag as appropriate. */
+
+static bool
+abi_check (int ver)
+{
+ if (abi_warn_or_compat_version_crosses (ver))
+ G.need_abi_warning = true;
+ return abi_version_at_least (ver);
+}
+
/* If DECL is a template instance (including the uninstantiated template
itself), return its TEMPLATE_INFO. Otherwise return NULL. */
@@ -1267,9 +1278,7 @@ write_prefix (const tree node)
/* Before ABI 18, we did not count these as substitution
candidates. This leads to incorrect demanglings (and
ABI divergence to other compilers). */
- if (abi_warn_or_compat_version_crosses (18))
- G.need_abi_warning = true;
- if (!abi_version_at_least (18))
+ if (!abi_check (18))
return;
}
}
@@ -1542,9 +1551,7 @@ write_unqualified_name (tree decl)
&& any_abi_below (11))
if (tree mtags = missing_abi_tags (decl))
{
- if (abi_warn_or_compat_version_crosses (11))
- G.need_abi_warning = true;
- if (!abi_version_at_least (11))
+ if (!abi_check (11))
tags = chainon (mtags, tags);
}
write_abi_tags (tags);
@@ -2094,9 +2101,7 @@ write_discriminator (const int discriminator)
write_char ('_');
if (discriminator - 1 >= 10)
{
- if (abi_warn_or_compat_version_crosses (11))
- G.need_abi_warning = 1;
- if (abi_version_at_least (11))
+ if (abi_check (11))
write_char ('_');
}
write_unsigned_number (discriminator - 1);
@@ -2425,9 +2430,7 @@ write_type (tree type)
if (etype && !type_uses_auto (etype))
{
- if (abi_warn_or_compat_version_crosses (5))
- G.need_abi_warning = 1;
- if (!abi_version_at_least (5))
+ if (!abi_check (5))
{
write_type (etype);
return;
@@ -2448,10 +2451,8 @@ write_type (tree type)
case NULLPTR_TYPE:
write_string ("Dn");
- if (abi_version_at_least (7))
+ if (abi_check (7))
++is_builtin_type;
- if (abi_warn_or_compat_version_crosses (7))
- G.need_abi_warning = 1;
break;
case TYPEOF_TYPE:
@@ -2935,10 +2936,8 @@ write_member_name (tree member)
{
if (IDENTIFIER_ANY_OP_P (member))
{
- if (abi_version_at_least (11))
+ if (abi_check (11))
write_string ("on");
- if (abi_warn_or_compat_version_crosses (11))
- G.need_abi_warning = 1;
}
write_unqualified_id (member);
}
@@ -3108,7 +3107,7 @@ write_expression (tree expr)
write_char ('f');
if (delta != 0)
{
- if (abi_version_at_least (5))
+ if (abi_check (5))
{
/* Let L be the number of function prototype scopes from the
innermost one (in which the parameter reference occurs) up
@@ -3120,8 +3119,6 @@ write_expression (tree expr)
write_char ('L');
write_unsigned_number (delta - 1);
}
- if (abi_warn_or_compat_version_crosses (5))
- G.need_abi_warning = true;
}
write_char ('p');
write_compact_number (index - 1);
@@ -3138,9 +3135,7 @@ write_expression (tree expr)
if (PACK_EXPANSION_P (op))
{
- if (abi_warn_or_compat_version_crosses (11))
- G.need_abi_warning = true;
- if (abi_version_at_least (11))
+ if (abi_check (11))
{
/* sZ rather than szDp. */
write_string ("sZ");
@@ -3158,9 +3153,7 @@ write_expression (tree expr)
{
tree args = ARGUMENT_PACK_ARGS (op);
int length = TREE_VEC_LENGTH (args);
- if (abi_warn_or_compat_version_crosses (10))
- G.need_abi_warning = true;
- if (abi_version_at_least (10))
+ if (abi_check (10))
{
/* sP <template-arg>* E # sizeof...(T), size of a captured
template parameter pack from an alias template */
@@ -3198,9 +3191,7 @@ write_expression (tree expr)
{
if (!ALIGNOF_EXPR_STD_P (expr))
{
- if (abi_warn_or_compat_version_crosses (16))
- G.need_abi_warning = true;
- if (abi_version_at_least (16))
+ if (abi_check (16))
{
/* We used to mangle __alignof__ like alignof. */
write_string ("u11__alignof__");
@@ -3445,10 +3436,8 @@ write_expression (tree expr)
tree name = dependent_name (expr);
if (IDENTIFIER_ANY_OP_P (name))
{
- if (abi_version_at_least (16))
+ if (abi_check (16))
write_string ("on");
- if (abi_warn_or_compat_version_crosses (16))
- G.need_abi_warning = 1;
}
write_unqualified_id (name);
}
@@ -3507,9 +3496,7 @@ write_expression (tree expr)
if (code == CONST_CAST_EXPR
|| code == STATIC_CAST_EXPR)
{
- if (abi_warn_or_compat_version_crosses (6))
- G.need_abi_warning = 1;
- if (!abi_version_at_least (6))
+ if (!abi_check (6))
name = OVL_OP_INFO (false, CAST_EXPR)->mangled_name;
}
@@ -3578,10 +3565,8 @@ write_expression (tree expr)
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
- if (abi_version_at_least (6))
+ if (abi_check (6))
write_char ('_');
- if (abi_warn_or_compat_version_crosses (6))
- G.need_abi_warning = 1;
/* Fall through. */
default:
@@ -3776,11 +3761,9 @@ write_template_arg (tree node)
if (TREE_CODE (node) == BASELINK
&& !type_unknown_p (node))
{
- if (abi_version_at_least (6))
+ /* Before v6 we wrongly wrapped a class-scope function in X/E. */
+ if (abi_check (6))
node = BASELINK_FUNCTIONS (node);
- if (abi_warn_or_compat_version_crosses (6))
- /* We wrongly wrapped a class-scope function in X/E. */
- G.need_abi_warning = 1;
}
if (ARGUMENT_PACK_P (node))
@@ -3788,12 +3771,10 @@ write_template_arg (tree node)
/* Expand the template argument pack. */
tree args = ARGUMENT_PACK_ARGS (node);
int i, length = TREE_VEC_LENGTH (args);
- if (abi_version_at_least (6))
+ if (abi_check (6))
write_char ('J');
else
write_char ('I');
- if (abi_warn_or_compat_version_crosses (6))
- G.need_abi_warning = 1;
for (i = 0; i < length; ++i)
write_template_arg (TREE_VEC_ELT (args, i));
write_char ('E');
@@ -3816,12 +3797,10 @@ write_template_arg (tree node)
write_char ('L');
/* Until ABI version 3, the underscore before the mangled name
was incorrectly omitted. */
- if (!abi_version_at_least (3))
+ if (!abi_check (3))
write_char ('Z');
else
write_string ("_Z");
- if (abi_warn_or_compat_version_crosses (3))
- G.need_abi_warning = 1;
write_encoding (node);
write_char ('E');
}
@@ -3921,6 +3900,7 @@ static void
write_template_param (const tree parm)
{
int parm_index;
+ int level;
MANGLE_TRACE_TREE ("template-parm", parm);
@@ -3930,10 +3910,12 @@ write_template_param (const tree parm)
case TEMPLATE_TEMPLATE_PARM:
case BOUND_TEMPLATE_TEMPLATE_PARM:
parm_index = TEMPLATE_TYPE_IDX (parm);
+ level = TEMPLATE_TYPE_LEVEL (parm);
break;
case TEMPLATE_PARM_INDEX:
parm_index = TEMPLATE_PARM_IDX (parm);
+ level = TEMPLATE_PARM_LEVEL (parm);
break;
default:
@@ -3941,6 +3923,14 @@ write_template_param (const tree parm)
}
write_char ('T');
+ if (level > 1)
+ {
+ if (abi_check (19))
+ {
+ write_char ('L');
+ write_compact_number (level - 1);
+ }
+ }
/* NUMBER as it appears in the mangling is (-1)-indexed, with the
earliest template param denoted by `_'. */
write_compact_number (parm_index);
@@ -3994,10 +3984,8 @@ write_substitution (const int seq_id)
static inline void
start_mangling (const tree entity)
{
+ G = {};
G.entity = entity;
- G.need_abi_warning = false;
- G.need_cxx17_warning = false;
- G.mod = false;
obstack_free (&name_obstack, name_base);
mangle_obstack = &name_obstack;
name_base = obstack_alloc (&name_obstack, 0);
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index 77c9edc..bbb1e20 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -5158,7 +5158,6 @@ trees_out::start (tree t, bool code_streamed)
case INTEGER_CST:
u (TREE_INT_CST_NUNITS (t));
u (TREE_INT_CST_EXT_NUNITS (t));
- u (TREE_INT_CST_OFFSET_NUNITS (t));
break;
case OMP_CLAUSE:
@@ -5231,7 +5230,6 @@ trees_in::start (unsigned code)
unsigned n = u ();
unsigned e = u ();
t = make_int_cst (n, e);
- TREE_INT_CST_OFFSET_NUNITS(t) = u ();
}
break;
@@ -15969,7 +15967,8 @@ module_state::read_location (bytes_in &sec) const
range.m_finish = read_location (sec);
unsigned discriminator = sec.u ();
if (locus != loc && range.m_start != loc && range.m_finish != loc)
- locus = get_combined_adhoc_loc (line_table, locus, range, NULL, discriminator);
+ locus = line_table->get_or_create_combined_loc (locus, range,
+ nullptr, discriminator);
}
break;
diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc
index f3abae7..57b62fb 100644
--- a/gcc/cp/parser.cc
+++ b/gcc/cp/parser.cc
@@ -5533,6 +5533,8 @@ static cp_expr
cp_parser_fold_expression (cp_parser *parser, tree expr1)
{
cp_id_kind pidk;
+ location_t loc = cp_lexer_peek_token (parser->lexer)->location;
+ const cp_token *token = nullptr;
// Left fold.
if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
@@ -5540,6 +5542,7 @@ cp_parser_fold_expression (cp_parser *parser, tree expr1)
if (expr1)
return error_mark_node;
cp_lexer_consume_token (parser->lexer);
+ token = cp_lexer_peek_token (parser->lexer);
int op = cp_parser_fold_operator (parser);
if (op == ERROR_MARK)
{
@@ -5551,10 +5554,11 @@ cp_parser_fold_expression (cp_parser *parser, tree expr1)
false, &pidk);
if (expr == error_mark_node)
return error_mark_node;
- return finish_left_unary_fold_expr (expr, op);
+ loc = make_location (token->location, loc, parser->lexer);
+ return finish_left_unary_fold_expr (loc, expr, op);
}
- const cp_token* token = cp_lexer_peek_token (parser->lexer);
+ token = cp_lexer_peek_token (parser->lexer);
int op = cp_parser_fold_operator (parser);
if (op == ERROR_MARK)
{
@@ -5585,7 +5589,10 @@ cp_parser_fold_expression (cp_parser *parser, tree expr1)
// Right fold.
if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
- return finish_right_unary_fold_expr (expr1, op);
+ {
+ loc = make_location (token->location, loc, parser->lexer);
+ return finish_right_unary_fold_expr (loc, expr1, op);
+ }
if (cp_lexer_next_token_is_not (parser->lexer, token->type))
{
@@ -5598,7 +5605,8 @@ cp_parser_fold_expression (cp_parser *parser, tree expr1)
tree expr2 = cp_parser_cast_expression (parser, false, false, false, &pidk);
if (expr2 == error_mark_node)
return error_mark_node;
- return finish_binary_fold_expr (expr1, expr2, op);
+ loc = make_location (token->location, loc, parser->lexer);
+ return finish_binary_fold_expr (loc, expr1, expr2, op);
}
/* Parse a primary-expression.
@@ -15661,6 +15669,7 @@ cp_parser_simple_declaration (cp_parser* parser,
maybe_range_for_decl,
&init_loc,
&auto_result);
+ const bool fndecl_p = TREE_CODE (decl) == FUNCTION_DECL;
/* If an error occurred while parsing tentatively, exit quickly.
(That usually happens when in the body of a function; each
statement is treated as a declaration-statement until proven
@@ -15674,16 +15683,13 @@ cp_parser_simple_declaration (cp_parser* parser,
init-declarator, they shall all form declarations of
variables. */
if (auto_function_declaration == NULL_TREE)
- auto_function_declaration
- = TREE_CODE (decl) == FUNCTION_DECL ? decl : error_mark_node;
- else if (TREE_CODE (decl) == FUNCTION_DECL
- || auto_function_declaration != error_mark_node)
+ auto_function_declaration = fndecl_p ? decl : error_mark_node;
+ else if (fndecl_p || auto_function_declaration != error_mark_node)
{
error_at (decl_specifiers.locations[ds_type_spec],
"non-variable %qD in declaration with more than one "
"declarator with placeholder type",
- TREE_CODE (decl) == FUNCTION_DECL
- ? decl : auto_function_declaration);
+ fndecl_p ? decl : auto_function_declaration);
auto_function_declaration = error_mark_node;
}
}
@@ -15755,7 +15761,9 @@ cp_parser_simple_declaration (cp_parser* parser,
/* If we have already issued an error message we don't need
to issue another one. */
if ((decl != error_mark_node
- && DECL_INITIAL (decl) != error_mark_node)
+ /* grokfndecl sets DECL_INITIAL to error_mark_node for
+ functions. */
+ && (fndecl_p || DECL_INITIAL (decl) != error_mark_node))
|| cp_parser_uncommitted_to_tentative_parse_p (parser))
cp_parser_error (parser, "expected %<,%> or %<;%>");
/* Skip tokens until we reach the end of the statement. */
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index 73ac1cb..7cbf903 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -31423,7 +31423,9 @@ convert_generic_types_to_packs (tree parm, int start_idx, int end_idx)
{
tree id = unpack_concept_check (constr);
TREE_VEC_ELT (TREE_OPERAND (id, 1), 0) = t;
- tree fold = finish_left_unary_fold_expr (constr, TRUTH_ANDIF_EXPR);
+ location_t loc = DECL_SOURCE_LOCATION (TYPE_NAME (t));
+ tree fold = finish_left_unary_fold_expr (loc, constr,
+ TRUTH_ANDIF_EXPR);
TEMPLATE_PARM_CONSTRAINTS (node) = fold;
/* If there was a constraint, we also need to replace that in
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index 80ef136..2a0cf96 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -12605,7 +12605,7 @@ capture_decltype (tree decl)
this is a right unary fold. Otherwise it is a left unary fold. */
static tree
-finish_unary_fold_expr (tree expr, int op, tree_code dir)
+finish_unary_fold_expr (location_t loc, tree expr, int op, tree_code dir)
{
/* Build a pack expansion (assuming expr has pack type). */
if (!uses_parameter_packs (expr))
@@ -12618,7 +12618,7 @@ finish_unary_fold_expr (tree expr, int op, tree_code dir)
/* Build the fold expression. */
tree code = build_int_cstu (integer_type_node, abs (op));
- tree fold = build_min_nt_loc (input_location, dir, code, pack);
+ tree fold = build_min_nt_loc (loc, dir, code, pack);
FOLD_EXPR_MODIFY_P (fold) = (op < 0);
TREE_TYPE (fold) = build_dependent_operator_type (NULL_TREE,
FOLD_EXPR_OP (fold),
@@ -12627,27 +12627,28 @@ finish_unary_fold_expr (tree expr, int op, tree_code dir)
}
tree
-finish_left_unary_fold_expr (tree expr, int op)
+finish_left_unary_fold_expr (location_t loc, tree expr, int op)
{
- return finish_unary_fold_expr (expr, op, UNARY_LEFT_FOLD_EXPR);
+ return finish_unary_fold_expr (loc, expr, op, UNARY_LEFT_FOLD_EXPR);
}
tree
-finish_right_unary_fold_expr (tree expr, int op)
+finish_right_unary_fold_expr (location_t loc, tree expr, int op)
{
- return finish_unary_fold_expr (expr, op, UNARY_RIGHT_FOLD_EXPR);
+ return finish_unary_fold_expr (loc, expr, op, UNARY_RIGHT_FOLD_EXPR);
}
/* Build a binary fold expression over EXPR1 and EXPR2. The
associativity of the fold is determined by EXPR1 and EXPR2 (whichever
has an unexpanded parameter pack). */
-tree
-finish_binary_fold_expr (tree pack, tree init, int op, tree_code dir)
+static tree
+finish_binary_fold_expr (location_t loc, tree pack, tree init,
+ int op, tree_code dir)
{
pack = make_pack_expansion (pack);
tree code = build_int_cstu (integer_type_node, abs (op));
- tree fold = build_min_nt_loc (input_location, dir, code, pack, init);
+ tree fold = build_min_nt_loc (loc, dir, code, pack, init);
FOLD_EXPR_MODIFY_P (fold) = (op < 0);
TREE_TYPE (fold) = build_dependent_operator_type (NULL_TREE,
FOLD_EXPR_OP (fold),
@@ -12656,16 +12657,16 @@ finish_binary_fold_expr (tree pack, tree init, int op, tree_code dir)
}
tree
-finish_binary_fold_expr (tree expr1, tree expr2, int op)
+finish_binary_fold_expr (location_t loc, tree expr1, tree expr2, int op)
{
// Determine which expr has an unexpanded parameter pack and
// set the pack and initial term.
bool pack1 = uses_parameter_packs (expr1);
bool pack2 = uses_parameter_packs (expr2);
if (pack1 && !pack2)
- return finish_binary_fold_expr (expr1, expr2, op, BINARY_RIGHT_FOLD_EXPR);
+ return finish_binary_fold_expr (loc, expr1, expr2, op, BINARY_RIGHT_FOLD_EXPR);
else if (pack2 && !pack1)
- return finish_binary_fold_expr (expr2, expr1, op, BINARY_LEFT_FOLD_EXPR);
+ return finish_binary_fold_expr (loc, expr2, expr1, op, BINARY_LEFT_FOLD_EXPR);
else
{
if (pack1)
diff --git a/gcc/d/ChangeLog b/gcc/d/ChangeLog
index 403e27e..5c44bfe 100644
--- a/gcc/d/ChangeLog
+++ b/gcc/d/ChangeLog
@@ -1,3 +1,90 @@
+2023-10-17 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ * d-tree.h (intrinsic_code): Update define for DEF_D_INTRINSIC.
+ (maybe_reject_intrinsic): New prototype.
+ * expr.cc (ExprVisitor::visit (SymOffExp *)): Call
+ maybe_reject_intrinsic.
+ * intrinsics.cc (intrinsic_decl): Add fallback field.
+ (intrinsic_decls): Update define for DEF_D_INTRINSIC.
+ (maybe_reject_intrinsic): New function.
+ * intrinsics.def (DEF_D_LIB_BUILTIN): Update.
+ (DEF_CTFE_BUILTIN): Update.
+ (INTRINSIC_BSF): Declare as library builtin.
+ (INTRINSIC_BSR): Likewise.
+ (INTRINSIC_BT): Likewise.
+ (INTRINSIC_BSF64): Likewise.
+ (INTRINSIC_BSR64): Likewise.
+ (INTRINSIC_BT64): Likewise.
+ (INTRINSIC_POPCNT32): Likewise.
+ (INTRINSIC_POPCNT64): Likewise.
+ (INTRINSIC_ROL): Likewise.
+ (INTRINSIC_ROL_TIARG): Likewise.
+ (INTRINSIC_ROR): Likewise.
+ (INTRINSIC_ROR_TIARG): Likewise.
+ (INTRINSIC_ADDS): Likewise.
+ (INTRINSIC_ADDSL): Likewise.
+ (INTRINSIC_ADDU): Likewise.
+ (INTRINSIC_ADDUL): Likewise.
+ (INTRINSIC_SUBS): Likewise.
+ (INTRINSIC_SUBSL): Likewise.
+ (INTRINSIC_SUBU): Likewise.
+ (INTRINSIC_SUBUL): Likewise.
+ (INTRINSIC_MULS): Likewise.
+ (INTRINSIC_MULSL): Likewise.
+ (INTRINSIC_MULU): Likewise.
+ (INTRINSIC_MULUI): Likewise.
+ (INTRINSIC_MULUL): Likewise.
+ (INTRINSIC_NEGS): Likewise.
+ (INTRINSIC_NEGSL): Likewise.
+ (INTRINSIC_TOPRECF): Likewise.
+ (INTRINSIC_TOPREC): Likewise.
+ (INTRINSIC_TOPRECL): Likewise.
+
+2023-10-16 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ * dmd/MERGE: Merge upstream dmd 4c18eed967.
+ * d-diagnostic.cc (verrorReport): Update for new front-end interface.
+ (verrorReportSupplemental): Likewise.
+ * d-lang.cc (d_init_options): Likewise.
+ (d_handle_option): Likewise.
+ (d_post_options): Likewise.
+ (d_parse_file): Likewise.
+ * decl.cc (get_symbol_decl): Likewise.
+
+2023-10-15 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ * dmd/MERGE: Merge upstream dmd f9efc98fd7.
+ * dmd/VERSION: Bump version to v2.105.2.
+ * d-builtins.cc (build_frontend_type): Update for new front-end
+ interface.
+ * d-diagnostic.cc (verrorReport): Don't emit tips when error gagging
+ is turned on.
+ * d-lang.cc (d_handle_option): Remove obsolete parameter.
+ (d_post_options): Likewise.
+ (d_read_ddoc_files): New function.
+ (d_generate_ddoc_file): New function.
+ (d_parse_file): Update for new front-end interface.
+ * expr.cc (ExprVisitor::visit (AssocArrayLiteralExp *)): Check for new
+ front-end lowering of static associative arrays.
+
+2023-10-14 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ PR d/111537
+ * expr.cc (ExprVisitor::visit (StringExp *)): Include null terminator
+ in STRING_CST string.
+ * modules.cc (get_compiler_dso_type): Generate ModuleInfo** type for
+ the minfo fields.
+
+2023-10-14 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ * d-lang.cc (d_write_file): New function.
+ (d_parse_file): Reduce code duplication.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * d-diagnostic.cc (d_diagnostic_report_diagnostic): Use text_info
+ ctor.
+
2023-09-23 Iain Buclaw <ibuclaw@gdcproject.org>
* dmd/MERGE: Merge upstream dmd 4574d1728d.
diff --git a/gcc/d/d-builtins.cc b/gcc/d/d-builtins.cc
index 60f76fc..cf998d2 100644
--- a/gcc/d/d-builtins.cc
+++ b/gcc/d/d-builtins.cc
@@ -322,7 +322,8 @@ build_frontend_type (tree type)
return NULL;
}
- args->push (Parameter::create (sc, targ, NULL, NULL, NULL));
+ args->push (Parameter::create (Loc (), sc, targ,
+ NULL, NULL, NULL));
}
/* GCC generic and placeholder built-ins are marked as variadic, yet
diff --git a/gcc/d/d-diagnostic.cc b/gcc/d/d-diagnostic.cc
index 57f36f2..b3ad60c 100644
--- a/gcc/d/d-diagnostic.cc
+++ b/gcc/d/d-diagnostic.cc
@@ -205,11 +205,7 @@ d_diagnostic_report_diagnostic (const Loc &loc, int opt, const char *format,
else
{
/* Write verbatim messages with no location direct to stream. */
- text_info text;
- text.err_no = errno;
- text.args_ptr = &argp;
- text.format_spec = expand_d_format (format);
- text.x_data = NULL;
+ text_info text (expand_d_format (format), &argp, errno, nullptr);
pp_format_verbatim (global_dc->printer, &text);
pp_newline_and_flush (global_dc->printer);
@@ -224,7 +220,7 @@ d_diagnostic_report_diagnostic (const Loc &loc, int opt, const char *format,
void D_ATTRIBUTE_FORMAT(2,0) ATTRIBUTE_GCC_DIAG(2,0)
verrorReport (const Loc& loc, const char *format, va_list ap, ErrorKind kind,
- const char *prefix1 = NULL, const char *prefix2 = NULL)
+ const char *prefix1, const char *prefix2)
{
diagnostic_t diag_kind = DK_UNSPECIFIED;
int opt = 0;
@@ -237,7 +233,7 @@ verrorReport (const Loc& loc, const char *format, va_list ap, ErrorKind kind,
if (global.gag)
global.gaggedErrors++;
- if (global.gag && !global.params.showGaggedErrors)
+ if (global.gag && !global.params.v.showGaggedErrors)
return;
diag_kind = global.gag ? DK_ANACHRONISM : DK_ERROR;
@@ -281,6 +277,9 @@ verrorReport (const Loc& loc, const char *format, va_list ap, ErrorKind kind,
}
else if (kind == ErrorKind::tip)
{
+ if (global.gag)
+ return;
+
diag_kind = DK_DEBUG;
verbatim = true;
}
@@ -309,7 +308,7 @@ verrorReportSupplemental (const Loc& loc, const char* format, va_list ap,
{
if (kind == ErrorKind::error)
{
- if (global.gag && !global.params.showGaggedErrors)
+ if (global.gag && !global.params.v.showGaggedErrors)
return;
}
else if (kind == ErrorKind::warning)
diff --git a/gcc/d/d-lang.cc b/gcc/d/d-lang.cc
index 7dddcf5..902fd86 100644
--- a/gcc/d/d-lang.cc
+++ b/gcc/d/d-lang.cc
@@ -295,7 +295,6 @@ d_init_options (unsigned int, cl_decoded_option *decoded_options)
global.compileEnv.vendor = lang_hooks.name;
global.params.argv0 = xstrdup (decoded_options[0].arg);
- global.params.errorLimit = flag_max_errors;
/* Default extern(C++) mangling to C++17. */
global.params.cplusplus = CppStdRevisionCpp17;
@@ -303,7 +302,8 @@ d_init_options (unsigned int, cl_decoded_option *decoded_options)
/* Warnings and deprecations are disabled by default. */
global.params.useDeprecated = DIAGNOSTICinform;
global.params.warnings = DIAGNOSTICoff;
- global.params.messageStyle = MessageStyle::gnu;
+ global.params.v.errorLimit = flag_max_errors;
+ global.params.v.messageStyle = MessageStyle::gnu;
global.params.imppath = d_gc_malloc<Strings> ();
global.params.fileImppath = d_gc_malloc<Strings> ();
@@ -458,9 +458,7 @@ d_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
case OPT_fdebug_:
if (Identifier::isValidIdentifier (CONST_CAST (char *, arg)))
{
- if (!global.params.debugids)
- global.params.debugids = d_gc_malloc<Strings> ();
- global.params.debugids->push (arg);
+ DebugCondition::addGlobalIdent (arg);
break;
}
@@ -662,30 +660,30 @@ d_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
break;
case OPT_ftransition_all:
- global.params.vfield = value;
- global.params.vgc = value;
- global.params.vin = value;
- global.params.vtls = value;
+ global.params.v.field = value;
+ global.params.v.gc = value;
+ global.params.v.vin = value;
+ global.params.v.tls = value;
break;
case OPT_ftransition_field:
- global.params.vfield = value;
+ global.params.v.field = value;
break;
case OPT_ftransition_in:
- global.params.vin = value;
+ global.params.v.vin = value;
break;
case OPT_ftransition_nogc:
- global.params.vgc = value;
+ global.params.v.gc = value;
break;
case OPT_ftransition_templates:
- global.params.vtemplates = value;
+ global.params.v.templates = value;
break;
case OPT_ftransition_tls:
- global.params.vtls = value;
+ global.params.v.tls = value;
break;
case OPT_funittest:
@@ -695,9 +693,7 @@ d_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
case OPT_fversion_:
if (Identifier::isValidIdentifier (CONST_CAST (char *, arg)))
{
- if (!global.params.versionids)
- global.params.versionids = d_gc_malloc<Strings> ();
- global.params.versionids->push (arg);
+ VersionCondition::addGlobalIdent (arg);
break;
}
@@ -773,13 +769,12 @@ d_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
break;
case OPT_v:
- global.params.verbose = value;
+ global.params.v.verbose = value;
break;
case OPT_Wall:
if (value)
global.params.warnings = DIAGNOSTICinform;
- global.params.obsolete = value;
break;
case OPT_Wdeprecated:
@@ -793,7 +788,7 @@ d_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
case OPT_Wspeculative:
if (value)
- global.params.showGaggedErrors = 1;
+ global.params.v.showGaggedErrors = 1;
break;
case OPT_Xf:
@@ -917,19 +912,20 @@ d_post_options (const char ** fn)
&& global.params.warnings == DIAGNOSTICerror)
global.params.useDeprecated = DIAGNOSTICerror;
- /* Make -fmax-errors visible to frontend's diagnostic machinery. */
- if (OPTION_SET_P (flag_max_errors))
- global.params.errorLimit = flag_max_errors;
-
if (flag_excess_precision == EXCESS_PRECISION_DEFAULT)
flag_excess_precision = EXCESS_PRECISION_STANDARD;
global.params.useInline = flag_inline_functions;
- global.params.showColumns = flag_show_column;
- global.params.printErrorContext = flag_diagnostics_show_caret;
+
+ /* Make -fmax-errors visible to frontend's diagnostic machinery. */
+ if (OPTION_SET_P (flag_max_errors))
+ global.params.v.errorLimit = flag_max_errors;
+
+ global.params.v.showColumns = flag_show_column;
+ global.params.v.printErrorContext = flag_diagnostics_show_caret;
/* Keep the front-end location type in sync with params. */
- Loc::set (global.params.showColumns, global.params.messageStyle);
+ Loc::set (global.params.v.showColumns, global.params.v.messageStyle);
if (global.params.useInline)
global.params.dihdr.fullOutput = true;
@@ -941,26 +937,6 @@ d_post_options (const char ** fn)
global.compileEnv.previewIn = global.params.previewIn;
global.compileEnv.ddocOutput = global.params.ddoc.doOutput;
global.compileEnv.shortenedMethods = global.params.shortenedMethods;
- global.compileEnv.obsolete = global.params.obsolete;
-
- /* Add in versions given on the command line. */
- if (global.params.versionids)
- {
- for (size_t i = 0; i < global.params.versionids->length; i++)
- {
- const char *s = (*global.params.versionids)[i];
- VersionCondition::addGlobalIdent (s);
- }
- }
-
- if (global.params.debugids)
- {
- for (size_t i = 0; i < global.params.debugids->length; i++)
- {
- const char *s = (*global.params.debugids)[i];
- DebugCondition::addGlobalIdent (s);
- }
- }
if (warn_return_type == -1)
warn_return_type = 0;
@@ -978,12 +954,96 @@ d_add_builtin_module (Module *m)
builtin_modules.push (m);
}
+/* Writes to FILENAME. DATA is the full content of the file to be written. */
+
+static void
+d_write_file (const char *filename, const char *data)
+{
+ FILE *stream;
+
+ if (filename && (filename[0] != '-' || filename[1] != '\0'))
+ stream = fopen (filename, "w");
+ else
+ stream = stdout;
+
+ if (!stream)
+ {
+ error ("unable to open %s for writing: %m", filename);
+ return;
+ }
+
+ fprintf (stream, "%s", data);
+
+ if (stream != stdout && (ferror (stream) || fclose (stream)))
+ error ("writing output file %s: %m", filename);
+}
+
+/* Read ddoc macro files named by the DDOCFILES, then write the concatenated
+   contents into DDOCBUF.  */
+
+static void
+d_read_ddoc_files (Strings &ddocfiles, OutBuffer &ddocbuf)
+{
+ if (ddocbuf.length ())
+ return;
+
+ for (size_t i = 0; i < ddocfiles.length; i++)
+ {
+ int fd = open (ddocfiles[i], O_RDONLY);
+ bool ok = false;
+ struct stat buf;
+
+ if (fd == -1 || fstat (fd, &buf))
+ {
+ error ("unable to open %s for reading: %m", ddocfiles[i]);
+ continue;
+ }
+
+ /* Check we've not been given a directory, or a file bigger than 4GB. */
+ if (S_ISDIR (buf.st_mode))
+ errno = ENOENT;
+ else if (buf.st_size != unsigned (buf.st_size))
+ errno = EMFILE;
+ else
+ {
+ unsigned size = unsigned (buf.st_size);
+ char *buffer = (char *) xmalloc (size);
+
+ if (read (fd, buffer, size) == ssize_t (size))
+ {
+ ddocbuf.write (buffer, size);
+ ok = true;
+ }
+
+ free (buffer);
+ }
+
+ close (fd);
+ if (!ok)
+ fatal_error (input_location, "reading ddoc file %s: %m", ddocfiles[i]);
+ }
+}
+
+static void
+d_generate_ddoc_file (Module *m, OutBuffer &ddocbuf)
+{
+ input_location = make_location_t (m->loc);
+
+ d_read_ddoc_files (global.params.ddoc.files, ddocbuf);
+
+ OutBuffer ddocbuf_out;
+ gendocfile (m, ddocbuf.peekChars (), ddocbuf.length (), global.datetime,
+ global.errorSink, ddocbuf_out);
+
+ d_write_file (m->docfile.toChars (), ddocbuf_out.peekChars ());
+}
+
/* Implements the lang_hooks.parse_file routine for language D. */
static void
d_parse_file (void)
{
- if (global.params.verbose)
+ if (global.params.v.verbose)
{
message ("binary %s", global.params.argv0.ptr);
message ("version %s", global.versionChars ());
@@ -1014,6 +1074,9 @@ d_parse_file (void)
Modules modules;
modules.reserve (num_in_fnames);
+ /* Buffer for contents of .ddoc files. */
+ OutBuffer ddocbuf;
+
/* In this mode, the first file name is supposed to be a duplicate
of one of the input files. */
if (d_option.fonly && strcmp (d_option.fonly, main_input_filename) != 0)
@@ -1090,7 +1153,7 @@ d_parse_file (void)
{
Module *m = modules[i];
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("parse %s", m->toChars ());
if (!Module::rootModule)
@@ -1101,7 +1164,8 @@ d_parse_file (void)
if (m->filetype == FileType::ddoc)
{
- gendocfile (m, global.errorSink);
+ d_generate_ddoc_file (m, ddocbuf);
+
/* Remove M from list of modules. */
modules.remove (i);
i--;
@@ -1141,10 +1205,12 @@ d_parse_file (void)
|| (d_option.fonly && m != Module::rootModule))
continue;
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("import %s", m->toChars ());
- genhdrfile (m);
+ OutBuffer buf;
+ genhdrfile (m, buf);
+ d_write_file (m->hdrfile.toChars (), buf.peekChars ());
}
dump_headers = true;
@@ -1158,7 +1224,7 @@ d_parse_file (void)
{
Module *m = modules[i];
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("importall %s", m->toChars ());
m->importAll (NULL);
@@ -1182,7 +1248,7 @@ d_parse_file (void)
continue;
}
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("semantic %s", m->toChars ());
dsymbolSemantic (m, NULL);
@@ -1213,7 +1279,7 @@ d_parse_file (void)
{
Module *m = modules[i];
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("semantic2 %s", m->toChars ());
semantic2 (m, NULL);
@@ -1229,7 +1295,7 @@ d_parse_file (void)
{
Module *m = modules[i];
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("semantic3 %s", m->toChars ());
semantic3 (m, NULL);
@@ -1264,8 +1330,6 @@ d_parse_file (void)
if (d_option.deps)
{
obstack buffer;
- FILE *deps_stream;
-
gcc_obstack_init (&buffer);
for (size_t i = 0; i < modules.length; i++)
@@ -1275,60 +1339,19 @@ d_parse_file (void)
if (d_option.deps_filename_user)
d_option.deps_filename = d_option.deps_filename_user;
- if (d_option.deps_filename)
- {
- deps_stream = fopen (d_option.deps_filename, "w");
- if (!deps_stream)
- {
- fatal_error (input_location, "opening dependency file %s: %m",
- d_option.deps_filename);
- goto had_errors;
- }
- }
- else
- deps_stream = stdout;
-
- fprintf (deps_stream, "%s", (char *) obstack_finish (&buffer));
-
- if (deps_stream != stdout
- && (ferror (deps_stream) || fclose (deps_stream)))
- {
- fatal_error (input_location, "closing dependency file %s: %m",
- d_option.deps_filename);
- }
+ d_write_file (d_option.deps_filename,
+ (char *) obstack_finish (&buffer));
}
- if (global.params.vtemplates)
+ if (global.params.v.templates)
printTemplateStats ();
/* Generate JSON files. */
if (global.params.json.doOutput)
{
OutBuffer buf;
- json_generate (&buf, &modules);
-
- const char *name = global.params.json.name.ptr;
- FILE *json_stream;
-
- if (name && (name[0] != '-' || name[1] != '\0'))
- {
- const char *nameext
- = FileName::defaultExt (name, json_ext.ptr);
- json_stream = fopen (nameext, "w");
- if (!json_stream)
- {
- fatal_error (input_location, "opening json file %s: %m", nameext);
- goto had_errors;
- }
- }
- else
- json_stream = stdout;
-
- fprintf (json_stream, "%s", buf.peekChars ());
-
- if (json_stream != stdout
- && (ferror (json_stream) || fclose (json_stream)))
- fatal_error (input_location, "closing json file %s: %m", name);
+ json_generate (modules, buf);
+ d_write_file (global.params.json.name.ptr, buf.peekChars ());
}
/* Generate Ddoc files. */
@@ -1337,7 +1360,7 @@ d_parse_file (void)
for (size_t i = 0; i < modules.length; i++)
{
Module *m = modules[i];
- gendocfile (m, global.errorSink);
+ d_generate_ddoc_file (m, ddocbuf);
}
}
@@ -1350,7 +1373,7 @@ d_parse_file (void)
OutBuffer buf;
buf.doindent = 1;
- moduleToBuffer (&buf, m);
+ moduleToBuffer (buf, m);
message ("%s", buf.peekChars ());
}
}
@@ -1372,7 +1395,7 @@ d_parse_file (void)
|| (d_option.fonly && m != Module::rootModule))
continue;
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("code %s", m->toChars ());
if (!flag_syntax_only)
@@ -1391,22 +1414,8 @@ d_parse_file (void)
/* We want to write the mixin expansion file also on error. */
if (global.params.mixinOut.doOutput)
{
- FILE *mixin_stream = fopen (global.params.mixinOut.name.ptr, "w");
-
- if (mixin_stream)
- {
- OutBuffer *buf = global.params.mixinOut.buffer;
- fprintf (mixin_stream, "%s", buf->peekChars ());
-
- if (ferror (mixin_stream) || fclose (mixin_stream))
- fatal_error (input_location, "closing mixin file %s: %m",
- global.params.mixinOut.name.ptr);
- }
- else
- {
- fatal_error (input_location, "opening mixin file %s: %m",
- global.params.mixinOut.name.ptr);
- }
+ d_write_file (global.params.mixinOut.name.ptr,
+ global.params.mixinOut.buffer->peekChars ());
}
/* Remove generated .di files on error. */
diff --git a/gcc/d/d-tree.h b/gcc/d/d-tree.h
index b64a6fb..66c2f24 100644
--- a/gcc/d/d-tree.h
+++ b/gcc/d/d-tree.h
@@ -94,7 +94,7 @@ enum level_kind
enum intrinsic_code
{
-#define DEF_D_INTRINSIC(CODE, B, N, M, D, C) CODE,
+#define DEF_D_INTRINSIC(CODE, B, N, M, D, C, F) CODE,
#include "intrinsics.def"
@@ -668,6 +668,7 @@ extern tree build_import_decl (Dsymbol *);
/* In intrinsics.cc. */
extern void maybe_set_intrinsic (FuncDeclaration *);
extern tree maybe_expand_intrinsic (tree);
+extern tree maybe_reject_intrinsic (tree);
/* In modules.cc. */
extern void build_module_tree (Module *);
diff --git a/gcc/d/decl.cc b/gcc/d/decl.cc
index 7e612e1..c80bb8a 100644
--- a/gcc/d/decl.cc
+++ b/gcc/d/decl.cc
@@ -968,7 +968,7 @@ public:
return;
}
- if (global.params.verbose)
+ if (global.params.v.verbose)
message ("function %s", d->toPrettyChars ());
tree old_context = start_function (d);
@@ -1554,7 +1554,7 @@ get_symbol_decl (Declaration *decl)
/* Symbol is going in thread local storage. */
if (decl->isThreadlocal () && !DECL_ARTIFICIAL (decl->csym))
{
- if (global.params.vtls)
+ if (global.params.v.tls)
message (decl->loc, "`%s` is thread local", decl->toChars ());
set_decl_tls_model (decl->csym, decl_default_tls_model (decl->csym));
diff --git a/gcc/d/dmd/MERGE b/gcc/d/dmd/MERGE
index dc26778..7946002 100644
--- a/gcc/d/dmd/MERGE
+++ b/gcc/d/dmd/MERGE
@@ -1,4 +1,4 @@
-4574d1728d1f7e52ff40e6733b8c39889d128349
+4c18eed9674e04c1ca89fbc8bd5c4e483eb5477c
The first line of this file holds the git revision number of the last
merge done from the dlang/dmd repository.
diff --git a/gcc/d/dmd/VERSION b/gcc/d/dmd/VERSION
index 8012337..fd05dcf 100644
--- a/gcc/d/dmd/VERSION
+++ b/gcc/d/dmd/VERSION
@@ -1 +1 @@
-v2.105.0
+v2.105.2
diff --git a/gcc/d/dmd/access.d b/gcc/d/dmd/access.d
index ab9b5d9..1010c14 100644
--- a/gcc/d/dmd/access.d
+++ b/gcc/d/dmd/access.d
@@ -20,6 +20,7 @@ import dmd.dmodule;
import dmd.dscope;
import dmd.dstruct;
import dmd.dsymbol;
+import dmd.errors;
import dmd.expression;
import dmd.location;
import dmd.tokens;
@@ -47,7 +48,7 @@ bool checkAccess(AggregateDeclaration ad, Loc loc, Scope* sc, Dsymbol smember)
if (!symbolIsVisible(sc, smember))
{
- ad.error(loc, "%s `%s` is not accessible", smember.kind(), smember.toChars());
+ error(loc, "%s `%s` %s `%s` is not accessible", ad.kind(), ad.toPrettyChars(), smember.kind(), smember.toChars());
//printf("smember = %s %s, vis = %d, semanticRun = %d\n",
// smember.kind(), smember.toPrettyChars(), smember.visible() smember.semanticRun);
return true;
diff --git a/gcc/d/dmd/aggregate.d b/gcc/d/dmd/aggregate.d
index 4ae6b6b..68b5f1b 100644
--- a/gcc/d/dmd/aggregate.d
+++ b/gcc/d/dmd/aggregate.d
@@ -31,6 +31,7 @@ import dmd.errors;
import dmd.expression;
import dmd.func;
import dmd.globals;
+import dmd.hdrgen;
import dmd.id;
import dmd.identifier;
import dmd.location;
@@ -213,7 +214,7 @@ extern (C++) abstract class AggregateDeclaration : ScopeDsymbol
if (!members)
{
- error(loc, "unknown size");
+ .error(loc, "%s `%s` unknown size", kind, toPrettyChars);
return false;
}
@@ -243,7 +244,7 @@ extern (C++) abstract class AggregateDeclaration : ScopeDsymbol
Lfail:
// There's unresolvable forward reference.
if (type != Type.terror)
- error(loc, "no size because of forward reference");
+ error(loc, "%s `%s` no size because of forward reference", kind, toPrettyChars);
// Don't cache errors from speculative semantic, might be resolvable later.
// https://issues.dlang.org/show_bug.cgi?id=16574
if (!global.gag)
@@ -337,7 +338,7 @@ extern (C++) abstract class AggregateDeclaration : ScopeDsymbol
else if (v2._init && i < j)
{
.error(v2.loc, "union field `%s` with default initialization `%s` must be before field `%s`",
- v2.toChars(), v2._init.toChars(), vd.toChars());
+ v2.toChars(), dmd.hdrgen.toChars(v2._init), vd.toChars());
errors = true;
}
}
@@ -452,7 +453,7 @@ extern (C++) abstract class AggregateDeclaration : ScopeDsymbol
assert(!vx._init.isVoidInitializer());
if (vx.inuse) // https://issues.dlang.org/show_bug.cgi?id=18057
{
- vx.error(loc, "recursive initialization of field");
+ .error(loc, "%s `%s` recursive initialization of field", vx.kind(), vx.toPrettyChars());
errors = true;
}
else
@@ -753,7 +754,7 @@ extern (C++) abstract class AggregateDeclaration : ScopeDsymbol
s.isTemplateDeclaration() ||
s.isOverloadSet()))
{
- s.error("is not a constructor; identifiers starting with `__` are reserved for the implementation");
+ .error(s.loc, "%s `%s` is not a constructor; identifiers starting with `__` are reserved for the implementation", s.kind(), s.toPrettyChars());
errors = true;
s = null;
}
diff --git a/gcc/d/dmd/aggregate.h b/gcc/d/dmd/aggregate.h
index 03fe478..4b107e0 100644
--- a/gcc/d/dmd/aggregate.h
+++ b/gcc/d/dmd/aggregate.h
@@ -279,6 +279,7 @@ public:
ObjcClassDeclaration objc; // Data for a class declaration that is needed for the Objective-C integration
Symbol *cpp_type_info_ptr_sym; // cached instance of class Id.cpp_type_info_ptr
+ void classError(const char* fmt, const char* arg);
static ClassDeclaration *create(const Loc &loc, Identifier *id, BaseClasses *baseclasses, Dsymbols *members, bool inObject);
const char *toPrettyChars(bool QualifyTypes = false) override;
ClassDeclaration *syntaxCopy(Dsymbol *s) override;
diff --git a/gcc/d/dmd/arrayop.d b/gcc/d/dmd/arrayop.d
index d843073..25bbb3f 100644
--- a/gcc/d/dmd/arrayop.d
+++ b/gcc/d/dmd/arrayop.d
@@ -19,6 +19,7 @@ import dmd.astenums;
import dmd.declaration;
import dmd.dscope;
import dmd.dsymbol;
+import dmd.errors;
import dmd.expression;
import dmd.expressionsem;
import dmd.func;
@@ -92,7 +93,7 @@ bool checkNonAssignmentArrayOp(Expression e, bool suggestion = false)
const(char)* s = "";
if (suggestion)
s = " (possible missing [])";
- e.error("array operation `%s` without destination memory not allowed%s", e.toChars(), s);
+ error(e.loc, "array operation `%s` without destination memory not allowed%s", e.toChars(), s);
return true;
}
return false;
@@ -121,7 +122,7 @@ Expression arrayOp(BinExp e, Scope* sc)
Type tbn = tb.nextOf().toBasetype();
if (tbn.ty == Tvoid)
{
- e.error("cannot perform array operations on `void[]` arrays");
+ error(e.loc, "cannot perform array operations on `void[]` arrays");
return ErrorExp.get();
}
if (!isArrayOpValid(e))
@@ -164,7 +165,7 @@ Expression arrayOp(BinAssignExp e, Scope* sc)
if (tn && (!tn.isMutable() || !tn.isAssignable()))
{
- e.error("slice `%s` is not mutable", e.e1.toChars());
+ error(e.loc, "slice `%s` is not mutable", e.e1.toChars());
if (e.op == EXP.addAssign)
checkPossibleAddCatError!(AddAssignExp, CatAssignExp)(e.isAddAssignExp);
return ErrorExp.get();
@@ -370,7 +371,7 @@ bool isArrayOpOperand(Expression e)
ErrorExp arrayOpInvalidError(Expression e)
{
- e.error("invalid array operation `%s` (possible missing [])", e.toChars());
+ error(e.loc, "invalid array operation `%s` (possible missing [])", e.toChars());
if (e.op == EXP.add)
checkPossibleAddCatError!(AddExp, CatExp)(e.isAddExp());
else if (e.op == EXP.addAssign)
@@ -383,5 +384,5 @@ private void checkPossibleAddCatError(AddT, CatT)(AddT ae)
if (!ae.e2.type || ae.e2.type.ty != Tarray || !ae.e2.type.implicitConvTo(ae.e1.type))
return;
CatT ce = new CatT(ae.loc, ae.e1, ae.e2);
- ae.errorSupplemental("did you mean to concatenate (`%s`) instead ?", ce.toChars());
+ errorSupplemental(ae.loc, "did you mean to concatenate (`%s`) instead ?", ce.toChars());
}
diff --git a/gcc/d/dmd/attrib.d b/gcc/d/dmd/attrib.d
index baabe93..7b5def1 100644
--- a/gcc/d/dmd/attrib.d
+++ b/gcc/d/dmd/attrib.d
@@ -33,6 +33,7 @@ import dmd.dmodule;
import dmd.dscope;
import dmd.dsymbol;
import dmd.dsymbolsem : dsymbolSemantic;
+import dmd.errors;
import dmd.expression;
import dmd.expressionsem;
import dmd.func;
@@ -664,7 +665,7 @@ extern (C++) final class VisibilityDeclaration : AttribDeclaration
{
Package pkg = m.parent ? m.parent.isPackage() : null;
if (!pkg || !visibility.pkg.isAncestorPackageOf(pkg))
- error("does not bind to one of ancestor packages of module `%s`", m.toPrettyChars(true));
+ .error(loc, "%s `%s` does not bind to one of ancestor packages of module `%s`", kind(), toPrettyChars(false), m.toPrettyChars(true));
}
}
return AttribDeclaration.addMember(sc, sds);
@@ -679,7 +680,7 @@ extern (C++) final class VisibilityDeclaration : AttribDeclaration
{
assert(visibility.kind > Visibility.Kind.undefined);
OutBuffer buf;
- visibilityToBuffer(&buf, visibility);
+ visibilityToBuffer(buf, visibility);
return buf.extractChars();
}
@@ -1473,12 +1474,12 @@ extern (C++) final class UserAttributeDeclaration : AttribDeclaration
{
if (sym.isCPPNamespaceDeclaration() || sym.isNspace())
{
- exp.error("`@%s` cannot be applied to namespaces", Id.udaGNUAbiTag.toChars());
+ .error(exp.loc, "`@%s` cannot be applied to namespaces", Id.udaGNUAbiTag.toChars());
sym.errors = true;
}
else if (linkage != LINK.cpp)
{
- exp.error("`@%s` can only apply to C++ symbols", Id.udaGNUAbiTag.toChars());
+ .error(exp.loc, "`@%s` can only apply to C++ symbols", Id.udaGNUAbiTag.toChars());
sym.errors = true;
}
// Only one `@gnuAbiTag` is allowed by semantic2
diff --git a/gcc/d/dmd/blockexit.d b/gcc/d/dmd/blockexit.d
index bdc81f2..31a32cf 100644
--- a/gcc/d/dmd/blockexit.d
+++ b/gcc/d/dmd/blockexit.d
@@ -18,6 +18,7 @@ import dmd.astenums;
import dmd.canthrow;
import dmd.dclass;
import dmd.declaration;
+import dmd.errorsink;
import dmd.expression;
import dmd.func;
import dmd.globals;
@@ -56,11 +57,11 @@ enum BE : int
* Params:
* s = statement to check for block exit status
* func = function that statement s is in
- * mustNotThrow = generate an error if it throws
+ * eSink = generate an error if it throws
* Returns:
* BE.xxxx
*/
-int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
+int blockExit(Statement s, FuncDeclaration func, ErrorSink eSink)
{
int result = BE.none;
@@ -97,7 +98,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
if (s.exp.type && s.exp.type.toBasetype().isTypeNoreturn())
result = BE.halt;
- result |= canThrow(s.exp, func, mustNotThrow);
+ result |= canThrow(s.exp, func, eSink);
}
}
@@ -143,23 +144,24 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
// Deprecated in 2.100
// Make an error in 2.110
if (sl && sl.isCaseStatement())
- s.deprecation("switch case fallthrough - use 'goto %s;' if intended", gototype);
+ global.errorSink.deprecation(s.loc, "switch case fallthrough - use 'goto %s;' if intended", gototype);
else
- s.error("switch case fallthrough - use 'goto %s;' if intended", gototype);
+ global.errorSink.error(s.loc, "switch case fallthrough - use 'goto %s;' if intended", gototype);
}
}
}
if (!(result & BE.fallthru) && !s.comeFrom())
{
- if (blockExit(s, func, mustNotThrow) != BE.halt && s.hasCode() &&
+ version (none) // this warning is completely useless due to insane false positive rate in real life template code
+ if (blockExit(s, func, eSink) != BE.halt && s.hasCode() &&
s.loc != Loc.initial) // don't emit warning for generated code
- s.warning("statement is not reachable");
+ global.errorSink.warning(s.loc, "statement is not reachable");
}
else
{
result &= ~BE.fallthru;
- result |= blockExit(s, func, mustNotThrow);
+ result |= blockExit(s, func, eSink);
}
slast = s;
}
@@ -173,7 +175,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
{
if (s)
{
- int r = blockExit(s, func, mustNotThrow);
+ int r = blockExit(s, func, eSink);
result |= r & ~(BE.break_ | BE.continue_ | BE.fallthru);
if ((r & (BE.fallthru | BE.continue_ | BE.break_)) == 0)
result &= ~BE.fallthru;
@@ -184,7 +186,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
void visitScope(ScopeStatement s)
{
//printf("ScopeStatement::blockExit(%p)\n", s.statement);
- result = blockExit(s.statement, func, mustNotThrow);
+ result = blockExit(s.statement, func, eSink);
}
void visitWhile(WhileStatement s)
@@ -197,7 +199,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
{
if (s._body)
{
- result = blockExit(s._body, func, mustNotThrow);
+ result = blockExit(s._body, func, eSink);
if (result == BE.break_)
{
result = BE.fallthru;
@@ -210,7 +212,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
result = BE.fallthru;
if (result & BE.fallthru)
{
- result |= canThrow(s.condition, func, mustNotThrow);
+ result |= canThrow(s.condition, func, eSink);
if (!(result & BE.break_) && s.condition.toBool().hasValue(true))
result &= ~BE.fallthru;
@@ -223,13 +225,13 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
result = BE.fallthru;
if (s._init)
{
- result = blockExit(s._init, func, mustNotThrow);
+ result = blockExit(s._init, func, eSink);
if (!(result & BE.fallthru))
return;
}
if (s.condition)
{
- result |= canThrow(s.condition, func, mustNotThrow);
+ result |= canThrow(s.condition, func, eSink);
const opt = s.condition.toBool();
if (opt.hasValue(true))
@@ -241,22 +243,22 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
result &= ~BE.fallthru; // the body must do the exiting
if (s._body)
{
- int r = blockExit(s._body, func, mustNotThrow);
+ int r = blockExit(s._body, func, eSink);
if (r & (BE.break_ | BE.goto_))
result |= BE.fallthru;
result |= r & ~(BE.fallthru | BE.break_ | BE.continue_);
}
if (s.increment)
- result |= canThrow(s.increment, func, mustNotThrow);
+ result |= canThrow(s.increment, func, eSink);
}
void visitForeach(ForeachStatement s)
{
result = BE.fallthru;
- result |= canThrow(s.aggr, func, mustNotThrow);
+ result |= canThrow(s.aggr, func, eSink);
if (s._body)
- result |= blockExit(s._body, func, mustNotThrow) & ~(BE.break_ | BE.continue_);
+ result |= blockExit(s._body, func, eSink) & ~(BE.break_ | BE.continue_);
}
void visitForeachRange(ForeachRangeStatement s)
@@ -269,30 +271,30 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
{
//printf("IfStatement::blockExit(%p)\n", s);
result = BE.none;
- result |= canThrow(s.condition, func, mustNotThrow);
+ result |= canThrow(s.condition, func, eSink);
const opt = s.condition.toBool();
if (opt.hasValue(true))
{
- result |= blockExit(s.ifbody, func, mustNotThrow);
+ result |= blockExit(s.ifbody, func, eSink);
}
else if (opt.hasValue(false))
{
- result |= blockExit(s.elsebody, func, mustNotThrow);
+ result |= blockExit(s.elsebody, func, eSink);
}
else
{
- result |= blockExit(s.ifbody, func, mustNotThrow);
- result |= blockExit(s.elsebody, func, mustNotThrow);
+ result |= blockExit(s.ifbody, func, eSink);
+ result |= blockExit(s.elsebody, func, eSink);
}
//printf("IfStatement::blockExit(%p) = x%x\n", s, result);
}
void visitConditional(ConditionalStatement s)
{
- result = blockExit(s.ifbody, func, mustNotThrow);
+ result = blockExit(s.ifbody, func, eSink);
if (s.elsebody)
- result |= blockExit(s.elsebody, func, mustNotThrow);
+ result |= blockExit(s.elsebody, func, eSink);
}
void visitPragma(PragmaStatement s)
@@ -308,11 +310,11 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
void visitSwitch(SwitchStatement s)
{
result = BE.none;
- result |= canThrow(s.condition, func, mustNotThrow);
+ result |= canThrow(s.condition, func, eSink);
if (s._body)
{
- result |= blockExit(s._body, func, mustNotThrow);
+ result |= blockExit(s._body, func, eSink);
if (result & BE.break_)
{
result |= BE.fallthru;
@@ -325,12 +327,12 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
void visitCase(CaseStatement s)
{
- result = blockExit(s.statement, func, mustNotThrow);
+ result = blockExit(s.statement, func, eSink);
}
void visitDefault(DefaultStatement s)
{
- result = blockExit(s.statement, func, mustNotThrow);
+ result = blockExit(s.statement, func, eSink);
}
void visitGotoDefault(GotoDefaultStatement s)
@@ -353,7 +355,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
{
result = BE.return_;
if (s.exp)
- result |= canThrow(s.exp, func, mustNotThrow);
+ result |= canThrow(s.exp, func, eSink);
}
void visitBreak(BreakStatement s)
@@ -369,20 +371,20 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
void visitSynchronized(SynchronizedStatement s)
{
- result = blockExit(s._body, func, mustNotThrow);
+ result = blockExit(s._body, func, eSink);
}
void visitWith(WithStatement s)
{
result = BE.none;
- result |= canThrow(s.exp, func, mustNotThrow);
- result |= blockExit(s._body, func, mustNotThrow);
+ result |= canThrow(s.exp, func, eSink);
+ result |= blockExit(s._body, func, eSink);
}
void visitTryCatch(TryCatchStatement s)
{
assert(s._body);
- result = blockExit(s._body, func, false);
+ result = blockExit(s._body, func, null);
int catchresult = 0;
foreach (c; *s.catches)
@@ -390,7 +392,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
if (c.type == Type.terror)
continue;
- int cresult = blockExit(c.handler, func, mustNotThrow);
+ int cresult = blockExit(c.handler, func, eSink);
/* If we're catching Object, then there is no throwing
*/
@@ -411,10 +413,10 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
}
catchresult |= cresult;
}
- if (mustNotThrow && (result & BE.throw_))
+ if (eSink && (result & BE.throw_))
{
// now explain why this is nothrow
- blockExit(s._body, func, mustNotThrow);
+ blockExit(s._body, func, eSink);
}
result |= catchresult;
}
@@ -423,12 +425,12 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
{
result = BE.fallthru;
if (s._body)
- result = blockExit(s._body, func, false);
+ result = blockExit(s._body, func, null);
// check finally body as well, it may throw (bug #4082)
int finalresult = BE.fallthru;
if (s.finalbody)
- finalresult = blockExit(s.finalbody, func, false);
+ finalresult = blockExit(s.finalbody, func, null);
// If either body or finalbody halts
if (result == BE.halt)
@@ -436,13 +438,13 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
if (finalresult == BE.halt)
result = BE.none;
- if (mustNotThrow)
+ if (eSink)
{
// now explain why this is nothrow
if (s._body && (result & BE.throw_))
- blockExit(s._body, func, mustNotThrow);
+ blockExit(s._body, func, eSink);
if (s.finalbody && (finalresult & BE.throw_))
- blockExit(s.finalbody, func, mustNotThrow);
+ blockExit(s.finalbody, func, eSink);
}
version (none)
@@ -452,7 +454,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
// destructor call, exit of synchronized statement, etc.
if (result == BE.halt && finalresult != BE.halt && s.finalbody && s.finalbody.hasCode())
{
- s.finalbody.warning("statement is not reachable");
+ eSink.warning(s.finalbody.loc, "statement is not reachable");
}
}
@@ -472,12 +474,12 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
if (s.internalThrow)
{
// https://issues.dlang.org/show_bug.cgi?id=8675
- // Allow throwing 'Throwable' object even if mustNotThrow.
+ // Allow throwing 'Throwable' object even if eSink is non-null (i.e. inside a `nothrow` scope).
result = BE.fallthru;
return;
}
- result = checkThrow(s.loc, s.exp, mustNotThrow, func);
+ result = checkThrow(s.loc, s.exp, func, eSink);
}
void visitGoto(GotoStatement s)
@@ -489,7 +491,7 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
void visitLabel(LabelStatement s)
{
//printf("LabelStatement::blockExit(%p)\n", s);
- result = blockExit(s.statement, func, mustNotThrow);
+ result = blockExit(s.statement, func, eSink);
if (s.breaks)
result |= BE.fallthru;
}
@@ -502,8 +504,8 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
{
if(func)
func.setThrow(s.loc, "`asm` statement is assumed to throw - mark it with `nothrow` if it does not");
- if (mustNotThrow)
- s.error("`asm` statement is assumed to throw - mark it with `nothrow` if it does not"); // TODO
+ if (eSink)
+ eSink.error(s.loc, "`asm` statement is assumed to throw - mark it with `nothrow` if it does not"); // TODO
else
result |= BE.throw_;
}
@@ -528,15 +530,13 @@ int blockExit(Statement s, FuncDeclaration func, bool mustNotThrow)
+ Params:
+ loc = location of the `throw`
+ exp = expression yielding the throwable
- + mustNotThrow = inside of a `nothrow` scope?
+ + eSink = if !null then inside of a `nothrow` scope
+ func = function containing the `throw`
+
+ Returns: `BE.[err]throw` depending on the type of `exp`
+/
-BE checkThrow(ref const Loc loc, Expression exp, const bool mustNotThrow, FuncDeclaration func)
+BE checkThrow(ref const Loc loc, Expression exp, FuncDeclaration func, ErrorSink eSink)
{
- import dmd.errors : error;
-
Type t = exp.type.toBasetype();
ClassDeclaration cd = t.isClassHandle();
assert(cd);
@@ -545,8 +545,8 @@ BE checkThrow(ref const Loc loc, Expression exp, const bool mustNotThrow, FuncDe
{
return BE.errthrow;
}
- if (mustNotThrow)
- loc.error("`%s` is thrown but not caught", exp.type.toChars());
+ if (eSink)
+ eSink.error(loc, "`%s` is thrown but not caught", exp.type.toChars());
else if (func)
func.setThrow(loc, "`%s` is thrown but not caught", exp.type);
diff --git a/gcc/d/dmd/canthrow.d b/gcc/d/dmd/canthrow.d
index ba13eb0..bb1fd6f 100644
--- a/gcc/d/dmd/canthrow.d
+++ b/gcc/d/dmd/canthrow.d
@@ -20,6 +20,7 @@ import dmd.astenums;
import dmd.blockexit : BE, checkThrow;
import dmd.declaration;
import dmd.dsymbol;
+import dmd.errorsink;
import dmd.expression;
import dmd.func;
import dmd.globals;
@@ -47,25 +48,25 @@ enum CT : BE
}
/********************************************
- * Returns true if the expression may throw exceptions.
- * If 'mustNotThrow' is true, generate an error if it throws
+ * If `eSink` is not null, generate an error if `e` throws
+ * Params:
+ * e = expression to check for throwing
+ * func = function
+ * eSink = if !null, then send error messages to eSink
+ * Returns: `CT.exception` or `CT.error` if the expression may throw exceptions.
*/
-extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, bool mustNotThrow)
+extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, ErrorSink eSink)
{
//printf("Expression::canThrow(%d) %s\n", mustNotThrow, e.toChars());
// stop walking if we determine this expression can throw
extern (C++) final class CanThrow : StoppableVisitor
{
alias visit = typeof(super).visit;
- FuncDeclaration func;
- bool mustNotThrow;
CT result;
public:
- extern (D) this(FuncDeclaration func, bool mustNotThrow) scope @safe
+ extern (D) this() scope @safe
{
- this.func = func;
- this.mustNotThrow = mustNotThrow;
}
void checkFuncThrows(Expression e, FuncDeclaration f)
@@ -73,9 +74,9 @@ extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, bool mustN
auto tf = f.type.toBasetype().isTypeFunction();
if (tf && !tf.isnothrow)
{
- if (mustNotThrow)
+ if (eSink)
{
- e.error("%s `%s` is not `nothrow`", f.kind(), f.toPrettyChars());
+ eSink.error(e.loc, "%s `%s` is not `nothrow`", f.kind(), f.toPrettyChars());
if (!f.isDtorDeclaration())
errorSupplementalInferredAttr(f, 10, false, STC.nothrow_);
@@ -95,7 +96,7 @@ extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, bool mustN
override void visit(DeclarationExp de)
{
- result |= Dsymbol_canThrow(de.declaration, func, mustNotThrow);
+ result |= Dsymbol_canThrow(de.declaration, func, eSink);
}
override void visit(CallExp ce)
@@ -138,12 +139,12 @@ extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, bool mustN
if (ce.f)
checkFuncThrows(ce, ce.f);
- else if (mustNotThrow)
+ else if (eSink)
{
auto e1 = ce.e1;
if (auto pe = e1.isPtrExp()) // print 'fp' if e1 is (*fp)
e1 = pe.e1;
- ce.error("`%s` is not `nothrow`", e1.toChars());
+ eSink.error(ce.loc, "`%s` is not `nothrow`", e1.toChars());
}
result |= CT.exception;
}
@@ -202,7 +203,7 @@ extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, bool mustN
override void visit(ThrowExp te)
{
- const res = checkThrow(te.loc, te.e1, mustNotThrow, func);
+ const res = checkThrow(te.loc, te.e1, func, eSink);
assert((res & ~(CT.exception | CT.error)) == 0);
result |= res;
}
@@ -213,22 +214,22 @@ extern (C++) /* CT */ BE canThrow(Expression e, FuncDeclaration func, bool mustN
}
}
- scope CanThrow ct = new CanThrow(func, mustNotThrow);
+ scope CanThrow ct = new CanThrow();
walkPostorder(e, ct);
return ct.result;
}
/**************************************
- * Does symbol, when initialized, throw?
+ * Does symbol `s`, when initialized, throw?
* Mirrors logic in Dsymbol_toElem().
*/
-private CT Dsymbol_canThrow(Dsymbol s, FuncDeclaration func, bool mustNotThrow)
+private CT Dsymbol_canThrow(Dsymbol s, FuncDeclaration func, ErrorSink eSink)
{
CT result;
int symbolDg(Dsymbol s)
{
- result |= Dsymbol_canThrow(s, func, mustNotThrow);
+ result |= Dsymbol_canThrow(s, func, eSink);
return 0;
}
@@ -237,7 +238,7 @@ private CT Dsymbol_canThrow(Dsymbol s, FuncDeclaration func, bool mustNotThrow)
{
s = s.toAlias();
if (s != vd)
- return Dsymbol_canThrow(s, func, mustNotThrow);
+ return Dsymbol_canThrow(s, func, eSink);
if (vd.storage_class & STC.manifest)
{
}
@@ -249,10 +250,10 @@ private CT Dsymbol_canThrow(Dsymbol s, FuncDeclaration func, bool mustNotThrow)
if (vd._init)
{
if (auto ie = vd._init.isExpInitializer())
- result |= canThrow(ie.exp, func, mustNotThrow);
+ result |= canThrow(ie.exp, func, eSink);
}
if (vd.needsScopeDtor())
- result |= canThrow(vd.edtor, func, mustNotThrow);
+ result |= canThrow(vd.edtor, func, eSink);
}
}
else if (auto ad = s.isAttribDeclaration())
diff --git a/gcc/d/dmd/chkformat.d b/gcc/d/dmd/chkformat.d
index feaa3c7..8cfad59 100644
--- a/gcc/d/dmd/chkformat.d
+++ b/gcc/d/dmd/chkformat.d
@@ -15,7 +15,7 @@ import core.stdc.ctype : isdigit;
import dmd.astenums;
import dmd.cond;
-import dmd.errors;
+import dmd.errorsink;
import dmd.expression;
import dmd.globals;
import dmd.identifier;
@@ -53,6 +53,7 @@ import dmd.target;
* format = format string
* args = arguments to match with format string
* isVa_list = if a "v" function (format check only)
+ * eSink = where the error messages go
*
* Returns:
* `true` if errors occurred
@@ -60,7 +61,8 @@ import dmd.target;
* C99 7.19.6.1
* https://www.cplusplus.com/reference/cstdio/printf/
*/
-bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expression[] args, bool isVa_list)
+public
+bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expression[] args, bool isVa_list, ErrorSink eSink)
{
//printf("checkPrintFormat('%.*s')\n", cast(int)format.length, format.ptr);
size_t n; // index in args
@@ -87,7 +89,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
{
// format check only
if (fmt == Format.error)
- deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
+ eSink.deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
continue;
}
@@ -96,7 +98,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
if (n == args.length)
{
if (args.length < (n + 1))
- deprecation(loc, "more format specifiers than %d arguments", cast(int)n);
+ eSink.deprecation(loc, "more format specifiers than %d arguments", cast(int)n);
else
skip = true;
return null;
@@ -106,7 +108,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
void errorMsg(const char* prefix, Expression arg, const char* texpect, Type tactual)
{
- deprecation(arg.loc, "%sargument `%s` for format specification `\"%.*s\"` must be `%s`, not `%s`",
+ eSink.deprecation(arg.loc, "%sargument `%s` for format specification `\"%.*s\"` must be `%s`, not `%s`",
prefix ? prefix : "", arg.toChars(), cast(int)slice.length, slice.ptr, texpect, tactual.toChars());
}
@@ -178,7 +180,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
else
errorMsg(null, e, (c_longsize == 4 ? "int" : "long"), t);
if (t.isintegral() && t.size() != c_longsize)
- errorSupplemental(e.loc, "C `long` is %d bytes on your system", c_longsize);
+ eSink.errorSupplemental(e.loc, "C `long` is %d bytes on your system", c_longsize);
}
break;
@@ -226,7 +228,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
break;
case Format.n: // pointer to int
- if (!(t.ty == Tpointer && tnext.ty == Tint32))
+ if (!(t.ty == Tpointer && tnext.ty == Tint32 && tnext.isMutable()))
errorMsg(null, e, "int*", t);
break;
@@ -286,7 +288,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
break;
case Format.error:
- deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
+ eSink.deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
break;
case Format.GNU_m:
@@ -327,6 +329,7 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
* format = format string
* args = arguments to match with format string
* isVa_list = if a "v" function (format check only)
+ * eSink = where the error messages go
*
* Returns:
* `true` if errors occurred
@@ -334,7 +337,8 @@ bool checkPrintfFormat(ref const Loc loc, scope const char[] format, scope Expre
* C99 7.19.6.2
* https://www.cplusplus.com/reference/cstdio/scanf/
*/
-bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expression[] args, bool isVa_list)
+public
+bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expression[] args, bool isVa_list, ErrorSink eSink)
{
size_t n = 0;
for (size_t i = 0; i < format.length;)
@@ -357,7 +361,7 @@ bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expres
{
// format check only
if (fmt == Format.error)
- deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
+ eSink.deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
continue;
}
@@ -366,7 +370,7 @@ bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expres
if (n == args.length)
{
if (!asterisk)
- deprecation(loc, "more format specifiers than %d arguments", cast(int)n);
+ eSink.deprecation(loc, "more format specifiers than %d arguments", cast(int)n);
return null;
}
return args[n++];
@@ -374,7 +378,7 @@ bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expres
void errorMsg(const char* prefix, Expression arg, const char* texpect, Type tactual)
{
- deprecation(arg.loc, "%sargument `%s` for format specification `\"%.*s\"` must be `%s`, not `%s`",
+ eSink.deprecation(arg.loc, "%sargument `%s` for format specification `\"%.*s\"` must be `%s`, not `%s`",
prefix ? prefix : "", arg.toChars(), cast(int)slice.length, slice.ptr, texpect, tactual.toChars());
}
@@ -512,7 +516,7 @@ bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expres
break;
case Format.error:
- deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
+ eSink.deprecation(loc, "format specifier `\"%.*s\"` is invalid", cast(int)slice.length, slice.ptr);
break;
case Format.GNU_m:
@@ -523,6 +527,8 @@ bool checkScanfFormat(ref const Loc loc, scope const char[] format, scope Expres
return false;
}
+/*****************************************************************************************************/
+
private:
/**************************************
diff --git a/gcc/d/dmd/clone.d b/gcc/d/dmd/clone.d
index 4cff1ec..ca7f398 100644
--- a/gcc/d/dmd/clone.d
+++ b/gcc/d/dmd/clone.d
@@ -297,7 +297,7 @@ FuncDeclaration buildOpAssign(StructDeclaration sd, Scope* sc)
}
auto fparams = new Parameters();
- fparams.push(new Parameter(STC.nodtor, sd.type, Id.p, null, null));
+ fparams.push(new Parameter(loc, STC.nodtor, sd.type, Id.p, null, null));
auto tf = new TypeFunction(ParameterList(fparams), sd.handleType(), LINK.d, stc | STC.ref_);
auto fop = new FuncDeclaration(declLoc, Loc.initial, Id.assign, stc, tf);
fop.storage_class |= STC.inference;
@@ -546,10 +546,11 @@ FuncDeclaration buildXopEquals(StructDeclaration sd, Scope* sc)
TypeFunction tfeqptr;
{
Scope scx;
+ scx.eSink = sc.eSink;
/* const bool opEquals(ref const S s);
*/
auto parameters = new Parameters();
- parameters.push(new Parameter(STC.ref_ | STC.const_, sd.type, null, null, null));
+ parameters.push(new Parameter(Loc.initial, STC.ref_ | STC.const_, sd.type, null, null, null));
tfeqptr = new TypeFunction(ParameterList(parameters), Type.tbool, LINK.d);
tfeqptr.mod = MODFlags.const_;
tfeqptr = cast(TypeFunction)tfeqptr.typeSemantic(Loc.initial, &scx);
@@ -577,7 +578,7 @@ FuncDeclaration buildXopEquals(StructDeclaration sd, Scope* sc)
Loc declLoc; // loc is unnecessary so __xopEquals is never called directly
Loc loc; // loc is unnecessary so errors are gagged
auto parameters = new Parameters();
- parameters.push(new Parameter(STC.ref_ | STC.const_, sd.type, Id.p, null, null));
+ parameters.push(new Parameter(loc, STC.ref_ | STC.const_, sd.type, Id.p, null, null));
auto tf = new TypeFunction(ParameterList(parameters), Type.tbool, LINK.d, STC.const_);
tf = tf.addSTC(STC.const_).toTypeFunction();
Identifier id = Id.xopEquals;
@@ -620,10 +621,11 @@ FuncDeclaration buildXopCmp(StructDeclaration sd, Scope* sc)
TypeFunction tfcmpptr;
{
Scope scx;
+ scx.eSink = sc.eSink;
/* const int opCmp(ref const S s);
*/
auto parameters = new Parameters();
- parameters.push(new Parameter(STC.ref_ | STC.const_, sd.type, null, null, null));
+ parameters.push(new Parameter(Loc.initial, STC.ref_ | STC.const_, sd.type, null, null, null));
tfcmpptr = new TypeFunction(ParameterList(parameters), Type.tint32, LINK.d);
tfcmpptr.mod = MODFlags.const_;
tfcmpptr = cast(TypeFunction)tfcmpptr.typeSemantic(Loc.initial, &scx);
@@ -701,7 +703,7 @@ FuncDeclaration buildXopCmp(StructDeclaration sd, Scope* sc)
Loc declLoc; // loc is unnecessary so __xopCmp is never called directly
Loc loc; // loc is unnecessary so errors are gagged
auto parameters = new Parameters();
- parameters.push(new Parameter(STC.ref_ | STC.const_, sd.type, Id.p, null, null));
+ parameters.push(new Parameter(loc, STC.ref_ | STC.const_, sd.type, Id.p, null, null));
auto tf = new TypeFunction(ParameterList(parameters), Type.tint32, LINK.d, STC.const_);
tf = tf.addSTC(STC.const_).toTypeFunction();
Identifier id = Id.xopCmp;
@@ -820,7 +822,7 @@ FuncDeclaration buildXtoHash(StructDeclaration sd, Scope* sc)
Loc declLoc; // loc is unnecessary so __xtoHash is never called directly
Loc loc; // internal code should have no loc to prevent coverage
auto parameters = new Parameters();
- parameters.push(new Parameter(STC.ref_ | STC.const_, sd.type, Id.p, null, null));
+ parameters.push(new Parameter(loc, STC.ref_ | STC.const_, sd.type, Id.p, null, null));
auto tf = new TypeFunction(ParameterList(parameters), Type.thash_t, LINK.d, STC.nothrow_ | STC.trusted);
Identifier id = Id.xtoHash;
auto fop = new FuncDeclaration(declLoc, Loc.initial, id, STC.static_, tf);
@@ -1074,7 +1076,7 @@ private DtorDeclaration buildWindowsCppDtor(AggregateDeclaration ad, DtorDeclara
// // TODO: if (del) delete (char*)this;
// return (void*) this;
// }
- Parameter delparam = new Parameter(STC.undefined_, Type.tuns32, Identifier.idPool("del"), new IntegerExp(dtor.loc, 0, Type.tuns32), null);
+ Parameter delparam = new Parameter(Loc.initial, STC.undefined_, Type.tuns32, Identifier.idPool("del"), new IntegerExp(dtor.loc, 0, Type.tuns32), null);
Parameters* params = new Parameters;
params.push(delparam);
const stc = dtor.storage_class & ~STC.scope_; // because we add the `return this;` later
@@ -1126,7 +1128,7 @@ private DtorDeclaration buildExternDDtor(AggregateDeclaration ad, Scope* sc)
return null;
// Generate shim only when ABI incompatible on target platform
- if (ad.classKind != ClassKind.cpp || !target.cpp.wrapDtorInExternD)
+ if (dtor._linkage != LINK.cpp || !target.cpp.wrapDtorInExternD)
return dtor;
// generate member function that adjusts calling convention
@@ -1199,7 +1201,7 @@ FuncDeclaration buildInv(AggregateDeclaration ad, Scope* sc)
version (all)
{
// currently rejects
- ad.error(inv.loc, "mixing invariants with different `shared`/`synchronized` qualifiers is not supported");
+ .error(inv.loc, "%s `%s` mixing invariants with different `shared`/`synchronized` qualifiers is not supported", ad.kind(), ad.toPrettyChars());
e = null;
break;
}
@@ -1514,7 +1516,7 @@ private CtorDeclaration generateCopyCtorDeclaration(StructDeclaration sd, const
{
auto fparams = new Parameters();
auto structType = sd.type;
- fparams.push(new Parameter(paramStc | STC.ref_ | STC.return_ | STC.scope_, structType, Id.p, null, null));
+ fparams.push(new Parameter(Loc.initial, paramStc | STC.ref_ | STC.return_ | STC.scope_, structType, Id.p, null, null));
ParameterList pList = ParameterList(fparams);
auto tf = new TypeFunction(pList, structType, LINK.d, STC.ref_);
auto ccd = new CtorDeclaration(sd.loc, Loc.initial, STC.ref_, tf, true);
diff --git a/gcc/d/dmd/compiler.d b/gcc/d/dmd/compiler.d
index 68ec1d3..e85cc20 100644
--- a/gcc/d/dmd/compiler.d
+++ b/gcc/d/dmd/compiler.d
@@ -13,7 +13,6 @@ module dmd.compiler;
import dmd.arraytypes;
import dmd.dmodule;
-import dmd.dscope;
import dmd.expression;
import dmd.mtype;
import dmd.root.array;
diff --git a/gcc/d/dmd/cond.d b/gcc/d/dmd/cond.d
index 360acf5..70a7c88 100644
--- a/gcc/d/dmd/cond.d
+++ b/gcc/d/dmd/cond.d
@@ -322,7 +322,7 @@ extern (C++) final class StaticForeach : RootObject
foreach (params; pparams)
{
auto p = aggrfe ? (*aggrfe.parameters)[i] : rangefe.prm;
- params.push(new Parameter(p.storageClass, p.type, p.ident, null, null));
+ params.push(new Parameter(aloc, p.storageClass, p.type, p.ident, null, null));
}
}
Expression[2] res;
@@ -691,6 +691,10 @@ extern (C++) final class VersionCondition : DVCondition
case "LDC":
case "linux":
case "LittleEndian":
+ case "LoongArch32":
+ case "LoongArch64":
+ case "LoongArch_HardFloat":
+ case "LoongArch_SoftFloat":
case "MinGW":
case "MIPS32":
case "MIPS64":
diff --git a/gcc/d/dmd/constfold.d b/gcc/d/dmd/constfold.d
index e5526a1..ef408cb 100644
--- a/gcc/d/dmd/constfold.d
+++ b/gcc/d/dmd/constfold.d
@@ -336,7 +336,7 @@ UnionExp Div(const ref Loc loc, Type type, Expression e1, Expression e2)
n2 = e2.toInteger();
if (n2 == 0)
{
- e2.error("divide by 0");
+ error(e2.loc, "divide by 0");
emplaceExp!(ErrorExp)(&ue);
return ue;
}
@@ -345,13 +345,13 @@ UnionExp Div(const ref Loc loc, Type type, Expression e1, Expression e2)
// Check for int.min / -1
if (n1 == 0xFFFFFFFF80000000UL && type.toBasetype().ty != Tint64)
{
- e2.error("integer overflow: `int.min / -1`");
+ error(e2.loc, "integer overflow: `int.min / -1`");
emplaceExp!(ErrorExp)(&ue);
return ue;
}
else if (n1 == 0x8000000000000000L) // long.min / -1
{
- e2.error("integer overflow: `long.min / -1L`");
+ error(e2.loc, "integer overflow: `long.min / -1L`");
emplaceExp!(ErrorExp)(&ue);
return ue;
}
@@ -401,7 +401,7 @@ UnionExp Mod(const ref Loc loc, Type type, Expression e1, Expression e2)
n2 = e2.toInteger();
if (n2 == 0)
{
- e2.error("divide by 0");
+ error(e2.loc, "divide by 0");
emplaceExp!(ErrorExp)(&ue);
return ue;
}
@@ -410,13 +410,13 @@ UnionExp Mod(const ref Loc loc, Type type, Expression e1, Expression e2)
// Check for int.min % -1
if (n1 == 0xFFFFFFFF80000000UL && type.toBasetype().ty != Tint64)
{
- e2.error("integer overflow: `int.min %% -1`");
+ error(e2.loc, "integer overflow: `int.min %% -1`");
emplaceExp!(ErrorExp)(&ue);
return ue;
}
else if (n1 == 0x8000000000000000L) // long.min % -1
{
- e2.error("integer overflow: `long.min %% -1L`");
+ error(e2.loc, "integer overflow: `long.min %% -1L`");
emplaceExp!(ErrorExp)(&ue);
return ue;
}
@@ -1135,7 +1135,7 @@ UnionExp Index(Type type, Expression e1, Expression e2, bool indexIsInBounds)
uinteger_t i = e2.toInteger();
if (i >= es1.len)
{
- e1.error("string index %llu is out of bounds `[0 .. %llu]`", i, cast(ulong)es1.len);
+ error(e1.loc, "string index %llu is out of bounds `[0 .. %llu]`", i, cast(ulong)es1.len);
emplaceExp!(ErrorExp)(&ue);
}
else
@@ -1151,7 +1151,7 @@ UnionExp Index(Type type, Expression e1, Expression e2, bool indexIsInBounds)
if (i >= length && (e1.op == EXP.arrayLiteral || !indexIsInBounds))
{
// C code only checks bounds if an ArrayLiteralExp
- e1.error("array index %llu is out of bounds `%s[0 .. %llu]`", i, e1.toChars(), length);
+ error(e1.loc, "array index %llu is out of bounds `%s[0 .. %llu]`", i, e1.toChars(), length);
emplaceExp!(ErrorExp)(&ue);
}
else if (ArrayLiteralExp ale = e1.isArrayLiteralExp())
@@ -1174,7 +1174,7 @@ UnionExp Index(Type type, Expression e1, Expression e2, bool indexIsInBounds)
{
if (i >= ale.elements.length)
{
- e1.error("array index %llu is out of bounds `%s[0 .. %llu]`", i, e1.toChars(), cast(ulong) ale.elements.length);
+ error(e1.loc, "array index %llu is out of bounds `%s[0 .. %llu]`", i, e1.toChars(), cast(ulong) ale.elements.length);
emplaceExp!(ErrorExp)(&ue);
}
else
diff --git a/gcc/d/dmd/cparse.d b/gcc/d/dmd/cparse.d
index 03383d1..d183b82 100644
--- a/gcc/d/dmd/cparse.d
+++ b/gcc/d/dmd/cparse.d
@@ -231,6 +231,9 @@ final class CParser(AST) : Parser!AST
}
goto Lexp; // function call
+ case TOK.semicolon:
+ goto Lexp;
+
default:
{
/* If tokens look like a declaration, assume it is one
@@ -501,7 +504,7 @@ final class CParser(AST) : Parser!AST
auto condition = cparseExpression();
check(TOK.rightParenthesis);
auto _body = cparseStatement(ParseStatementFlags.scope_);
- s = new AST.SwitchStatement(loc, condition, _body, false);
+ s = new AST.SwitchStatement(loc, null, condition, _body, false, token.loc);
break;
}
@@ -626,7 +629,7 @@ final class CParser(AST) : Parser!AST
default:
// ImportC extensions: parse as a D asm block.
- s = parseAsm();
+ s = parseAsm(compileEnv.masm);
break;
}
break;
@@ -3114,12 +3117,13 @@ final class CParser(AST) : Parser!AST
}
Identifier id;
+ const paramLoc = token.loc;
auto t = cparseDeclarator(DTR.xparameter, tspec, id, specifier);
if (token.value == TOK.__attribute__)
cparseGnuAttributes(specifier);
if (specifier.mod & MOD.xconst)
t = toConst(t);
- auto param = new AST.Parameter(specifiersToSTC(LVL.parameter, specifier),
+ auto param = new AST.Parameter(paramLoc, specifiersToSTC(LVL.parameter, specifier),
t, id, null, null);
parameters.push(param);
if (token.value == TOK.rightParenthesis || token.value == TOK.endOfFile)
@@ -3297,8 +3301,10 @@ final class CParser(AST) : Parser!AST
nextToken();
else
{
- error("extended-decl-modifier expected");
- break;
+ error("extended-decl-modifier expected after `__declspec(`, saw `%s` instead", token.toChars());
+ nextToken();
+ if (token.value != TOK.rightParenthesis)
+ break;
}
}
}
diff --git a/gcc/d/dmd/cppmangle.d b/gcc/d/dmd/cppmangle.d
index 5d74ec4..230bfec 100644
--- a/gcc/d/dmd/cppmangle.d
+++ b/gcc/d/dmd/cppmangle.d
@@ -23,7 +23,6 @@
module dmd.cppmangle;
-import core.stdc.string;
import core.stdc.stdio;
import dmd.arraytypes;
@@ -46,7 +45,6 @@ import dmd.common.outbuffer;
import dmd.root.rootobject;
import dmd.root.string;
import dmd.target;
-import dmd.tokens;
import dmd.typesem;
import dmd.visitor;
@@ -485,7 +483,7 @@ private final class CppMangleVisitor : Visitor
}
else
{
- ti.error("internal compiler error: C++ `%s` template value parameter is not supported", tv.valType.toChars());
+ .error(ti.loc, "%s `%s` internal compiler error: C++ `%s` template value parameter is not supported", ti.kind, ti.toPrettyChars, tv.valType.toChars());
fatal();
}
}
@@ -520,13 +518,13 @@ private final class CppMangleVisitor : Visitor
}
else
{
- ti.error("internal compiler error: C++ `%s` template alias parameter is not supported", o.toChars());
+ .error(ti.loc, "%s `%s` internal compiler error: C++ `%s` template alias parameter is not supported", ti.kind, ti.toPrettyChars, o.toChars());
fatal();
}
}
else if (tp.isTemplateThisParameter())
{
- ti.error("internal compiler error: C++ `%s` template this parameter is not supported", o.toChars());
+ .error(ti.loc, "%s `%s` internal compiler error: C++ `%s` template this parameter is not supported", ti.kind, ti.toPrettyChars, o.toChars());
fatal();
}
else
@@ -575,7 +573,7 @@ private final class CppMangleVisitor : Visitor
Type t = isType((*ti.tiargs)[j]);
if (t is null)
{
- ti.error("internal compiler error: C++ `%s` template value parameter is not supported", (*ti.tiargs)[j].toChars());
+ .error(ti.loc, "%s `%s` internal compiler error: C++ `%s` template value parameter is not supported", ti.kind, ti.toPrettyChars, (*ti.tiargs)[j].toChars());
fatal();
}
t.accept(this);
@@ -1013,7 +1011,7 @@ private final class CppMangleVisitor : Visitor
// fake mangling for fields to fix https://issues.dlang.org/show_bug.cgi?id=16525
if (!(d.storage_class & (STC.extern_ | STC.field | STC.gshared)))
{
- d.error("internal compiler error: C++ static non-`__gshared` non-`extern` variables not supported");
+ .error(d.loc, "%s `%s` internal compiler error: C++ static non-`__gshared` non-`extern` variables not supported", d.kind, d.toPrettyChars);
fatal();
}
Dsymbol p = d.toParent();
diff --git a/gcc/d/dmd/ctfe.h b/gcc/d/dmd/ctfe.h
index 1071edf..bb92778 100644
--- a/gcc/d/dmd/ctfe.h
+++ b/gcc/d/dmd/ctfe.h
@@ -37,7 +37,6 @@ class VoidInitExp final : public Expression
public:
VarDeclaration *var;
- const char *toChars() const override;
void accept(Visitor *v) override { v->visit(this); }
};
diff --git a/gcc/d/dmd/ctfeexpr.d b/gcc/d/dmd/ctfeexpr.d
index d355538..ddfb57d 100644
--- a/gcc/d/dmd/ctfeexpr.d
+++ b/gcc/d/dmd/ctfeexpr.d
@@ -12,7 +12,6 @@
module dmd.ctfeexpr;
import core.stdc.stdio;
-import core.stdc.stdlib;
import core.stdc.string;
import dmd.arraytypes;
import dmd.astenums;
@@ -148,7 +147,7 @@ extern (C++) final class ThrownExceptionExp : Expression
UnionExp ue = void;
Expression e = resolveSlice((*thrown.value.elements)[0], &ue);
StringExp se = e.toStringExp();
- thrown.error("uncaught CTFE exception `%s(%s)`", thrown.type.toChars(), se ? se.toChars() : e.toChars());
+ error(thrown.loc, "uncaught CTFE exception `%s(%s)`", thrown.type.toChars(), se ? se.toChars() : e.toChars());
/* Also give the line where the throw statement was. We won't have it
* in the case where the ThrowStatement is generated internally
* (eg, in ScopeStatement)
@@ -306,9 +305,10 @@ UnionExp copyLiteral(Expression e)
}
if (auto aae = e.isAssocArrayLiteralExp())
{
- emplaceExp!(AssocArrayLiteralExp)(&ue, e.loc, copyLiteralArray(aae.keys), copyLiteralArray(aae.values));
+ emplaceExp!(AssocArrayLiteralExp)(&ue, aae.loc, copyLiteralArray(aae.keys), copyLiteralArray(aae.values));
AssocArrayLiteralExp r = ue.exp().isAssocArrayLiteralExp();
- r.type = e.type;
+ r.type = aae.type;
+ r.lowering = aae.lowering;
r.ownedByCtfe = OwnedBy.ctfe;
return ue;
}
@@ -435,7 +435,7 @@ UnionExp copyLiteral(Expression e)
emplaceExp!(UnionExp)(&ue, e);
return ue;
}
- e.error("CTFE internal error: literal `%s`", e.toChars());
+ error(e.loc, "CTFE internal error: literal `%s`", e.toChars());
assert(0);
}
@@ -506,7 +506,7 @@ private UnionExp paintTypeOntoLiteralCopy(Type type, Expression lit)
// Can't type paint from struct to struct*; this needs another
// level of indirection
if (lit.op == EXP.structLiteral && isPointer(type))
- lit.error("CTFE internal error: painting `%s`", type.toChars());
+ error(lit.loc, "CTFE internal error: painting `%s`", type.toChars());
ue = copyLiteral(lit);
}
ue.exp().type = type;
@@ -1919,7 +1919,7 @@ bool isCtfeValueValid(Expression newval)
return true; // uninitialized value
default:
- newval.error("CTFE internal error: illegal CTFE value `%s`", newval.toChars());
+ error(newval.loc, "CTFE internal error: illegal CTFE value `%s`", newval.toChars());
return false;
}
}
diff --git a/gcc/d/dmd/dcast.d b/gcc/d/dmd/dcast.d
index b2aa643..9f661ea 100644
--- a/gcc/d/dmd/dcast.d
+++ b/gcc/d/dmd/dcast.d
@@ -32,7 +32,6 @@ import dmd.globals;
import dmd.hdrgen;
import dmd.location;
import dmd.impcnvtab;
-import dmd.id;
import dmd.importc;
import dmd.init;
import dmd.intrange;
@@ -44,7 +43,6 @@ import dmd.root.rmem;
import dmd.root.utf;
import dmd.tokens;
import dmd.typesem;
-import dmd.visitor;
enum LOG = false;
@@ -73,7 +71,7 @@ Expression implicitCastTo(Expression e, Scope* sc, Type t)
{
// no need for an extra cast when matching is exact
- if (match == MATCH.convert && e.type.isTypeNoreturn())
+ if (match == MATCH.convert && e.type.isTypeNoreturn() && e.op != EXP.type)
{
return specialNoreturnCast(e, t);
}
@@ -138,7 +136,7 @@ Expression implicitCastTo(Expression e, Scope* sc, Type t)
{
if (!t.deco)
{
- e.error("forward reference to type `%s`", t.toChars());
+ error(e.loc, "forward reference to type `%s`", t.toChars());
}
else
{
@@ -146,7 +144,7 @@ Expression implicitCastTo(Expression e, Scope* sc, Type t)
//type = type.typeSemantic(loc, sc);
//printf("type %s t %s\n", type.deco, t.deco);
auto ts = toAutoQualChars(e.type, t);
- e.error("cannot implicitly convert expression `%s` of type `%s` to `%s`",
+ error(e.loc, "cannot implicitly convert expression `%s` of type `%s` to `%s`",
e.toChars(), ts[0], ts[1]);
}
}
@@ -174,7 +172,7 @@ Expression implicitCastTo(Expression e, Scope* sc, Type t)
{
//printf("FuncExp::implicitCastTo type = %p %s, t = %s\n", e.type, e.type ? e.type.toChars() : NULL, t.toChars());
FuncExp fe;
- if (e.matchType(t, sc, &fe) > MATCH.nomatch)
+ if (e.matchType(t, sc, &fe, global.errorSink) > MATCH.nomatch)
{
return fe;
}
@@ -247,7 +245,7 @@ MATCH implicitConvTo(Expression e, Type t)
return MATCH.nomatch;
if (!e.type)
{
- e.error("`%s` is not an expression", e.toChars());
+ error(e.loc, "`%s` is not an expression", e.toChars());
e.type = Type.terror;
}
@@ -718,6 +716,14 @@ MATCH implicitConvTo(Expression e, Type t)
if (e.postfix != 'd')
m = MATCH.convert;
return m;
+ case Tint8:
+ case Tuns8:
+ if (e.hexString)
+ {
+ m = MATCH.convert;
+ return m;
+ }
+ break;
case Tenum:
if (tn.isTypeEnum().sym.isSpecial())
{
@@ -1075,7 +1081,7 @@ MATCH implicitConvTo(Expression e, Type t)
MATCH visitFunc(FuncExp e)
{
//printf("FuncExp::implicitConvTo type = %p %s, t = %s\n", e.type, e.type ? e.type.toChars() : NULL, t.toChars());
- MATCH m = e.matchType(t, null, null, 1);
+ MATCH m = e.matchType(t, null, null, global.errorSinkNull);
if (m > MATCH.nomatch)
{
return m;
@@ -1537,7 +1543,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
return e;
}
- if (e.type.isTypeNoreturn())
+ if (e.type.isTypeNoreturn() && e.op != EXP.type)
{
return specialNoreturnCast(e, t);
}
@@ -1674,7 +1680,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
goto Lok;
auto ts = toAutoQualChars(e.type, t);
- e.error("cannot cast expression `%s` of type `%s` to `%s` because of different sizes",
+ error(e.loc, "cannot cast expression `%s` of type `%s` to `%s` because of different sizes",
e.toChars(), ts[0], ts[1]);
return ErrorExp.get();
}
@@ -1703,7 +1709,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
const dim = t1b.isTypeSArray().dim.toInteger();
if (tsize == 0 || (dim * fsize) % tsize != 0)
{
- e.error("cannot cast expression `%s` of type `%s` to `%s` since sizes don't line up",
+ error(e.loc, "cannot cast expression `%s` of type `%s` to `%s` since sizes don't line up",
e.toChars(), e.type.toChars(), t.toChars());
return ErrorExp.get();
}
@@ -1746,7 +1752,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
// void delegate() dg;
// cast(U*)dg; // ==> cast(U*)dg.ptr;
// Note that it happens even when U is a Tfunction!
- e.deprecation("casting from %s to %s is deprecated", e.type.toChars(), t.toChars());
+ deprecation(e.loc, "casting from %s to %s is deprecated", e.type.toChars(), t.toChars());
goto Lok;
}
goto Lfail;
@@ -1764,7 +1770,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
if (result)
return result;
}
- e.error("cannot cast expression `%s` of type `%s` to `%s`", e.toChars(), e.type.toChars(), t.toChars());
+ error(e.loc, "cannot cast expression `%s` of type `%s` to `%s`", e.toChars(), e.type.toChars(), t.toChars());
return ErrorExp.get();
}
@@ -1833,7 +1839,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
if (!e.committed && t.ty == Tpointer && t.nextOf().ty == Tvoid &&
(!sc || !(sc.flags & SCOPE.Cfile)))
{
- e.error("cannot convert string literal to `void*`");
+ error(e.loc, "cannot convert string literal to `void*`");
return ErrorExp.get();
}
@@ -1971,7 +1977,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
dchar c;
if (const s = utf_decodeChar(se.peekString(), u, c))
- e.error("%.*s", cast(int)s.length, s.ptr);
+ error(e.loc, "%.*s", cast(int)s.length, s.ptr);
else
buffer.writeUTF16(c);
}
@@ -1984,7 +1990,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
dchar c;
if (const s = utf_decodeChar(se.peekString(), u, c))
- e.error("%.*s", cast(int)s.length, s.ptr);
+ error(e.loc, "%.*s", cast(int)s.length, s.ptr);
buffer.write4(c);
newlen++;
}
@@ -1996,7 +2002,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
dchar c;
if (const s = utf_decodeWchar(se.peekWstring(), u, c))
- e.error("%.*s", cast(int)s.length, s.ptr);
+ error(e.loc, "%.*s", cast(int)s.length, s.ptr);
else
buffer.writeUTF8(c);
}
@@ -2009,7 +2015,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
dchar c;
if (const s = utf_decodeWchar(se.peekWstring(), u, c))
- e.error("%.*s", cast(int)s.length, s.ptr);
+ error(e.loc, "%.*s", cast(int)s.length, s.ptr);
buffer.write4(c);
newlen++;
}
@@ -2021,7 +2027,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
uint c = se.peekDstring()[u];
if (!utf_isValidDchar(c))
- e.error("invalid UCS-32 char \\U%08x", c);
+ error(e.loc, "invalid UCS-32 char \\U%08x", c);
else
buffer.writeUTF8(c);
newlen++;
@@ -2035,7 +2041,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
uint c = se.peekDstring()[u];
if (!utf_isValidDchar(c))
- e.error("invalid UCS-32 char \\U%08x", c);
+ error(e.loc, "invalid UCS-32 char \\U%08x", c);
else
buffer.writeUTF16(c);
newlen++;
@@ -2395,7 +2401,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
}
else if (f.needThis())
{
- e.error("no `this` to create delegate for `%s`", f.toChars());
+ error(e.loc, "no `this` to create delegate for `%s`", f.toChars());
return ErrorExp.get();
}
else if (f.isNested())
@@ -2405,7 +2411,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
}
else
{
- e.error("cannot cast from function pointer to delegate");
+ error(e.loc, "cannot cast from function pointer to delegate");
return ErrorExp.get();
}
}
@@ -2446,7 +2452,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
int offset;
e.func.tookAddressOf++;
if (e.func.tintro && e.func.tintro.nextOf().isBaseOf(e.func.type.nextOf(), &offset) && offset)
- e.error("%s", msg.ptr);
+ error(e.loc, "%s", msg.ptr);
auto result = e.copy();
result.type = t;
return result;
@@ -2462,7 +2468,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
int offset;
if (f.tintro && f.tintro.nextOf().isBaseOf(f.type.nextOf(), &offset) && offset)
- e.error("%s", msg.ptr);
+ error(e.loc, "%s", msg.ptr);
if (f != e.func) // if address not already marked as taken
f.tookAddressOf++;
auto result = new DelegateExp(e.loc, e.e1, f, false, e.vthis2);
@@ -2470,7 +2476,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
return result;
}
if (e.func.tintro)
- e.error("%s", msg.ptr);
+ error(e.loc, "%s", msg.ptr);
}
}
@@ -2489,7 +2495,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
{
//printf("FuncExp::castTo type = %s, t = %s\n", e.type.toChars(), t.toChars());
FuncExp fe;
- if (e.matchType(t, sc, &fe, 1) > MATCH.nomatch)
+ if (e.matchType(t, sc, &fe, global.errorSinkNull) > MATCH.nomatch)
{
return fe;
}
@@ -2599,7 +2605,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null)
}
}
auto ts = toAutoQualChars(tsa ? tsa : e.type, t);
- e.error("cannot cast expression `%s` of type `%s` to `%s`",
+ error(e.loc, "cannot cast expression `%s` of type `%s` to `%s`",
e.toChars(), ts[0], ts[1]);
return ErrorExp.get();
}
@@ -3613,7 +3619,7 @@ Expression integralPromotions(Expression e, Scope* sc)
switch (e.type.toBasetype().ty)
{
case Tvoid:
- e.error("void has no value");
+ error(e.loc, "void has no value");
return ErrorExp.get();
case Tint8:
@@ -3662,7 +3668,7 @@ void fix16997(Scope* sc, UnaExp ue)
case Tchar:
case Twchar:
case Tdchar:
- ue.deprecation("integral promotion not done for `%s`, remove '-revert=intpromote' switch or `%scast(int)(%s)`",
+ deprecation(ue.loc, "integral promotion not done for `%s`, remove '-revert=intpromote' switch or `%scast(int)(%s)`",
ue.toChars(), EXPtoString(ue.op).ptr, ue.e1.toChars());
break;
diff --git a/gcc/d/dmd/dclass.d b/gcc/d/dmd/dclass.d
index 20cb82e..0fbbb11 100644
--- a/gcc/d/dmd/dclass.d
+++ b/gcc/d/dmd/dclass.d
@@ -19,14 +19,13 @@ import core.stdc.string;
import dmd.aggregate;
import dmd.arraytypes;
import dmd.astenums;
-import dmd.attrib;
import dmd.gluelayer;
import dmd.declaration;
import dmd.dscope;
import dmd.dsymbol;
import dmd.dsymbolsem;
+import dmd.errors;
import dmd.func;
-import dmd.globals;
import dmd.id;
import dmd.identifier;
import dmd.location;
@@ -224,7 +223,7 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
// Look for special class names
if (id == Id.__sizeof || id == Id.__xalignof || id == Id._mangleof)
- error("illegal class name");
+ classError("%s `%s` illegal class name", null);
// BUG: What if this is the wrong TypeInfo, i.e. it is nested?
if (id.toChars()[0] == 'T')
@@ -232,103 +231,103 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
if (id == Id.TypeInfo)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.dtypeinfo = this;
}
if (id == Id.TypeInfo_Class)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoclass = this;
}
if (id == Id.TypeInfo_Interface)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfointerface = this;
}
if (id == Id.TypeInfo_Struct)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfostruct = this;
}
if (id == Id.TypeInfo_Pointer)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfopointer = this;
}
if (id == Id.TypeInfo_Array)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoarray = this;
}
if (id == Id.TypeInfo_StaticArray)
{
//if (!inObject)
- // Type.typeinfostaticarray.error("%s", msg);
+ // Type.typeinfostaticarray.classError("%s `%s` %s", msg);
Type.typeinfostaticarray = this;
}
if (id == Id.TypeInfo_AssociativeArray)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoassociativearray = this;
}
if (id == Id.TypeInfo_Enum)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoenum = this;
}
if (id == Id.TypeInfo_Function)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfofunction = this;
}
if (id == Id.TypeInfo_Delegate)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfodelegate = this;
}
if (id == Id.TypeInfo_Tuple)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfotypelist = this;
}
if (id == Id.TypeInfo_Const)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoconst = this;
}
if (id == Id.TypeInfo_Invariant)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoinvariant = this;
}
if (id == Id.TypeInfo_Shared)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfoshared = this;
}
if (id == Id.TypeInfo_Wild)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfowild = this;
}
if (id == Id.TypeInfo_Vector)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
Type.typeinfovector = this;
}
}
@@ -336,38 +335,43 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
if (id == Id.Object)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
object = this;
}
if (id == Id.Throwable)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
throwable = this;
}
if (id == Id.Exception)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
exception = this;
}
if (id == Id.Error)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
errorException = this;
}
if (id == Id.cpp_type_info_ptr)
{
if (!inObject)
- error("%s", msg.ptr);
+ classError("%s `%s` %s", msg.ptr);
cpp_type_info_ptr = this;
}
baseok = Baseok.none;
}
+ final void classError(const(char)* fmt, const(char)* arg)
+ {
+ .error(loc, fmt, kind, toPrettyChars, arg);
+ }
+
static ClassDeclaration create(const ref Loc loc, Identifier id, BaseClasses* baseclasses, Dsymbols* members, bool inObject)
{
return new ClassDeclaration(loc, id, baseclasses, members, inObject);
@@ -483,7 +487,7 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
{
// .stringof is always defined (but may be hidden by some other symbol)
if (ident != Id.stringof && !(flags & IgnoreErrors) && semanticRun < PASS.semanticdone)
- error("is forward referenced when looking for `%s`", ident.toChars());
+ classError("%s `%s` is forward referenced when looking for `%s`", ident.toChars());
//*(char*)0=0;
return null;
}
@@ -505,7 +509,7 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
if (!b.sym.symtab)
{
- error("base `%s` is forward referenced", b.sym.ident.toChars());
+ classError("%s `%s` base `%s` is forward referenced", b.sym.ident.toChars());
continue;
}
@@ -816,7 +820,7 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
}
if (fdambig)
- error("ambiguous virtual function `%s`", fdambig.toChars());
+ classError("%s `%s` ambiguous virtual function `%s`", fdambig.toChars());
return fdmatch;
}
@@ -879,6 +883,10 @@ extern (C++) class ClassDeclaration : AggregateDeclaration
return 0;
}
+ // opaque class is not abstract if it is not declared abstract
+ if (!members)
+ return no();
+
for (size_t i = 0; i < members.length; i++)
{
auto s = (*members)[i];
diff --git a/gcc/d/dmd/declaration.d b/gcc/d/dmd/declaration.d
index 8a91a80..b65e7e8 100644
--- a/gcc/d/dmd/declaration.d
+++ b/gcc/d/dmd/declaration.d
@@ -16,7 +16,6 @@ import core.stdc.stdio;
import dmd.aggregate;
import dmd.arraytypes;
import dmd.astenums;
-import dmd.attrib;
import dmd.ctorflow;
import dmd.dclass;
import dmd.delegatize;
@@ -327,12 +326,12 @@ extern (C++) abstract class Declaration : Dsymbol
continue;
if (sdv.postblit.isDisabled())
{
- p.error(loc, "is not copyable because field `%s` is not copyable", structField.toChars());
+ .error(loc, "%s `%s` is not copyable because field `%s` is not copyable", p.kind, p.toPrettyChars, structField.toChars());
return true;
}
}
}
- p.error(loc, "is not copyable because it has a disabled postblit");
+ .error(loc, "%s `%s` is not copyable because it has a disabled postblit", p.kind, p.toPrettyChars);
return true;
}
}
@@ -358,7 +357,7 @@ extern (C++) abstract class Declaration : Dsymbol
return true;
}
}
- error(loc, "cannot be used because it is annotated with `@disable`");
+ .error(loc, "%s `%s` cannot be used because it is annotated with `@disable`", kind, toPrettyChars);
return true;
}
@@ -389,7 +388,7 @@ extern (C++) abstract class Declaration : Dsymbol
{
const(char)* s = isParameter() && parent.ident != Id.ensure ? "parameter" : "result";
if (!(flag & ModifyFlags.noError))
- error(loc, "cannot modify %s `%s` in contract", s, toChars());
+ error(loc, "%s `%s` cannot modify %s `%s` in contract", kind, toPrettyChars, s, toChars());
return Modifiable.initialization; // do not report type related errors
}
}
@@ -403,7 +402,7 @@ extern (C++) abstract class Declaration : Dsymbol
if (scx.func == vthis.parent && (scx.flags & SCOPE.contract))
{
if (!(flag & ModifyFlags.noError))
- error(loc, "cannot modify parameter `this` in contract");
+ error(loc, "%s `%s` cannot modify parameter `this` in contract", kind, toPrettyChars);
return Modifiable.initialization; // do not report type related errors
}
}
@@ -647,11 +646,11 @@ extern (C++) final class TupleDeclaration : Declaration
{
buf.printf("_%s_%d", ident.toChars(), i);
auto id = Identifier.idPool(buf.extractSlice());
- auto arg = new Parameter(STC.in_, t, id, null);
+ auto arg = new Parameter(Loc.initial, STC.in_, t, id, null);
}
else
{
- auto arg = new Parameter(0, t, null, null, null);
+ auto arg = new Parameter(Loc.initial, 0, t, null, null, null);
}
(*args)[i] = arg;
if (!t.deco)
@@ -946,7 +945,7 @@ extern (C++) final class AliasDeclaration : Declaration
}
if (inuse)
{
- error("recursive alias declaration");
+ .error(loc, "%s `%s` recursive alias declaration", kind, toPrettyChars);
Lerr:
// Avoid breaking "recursive alias" state during errors gagged
@@ -1004,7 +1003,7 @@ extern (C++) final class AliasDeclaration : Declaration
{
if (inuse)
{
- error("recursive alias declaration");
+ .error(loc, "%s `%s` recursive alias declaration", kind, toPrettyChars);
return this;
}
inuse = 1;
@@ -1378,7 +1377,7 @@ extern (C++) class VarDeclaration : Declaration
Dsymbol parent = toParent();
if (!parent && !(storage_class & STC.static_))
{
- error("forward referenced");
+ .error(loc, "%s `%s` forward referenced", kind, toPrettyChars);
type = Type.terror;
}
else if (storage_class & (STC.static_ | STC.extern_ | STC.gshared) ||
@@ -1602,6 +1601,8 @@ extern (C++) class VarDeclaration : Declaration
{
inuse++;
_init = _init.initializerSemantic(_scope, type, INITinterpret);
+ import dmd.semantic2 : lowerStaticAAs;
+ lowerStaticAAs(this, _scope);
_scope = null;
inuse--;
}
diff --git a/gcc/d/dmd/delegatize.d b/gcc/d/dmd/delegatize.d
index 559f103..490ef56 100644
--- a/gcc/d/dmd/delegatize.d
+++ b/gcc/d/dmd/delegatize.d
@@ -21,7 +21,6 @@ import dmd.dsymbol;
import dmd.expression;
import dmd.expressionsem;
import dmd.func;
-import dmd.globals;
import dmd.init;
import dmd.initsem;
import dmd.location;
diff --git a/gcc/d/dmd/denum.d b/gcc/d/dmd/denum.d
index 87b40b8..98bf4dd 100644
--- a/gcc/d/dmd/denum.d
+++ b/gcc/d/dmd/denum.d
@@ -18,19 +18,18 @@ import core.stdc.stdio;
import dmd.astenums;
import dmd.attrib;
+import dmd.errors;
import dmd.gluelayer;
import dmd.declaration;
import dmd.dscope;
import dmd.dsymbol;
import dmd.dsymbolsem;
import dmd.expression;
-import dmd.globals;
import dmd.id;
import dmd.identifier;
import dmd.init;
import dmd.location;
import dmd.mtype;
-import dmd.tokens;
import dmd.typesem;
import dmd.visitor;
@@ -193,7 +192,7 @@ extern (C++) final class EnumDeclaration : ScopeDsymbol
return defaultval = memtype.defaultInit(loc);
}
- error(loc, "is opaque and has no default initializer");
+ error(loc, "%s `%s` is opaque and has no default initializer", kind, toPrettyChars);
return handleErrors();
}
@@ -204,7 +203,7 @@ extern (C++) final class EnumDeclaration : ScopeDsymbol
{
if (em.semanticRun < PASS.semanticdone)
{
- error(loc, "forward reference of `%s.init`", toChars());
+ error(loc, "%s `%s` forward reference of `%s.init`", kind, toPrettyChars, toChars());
return handleErrors();
}
diff --git a/gcc/d/dmd/dimport.d b/gcc/d/dmd/dimport.d
index c4d5ddb..3b8d9f6 100644
--- a/gcc/d/dmd/dimport.d
+++ b/gcc/d/dmd/dimport.d
@@ -93,7 +93,7 @@ extern (C++) final class Import : Dsymbol
extern (D) void addAlias(Identifier name, Identifier _alias)
{
if (isstatic)
- error("cannot have an import bind list");
+ .error(loc, "%s `%s` cannot have an import bind list", kind, toPrettyChars);
if (!aliasId)
this.ident = null; // make it an anonymous import
names.push(name);
diff --git a/gcc/d/dmd/dinterpret.d b/gcc/d/dmd/dinterpret.d
index 5948351..a43be7d 100644
--- a/gcc/d/dmd/dinterpret.d
+++ b/gcc/d/dmd/dinterpret.d
@@ -40,7 +40,6 @@ import dmd.init;
import dmd.initsem;
import dmd.location;
import dmd.mtype;
-import dmd.printast;
import dmd.root.rmem;
import dmd.root.array;
import dmd.root.ctfloat;
@@ -428,17 +427,23 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
{
printf("\n********\n%s FuncDeclaration::interpret(istate = %p) %s\n", fd.loc.toChars(), istate, fd.toChars());
}
+
+ void fdError(const(char)* msg)
+ {
+ error(fd.loc, "%s `%s` %s", fd.kind, fd.toPrettyChars, msg);
+ }
+
assert(pue);
if (fd.semanticRun == PASS.semantic3)
{
- fd.error("circular dependency. Functions cannot be interpreted while being compiled");
+ fdError("circular dependency. Functions cannot be interpreted while being compiled");
return CTFEExp.cantexp;
}
if (!fd.functionSemantic3())
return CTFEExp.cantexp;
if (fd.semanticRun < PASS.semantic3done)
{
- fd.error("circular dependency. Functions cannot be interpreted while being compiled");
+ fdError("circular dependency. Functions cannot be interpreted while being compiled");
return CTFEExp.cantexp;
}
@@ -446,7 +451,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
if (tf.parameterList.varargs != VarArg.none && arguments &&
((fd.parameters && arguments.length != fd.parameters.length) || (!fd.parameters && arguments.length)))
{
- fd.error("C-style variadic functions are not yet implemented in CTFE");
+ fdError("C-style variadic functions are not yet implemented in CTFE");
return CTFEExp.cantexp;
}
@@ -461,7 +466,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
{
// error, no this. Prevent segfault.
// Here should be unreachable by the strict 'this' check in front-end.
- fd.error("need `this` to access member `%s`", fd.toChars());
+ error(fd.loc, "%s `%s` need `this` to access member `%s`", fd.kind, fd.toPrettyChars, fd.toChars());
return CTFEExp.cantexp;
}
@@ -484,7 +489,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
if (!istate && (fparam.storageClass & STC.out_))
{
// initializing an out parameter involves writing to it.
- earg.error("global `%s` cannot be passed as an `out` parameter at compile time", earg.toChars());
+ error(earg.loc, "global `%s` cannot be passed as an `out` parameter at compile time", earg.toChars());
return CTFEExp.cantexp;
}
// Convert all reference arguments into lvalue references
@@ -578,7 +583,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
VarDeclaration vx = earg.isVarExp().var.isVarDeclaration();
if (!vx)
{
- fd.error("cannot interpret `%s` as a `ref` parameter", earg.toChars());
+ error(fd.loc, "%s `%s` cannot interpret `%s` as a `ref` parameter", fd.kind, fd.toPrettyChars, earg.toChars());
return CTFEExp.cantexp;
}
@@ -635,9 +640,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
{
if (ctfeGlobals.callDepth > CTFE_RECURSION_LIMIT)
{
- // This is a compiler error. It must not be suppressed.
- global.gag = 0;
- fd.error("CTFE recursion limit exceeded");
+ fdError("CTFE recursion limit exceeded");
e = CTFEExp.cantexp;
break;
}
@@ -652,7 +655,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
if (istatex.start)
{
- fd.error("CTFE internal error: failed to resume at statement `%s`", istatex.start.toChars());
+ error(fd.loc, "%s `%s` CTFE internal error: failed to resume at statement `%s`", fd.kind, fd.toPrettyChars, istatex.start.toChars());
return CTFEExp.cantexp;
}
@@ -683,7 +686,7 @@ private Expression interpretFunction(UnionExp* pue, FuncDeclaration fd, InterSta
/* missing a return statement can happen with C functions
* https://issues.dlang.org/show_bug.cgi?id=23056
*/
- fd.error("no return value from function");
+ fdError("no return value from function");
e = CTFEExp.cantexp;
}
}
@@ -790,7 +793,7 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
istate.start = null;
}
- s.error("statement `%s` cannot be interpreted at compile time", s.toChars());
+ error(s.loc, "statement `%s` cannot be interpreted at compile time", s.toChars());
result = CTFEExp.cantexp;
}
@@ -976,7 +979,7 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
{
// To support this, we need to copy all the closure vars
// into the delegate literal.
- s.error("closures are not yet supported in CTFE");
+ error(s.loc, "closures are not yet supported in CTFE");
result = CTFEExp.cantexp;
return;
}
@@ -1258,8 +1261,8 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
}
if (!scase)
{
- if (s.hasNoDefault)
- s.error("no `default` or `case` for `%s` in `switch` statement", econdition.toChars());
+ if (!s.hasDefault)
+ error(s.loc, "no `default` or `case` for `%s` in `switch` statement", econdition.toChars());
scase = s.sdefault;
}
@@ -1422,7 +1425,7 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
setValue(ca.var, ex.thrown);
}
e = interpretStatement(ca.handler, istate);
- if (CTFEExp.isGotoExp(e))
+ while (CTFEExp.isGotoExp(e))
{
/* This is an optimization that relies on the locality of the jump target.
* If the label is in the same catch handler, the following scan
@@ -1434,11 +1437,19 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
istatex.start = istate.gotoTarget; // set starting statement
istatex.gotoTarget = null;
Expression eh = interpretStatement(ca.handler, &istatex);
- if (!istatex.start)
+ if (istatex.start)
+ {
+ // The goto target is outside the current scope.
+ break;
+ }
+ // The goto target was within the body.
+ if (CTFEExp.isCantExp(eh))
{
- istate.gotoTarget = null;
e = eh;
+ break;
}
+ *istate = istatex;
+ e = eh;
}
break;
}
@@ -1565,7 +1576,7 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
ctfeGlobals.stack.push(s.wthis);
setValue(s.wthis, e);
e = interpretStatement(s._body, istate);
- if (CTFEExp.isGotoExp(e))
+ while (CTFEExp.isGotoExp(e))
{
/* This is an optimization that relies on the locality of the jump target.
* If the label is in the same WithStatement, the following scan
@@ -1577,11 +1588,19 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
istatex.start = istate.gotoTarget; // set starting statement
istatex.gotoTarget = null;
Expression ex = interpretStatement(s._body, &istatex);
- if (!istatex.start)
+ if (istatex.start)
+ {
+ // The goto target is outside the current scope.
+ break;
+ }
+ // The goto target was within the body.
+ if (CTFEExp.isCantExp(ex))
{
- istate.gotoTarget = null;
e = ex;
+ break;
}
+ *istate = istatex;
+ e = ex;
}
ctfeGlobals.stack.pop(s.wthis);
result = e;
@@ -1599,7 +1618,7 @@ Expression interpretStatement(UnionExp* pue, Statement s, InterState* istate)
return;
istate.start = null;
}
- s.error("`asm` statements cannot be interpreted at compile time");
+ error(s.loc, "`asm` statements cannot be interpreted at compile time");
result = CTFEExp.cantexp;
}
@@ -1676,7 +1695,7 @@ public:
printf("type = %s\n", e.type.toChars());
showCtfeExpr(e);
}
- e.error("cannot interpret `%s` at compile time", e.toChars());
+ error(e.loc, "cannot interpret `%s` at compile time", e.toChars());
result = CTFEExp.cantexp;
}
@@ -1733,7 +1752,7 @@ public:
assert(result.op == EXP.structLiteral || result.op == EXP.classReference || result.op == EXP.type);
return;
}
- e.error("value of `this` is not known at compile time");
+ error(e.loc, "value of `this` is not known at compile time");
result = CTFEExp.cantexp;
}
@@ -1822,14 +1841,14 @@ public:
if (e.type.ty != Tpointer)
{
// Probably impossible
- e.error("cannot interpret `%s` at compile time", e.toChars());
+ error(e.loc, "cannot interpret `%s` at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
Type pointee = (cast(TypePointer)e.type).next;
if (e.var.isThreadlocal())
{
- e.error("cannot take address of thread-local variable %s at compile time", e.var.toChars());
+ error(e.loc, "cannot take address of thread-local variable %s at compile time", e.var.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -1884,7 +1903,7 @@ public:
result = pue.exp();
return;
}
- e.error("reinterpreting cast from `%s` to `%s` is not supported in CTFE", val.type.toChars(), e.type.toChars());
+ error(e.loc, "reinterpreting cast from `%s` to `%s` is not supported in CTFE", val.type.toChars(), e.type.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -1925,7 +1944,7 @@ public:
return;
}
- e.error("cannot convert `&%s` to `%s` at compile time", e.var.type.toChars(), e.type.toChars());
+ error(e.loc, "cannot convert `&%s` to `%s` at compile time", e.var.type.toChars(), e.type.toChars());
result = CTFEExp.cantexp;
}
@@ -1941,7 +1960,7 @@ public:
// We cannot take the address of an imported symbol at compile time
if (decl.isImportedSymbol()) {
- e.error("cannot take address of imported symbol `%s` at compile time", decl.toChars());
+ error(e.loc, "cannot take address of imported symbol `%s` at compile time", decl.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -2186,9 +2205,9 @@ public:
}
if (!v.isCTFE() && v.isDataseg())
- e.error("static variable `%s` cannot be read at compile time", v.toChars());
+ error(e.loc, "static variable `%s` cannot be read at compile time", v.toChars());
else // CTFE initiated from inside a function
- e.error("variable `%s` cannot be read at compile time", v.toChars());
+ error(e.loc, "variable `%s` cannot be read at compile time", v.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -2280,7 +2299,7 @@ public:
}
else
{
- e.error("declaration `%s` is not yet implemented in CTFE", e.toChars());
+ error(e.loc, "declaration `%s` is not yet implemented in CTFE", e.toChars());
result = CTFEExp.cantexp;
return 1;
}
@@ -2319,7 +2338,7 @@ public:
if (result !is null)
return;
}
- e.error("declaration `%s` is not yet implemented in CTFE", e.toChars());
+ error(e.loc, "declaration `%s` is not yet implemented in CTFE", e.toChars());
result = CTFEExp.cantexp;
}
else if (v.type.size() == 0)
@@ -2329,7 +2348,7 @@ public:
}
else
{
- e.error("variable `%s` cannot be modified at compile time", v.toChars());
+ error(e.loc, "variable `%s` cannot be modified at compile time", v.toChars());
result = CTFEExp.cantexp;
}
return;
@@ -2337,7 +2356,7 @@ public:
if (s.isTemplateMixin() || s.isTupleDeclaration())
{
// These can be made to work, too lazy now
- e.error("declaration `%s` is not yet implemented in CTFE", e.toChars());
+ error(e.loc, "declaration `%s` is not yet implemented in CTFE", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -2369,13 +2388,13 @@ public:
if (result.op == EXP.null_)
{
- e.error("null pointer dereference evaluating typeid. `%s` is `null`", ex.toChars());
+ error(e.loc, "null pointer dereference evaluating typeid. `%s` is `null`", ex.toChars());
result = CTFEExp.cantexp;
return;
}
if (result.op != EXP.classReference)
{
- e.error("CTFE internal error: determining classinfo");
+ error(e.loc, "CTFE internal error: determining classinfo");
result = CTFEExp.cantexp;
return;
}
@@ -2412,7 +2431,7 @@ public:
continue;
if (ex.op == EXP.voidExpression)
{
- e.error("CTFE internal error: void element `%s` in sequence", exp.toChars());
+ error(e.loc, "CTFE internal error: void element `%s` in sequence", exp.toChars());
assert(0);
}
@@ -2499,7 +2518,7 @@ public:
expandTuples(expsx);
if (expsx.length != dim)
{
- e.error("CTFE internal error: invalid array literal");
+ error(e.loc, "CTFE internal error: invalid array literal");
result = CTFEExp.cantexp;
return;
}
@@ -2562,7 +2581,7 @@ public:
expandTuples(valuesx);
if (keysx.length != valuesx.length)
{
- e.error("CTFE internal error: invalid AA");
+ error(e.loc, "CTFE internal error: invalid AA");
result = CTFEExp.cantexp;
return;
}
@@ -2683,7 +2702,7 @@ public:
expandTuples(expsx);
if (expsx.length != e.sd.fields.length)
{
- e.error("CTFE internal error: invalid struct literal");
+ error(e.loc, "CTFE internal error: invalid struct literal");
result = CTFEExp.cantexp;
return;
}
@@ -2813,7 +2832,7 @@ public:
{
if (v.inuse)
{
- e.error("circular reference to `%s`", v.toPrettyChars());
+ error(e.loc, "circular reference to `%s`", v.toPrettyChars());
result = CTFEExp.cantexp;
return;
}
@@ -2858,7 +2877,9 @@ public:
result = eref;
return;
}
- e.member.error("`%s` cannot be constructed at compile time, because the constructor has no available source code", e.newtype.toChars());
+ auto m = e.member;
+ error(m.loc, "%s `%s` `%s` cannot be constructed at compile time, because the constructor has no available source code",
+ m.kind, m.toPrettyChars, e.newtype.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -2900,7 +2921,7 @@ public:
result = pue.exp();
return;
}
- e.error("cannot interpret `%s` at compile time", e.toChars());
+ error(e.loc, "cannot interpret `%s` at compile time", e.toChars());
result = CTFEExp.cantexp;
}
@@ -3001,7 +3022,7 @@ public:
}
if (e.e1.type.ty == Tpointer || e.e2.type.ty == Tpointer)
{
- e.error("pointer expression `%s` cannot be interpreted at compile time", e.toChars());
+ error(e.loc, "pointer expression `%s` cannot be interpreted at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -3030,7 +3051,7 @@ public:
const uinteger_t sz = e1.type.size() * 8;
if (i2 < 0 || i2 >= sz)
{
- e.error("shift by %lld is outside the range 0..%llu", i2, cast(ulong)sz - 1);
+ error(e.loc, "shift by %lld is outside the range 0..%llu", i2, cast(ulong)sz - 1);
result = CTFEExp.cantexp;
return;
}
@@ -3081,13 +3102,13 @@ public:
if (e1.isConst() != 1)
{
// The following should really be an assert()
- e1.error("CTFE internal error: non-constant value `%s`", e1.toChars());
+ error(e1.loc, "CTFE internal error: non-constant value `%s`", e1.toChars());
emplaceExp!CTFEExp(&ue, EXP.cantExpression);
return ue;
}
if (e2.isConst() != 1)
{
- e2.error("CTFE internal error: non-constant value `%s`", e2.toChars());
+ error(e2.loc, "CTFE internal error: non-constant value `%s`", e2.toChars());
emplaceExp!CTFEExp(&ue, EXP.cantExpression);
return ue;
}
@@ -3098,7 +3119,7 @@ public:
*pue = evaluate(e.loc, e.type, e1, e2);
result = (*pue).exp();
if (CTFEExp.isCantExp(result))
- e.error("`%s` cannot be interpreted at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be interpreted at compile time", e.toChars());
}
extern (D) private void interpretCompareCommon(BinExp e, fp2_t fp)
@@ -3126,7 +3147,7 @@ public:
if (cmp == -1)
{
char dir = (e.op == EXP.greaterThan || e.op == EXP.greaterOrEqual) ? '<' : '>';
- e.error("the ordering of pointers to unrelated memory blocks is indeterminate in CTFE. To check if they point to the same memory block, use both `>` and `<` inside `&&` or `||`, eg `%s && %s %c= %s + 1`", e.toChars(), e.e1.toChars(), dir, e.e2.toChars());
+ error(e.loc, "the ordering of pointers to unrelated memory blocks is indeterminate in CTFE. To check if they point to the same memory block, use both `>` and `<` inside `&&` or `||`, eg `%s && %s %c= %s + 1`", e.toChars(), e.e1.toChars(), dir, e.e2.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -3144,7 +3165,7 @@ public:
return;
if (!isCtfeComparable(e1))
{
- e.error("cannot compare `%s` at compile time", e1.toChars());
+ error(e.loc, "cannot compare `%s` at compile time", e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -3153,7 +3174,7 @@ public:
return;
if (!isCtfeComparable(e2))
{
- e.error("cannot compare `%s` at compile time", e2.toChars());
+ error(e.loc, "cannot compare `%s` at compile time", e2.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -3280,7 +3301,7 @@ public:
Expression e1 = e.e1;
if (!istate)
{
- e.error("value of `%s` is not known at compile time", e1.toChars());
+ error(e.loc, "value of `%s` is not known at compile time", e1.toChars());
return;
}
@@ -3594,14 +3615,14 @@ public:
}
else
{
- e.error("pointer expression `%s` cannot be interpreted at compile time", e.toChars());
+ error(e.loc, "pointer expression `%s` cannot be interpreted at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
if (exceptionOrCant(newval))
{
if (CTFEExp.isCantExp(newval))
- e.error("cannot interpret `%s` at compile time", e.toChars());
+ error(e.loc, "cannot interpret `%s` at compile time", e.toChars());
return;
}
}
@@ -3610,7 +3631,7 @@ public:
{
if (existingAA.ownedByCtfe != OwnedBy.ctfe)
{
- e.error("cannot modify read-only constant `%s`", existingAA.toChars());
+ error(e.loc, "cannot modify read-only constant `%s`", existingAA.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -3650,7 +3671,7 @@ public:
Type t = e1.type.toBasetype();
if (t.ty != Tarray)
{
- e.error("`%s` is not yet supported at compile time", e.toChars());
+ error(e.loc, "`%s` is not yet supported at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -3728,7 +3749,7 @@ public:
auto v = dve.var.isVarDeclaration();
if (!sle || !v)
{
- e.error("CTFE internal error: dotvar slice assignment");
+ error(e.loc, "CTFE internal error: dotvar slice assignment");
result = CTFEExp.cantexp;
return;
}
@@ -3787,12 +3808,12 @@ public:
auto v = e1.isDotVarExp().var.isVarDeclaration();
if (!sle || !v)
{
- e.error("CTFE internal error: dotvar assignment");
+ error(e.loc, "CTFE internal error: dotvar assignment");
return CTFEExp.cantexp;
}
if (sle.ownedByCtfe != OwnedBy.ctfe)
{
- e.error("cannot modify read-only constant `%s`", sle.toChars());
+ error(e.loc, "cannot modify read-only constant `%s`", sle.toChars());
return CTFEExp.cantexp;
}
@@ -3800,7 +3821,7 @@ public:
: ex.isClassReferenceExp().findFieldIndexByName(v);
if (fieldi == -1)
{
- e.error("CTFE internal error: cannot find field `%s` in `%s`", v.toChars(), ex.toChars());
+ error(e.loc, "CTFE internal error: cannot find field `%s` in `%s`", v.toChars(), ex.toChars());
return CTFEExp.cantexp;
}
assert(0 <= fieldi && fieldi < sle.elements.length);
@@ -3842,7 +3863,7 @@ public:
{
if (existingSE.ownedByCtfe != OwnedBy.ctfe)
{
- e.error("cannot modify read-only string literal `%s`", ie.e1.toChars());
+ error(e.loc, "cannot modify read-only string literal `%s`", ie.e1.toChars());
return CTFEExp.cantexp;
}
existingSE.setCodeUnit(index, cast(dchar)newval.toInteger());
@@ -3850,14 +3871,14 @@ public:
}
if (aggregate.op != EXP.arrayLiteral)
{
- e.error("index assignment `%s` is not yet supported in CTFE ", e.toChars());
+ error(e.loc, "index assignment `%s` is not yet supported in CTFE ", e.toChars());
return CTFEExp.cantexp;
}
ArrayLiteralExp existingAE = aggregate.isArrayLiteralExp();
if (existingAE.ownedByCtfe != OwnedBy.ctfe)
{
- e.error("cannot modify read-only constant `%s`", existingAE.toChars());
+ error(e.loc, "cannot modify read-only constant `%s`", existingAE.toChars());
return CTFEExp.cantexp;
}
@@ -3866,7 +3887,7 @@ public:
}
else
{
- e.error("`%s` cannot be evaluated at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be evaluated at compile time", e.toChars());
return CTFEExp.cantexp;
}
@@ -3902,7 +3923,7 @@ public:
newval = resolveSlice(newval);
if (CTFEExp.isCantExp(newval))
{
- e.error("CTFE internal error: assignment `%s`", e.toChars());
+ error(e.loc, "CTFE internal error: assignment `%s`", e.toChars());
return CTFEExp.cantexp;
}
}
@@ -4038,7 +4059,7 @@ public:
const srclen = resolveArrayLength(newval);
if (srclen != (upperbound - lowerbound))
{
- e.error("array length mismatch assigning `[0..%llu]` to `[%llu..%llu]`",
+ error(e.loc, "array length mismatch assigning `[0..%llu]` to `[%llu..%llu]`",
ulong(srclen), ulong(lowerbound), ulong(upperbound));
return CTFEExp.cantexp;
}
@@ -4048,7 +4069,7 @@ public:
{
if (existingSE.ownedByCtfe != OwnedBy.ctfe)
{
- e.error("cannot modify read-only string literal `%s`", existingSE.toChars());
+ error(e.loc, "cannot modify read-only string literal `%s`", existingSE.toChars());
return CTFEExp.cantexp;
}
@@ -4061,7 +4082,7 @@ public:
if (aggregate == aggr2 &&
lowerbound < srcupper && srclower < upperbound)
{
- e.error("overlapping slice assignment `[%llu..%llu] = [%llu..%llu]`",
+ error(e.loc, "overlapping slice assignment `[%llu..%llu] = [%llu..%llu]`",
ulong(lowerbound), ulong(upperbound), ulong(srclower), ulong(srcupper));
return CTFEExp.cantexp;
}
@@ -4071,7 +4092,7 @@ public:
newval = resolveSlice(newval);
if (CTFEExp.isCantExp(newval))
{
- e.error("CTFE internal error: slice `%s`", orignewval.toChars());
+ error(e.loc, "CTFE internal error: slice `%s`", orignewval.toChars());
return CTFEExp.cantexp;
}
}
@@ -4109,7 +4130,7 @@ public:
{
if (existingAE.ownedByCtfe != OwnedBy.ctfe)
{
- e.error("cannot modify read-only constant `%s`", existingAE.toChars());
+ error(e.loc, "cannot modify read-only constant `%s`", existingAE.toChars());
return CTFEExp.cantexp;
}
@@ -4181,7 +4202,7 @@ public:
if (aggregate == aggr2 &&
lowerbound < srcupper && srclower < upperbound)
{
- e.error("overlapping slice assignment `[%llu..%llu] = [%llu..%llu]`",
+ error(e.loc, "overlapping slice assignment `[%llu..%llu] = [%llu..%llu]`",
ulong(lowerbound), ulong(upperbound), ulong(srclower), ulong(srcupper));
return CTFEExp.cantexp;
}
@@ -4191,7 +4212,7 @@ public:
newval = resolveSlice(newval);
if (CTFEExp.isCantExp(newval))
{
- e.error("CTFE internal error: slice `%s`", orignewval.toChars());
+ error(e.loc, "CTFE internal error: slice `%s`", orignewval.toChars());
return CTFEExp.cantexp;
}
}
@@ -4312,7 +4333,7 @@ public:
return interpret(pue, retslice, istate);
}
- e.error("slice operation `%s = %s` cannot be evaluated at compile time", e1.toChars(), newval.toChars());
+ error(e.loc, "slice operation `%s = %s` cannot be evaluated at compile time", e1.toChars(), newval.toChars());
return CTFEExp.cantexp;
}
@@ -4517,7 +4538,7 @@ public:
}
if (except)
{
- e.error("comparison `%s` of pointers to unrelated memory blocks remains indeterminate at compile time because exception `%s` was thrown while evaluating `%s`", e.e1.toChars(), except.toChars(), e.e2.toChars());
+ error(e.loc, "comparison `%s` of pointers to unrelated memory blocks remains indeterminate at compile time because exception `%s` was thrown while evaluating `%s`", e.e1.toChars(), except.toChars(), e.e2.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4540,7 +4561,7 @@ public:
// comparison is in the same direction as the first, or else
// more than two memory blocks are involved (either two independent
// invalid comparisons are present, or else agg3 == agg4).
- e.error("comparison `%s` of pointers to unrelated memory blocks is indeterminate at compile time, even when combined with `%s`.", e.e1.toChars(), e.e2.toChars());
+ error(e.loc, "comparison `%s` of pointers to unrelated memory blocks is indeterminate at compile time, even when combined with `%s`.", e.e1.toChars(), e.e2.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4633,14 +4654,14 @@ public:
res = true;
else
{
- e.error("`%s` does not evaluate to a `bool`", result.toChars());
+ error(e.loc, "`%s` does not evaluate to a `bool`", result.toChars());
result = CTFEExp.cantexp;
return;
}
}
else
{
- e.error("`%s` cannot be interpreted as a `bool`", result.toChars());
+ error(e.loc, "`%s` cannot be interpreted as a `bool`", result.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4670,7 +4691,7 @@ public:
}
errorSupplemental(callingExp.loc, "called from here: `%s`", callingExp.toChars());
// Quit if it's not worth trying to compress the stack trace
- if (ctfeGlobals.callDepth < 6 || global.params.verbose)
+ if (ctfeGlobals.callDepth < 6 || global.params.v.verbose)
return;
// Recursion happens if the current function already exists in the call stack.
int numToSuppress = 0;
@@ -4854,13 +4875,13 @@ public:
{
// delegate.funcptr()
// others
- e.error("cannot call `%s` at compile time", e.toChars());
+ error(e.loc, "cannot call `%s` at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
if (!fd)
{
- e.error("CTFE internal error: cannot evaluate `%s` at compile time", e.toChars());
+ error(e.loc, "CTFE internal error: cannot evaluate `%s` at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4873,7 +4894,7 @@ public:
if (pthis.op == EXP.typeid_)
{
- pthis.error("static variable `%s` cannot be read at compile time", pthis.toChars());
+ error(pthis.loc, "static variable `%s` cannot be read at compile time", pthis.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4882,7 +4903,7 @@ public:
if (pthis.op == EXP.null_)
{
assert(pthis.type.toBasetype().ty == Tclass);
- e.error("function call through null class reference `%s`", pthis.toChars());
+ error(e.loc, "function call through null class reference `%s`", pthis.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4904,7 +4925,7 @@ public:
if (fd && fd.semanticRun >= PASS.semantic3done && fd.hasSemantic3Errors())
{
- e.error("CTFE failed because of previous errors in `%s`", fd.toChars());
+ error(e.loc, "CTFE failed because of previous errors in `%s`", fd.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -4916,7 +4937,7 @@ public:
if (!fd.fbody)
{
- e.error("`%s` cannot be interpreted at compile time, because it has no available source code", fd.toChars());
+ error(e.loc, "`%s` cannot be interpreted at compile time, because it has no available source code", fd.toChars());
result = CTFEExp.showcontext;
return;
}
@@ -5095,7 +5116,7 @@ public:
}
else
{
- e.error("`%s` does not evaluate to boolean result at compile time", e.econd.toChars());
+ error(e.loc, "`%s` does not evaluate to boolean result at compile time", e.econd.toChars());
result = CTFEExp.cantexp;
}
}
@@ -5113,7 +5134,7 @@ public:
return;
if (e1.op != EXP.string_ && e1.op != EXP.arrayLiteral && e1.op != EXP.slice && e1.op != EXP.null_)
{
- e.error("`%s` cannot be evaluated at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be evaluated at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5166,7 +5187,7 @@ public:
return;
if (e1.op != EXP.arrayLiteral && e1.op != EXP.int64 && e1.op != EXP.float64)
{
- e.error("`%s` cannot be evaluated at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be evaluated at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5196,7 +5217,7 @@ public:
if (result.op != EXP.vector)
return;
}
- e.error("`%s` cannot be evaluated at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be evaluated at compile time", e.toChars());
result = CTFEExp.cantexp;
}
@@ -5210,7 +5231,7 @@ public:
assert(e1);
if (exceptionOrCant(e1))
return;
- e.error("`%s` cannot be evaluated at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be evaluated at compile time", e.toChars());
result = CTFEExp.cantexp;
}
@@ -5224,7 +5245,7 @@ public:
assert(e1);
if (exceptionOrCant(e1))
return;
- e.error("`%s` cannot be evaluated at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be evaluated at compile time", e.toChars());
result = CTFEExp.cantexp;
}
@@ -5249,18 +5270,18 @@ public:
if (agg.op == EXP.null_)
{
- e.error("cannot index through null pointer `%s`", e.e1.toChars());
+ error(e.loc, "cannot index through null pointer `%s`", e.e1.toChars());
return false;
}
if (agg.op == EXP.int64)
{
- e.error("cannot index through invalid pointer `%s` of value `%s`", e.e1.toChars(), e1.toChars());
+ error(e.loc, "cannot index through invalid pointer `%s` of value `%s`", e.e1.toChars(), e1.toChars());
return false;
}
// Pointer to a non-array variable
if (agg.op == EXP.symbolOffset)
{
- e.error("mutable variable `%s` cannot be %s at compile time, even through a pointer", cast(char*)(modify ? "modified" : "read"), agg.isSymOffExp().var.toChars());
+ error(e.loc, "mutable variable `%s` cannot be %s at compile time, even through a pointer", cast(char*)(modify ? "modified" : "read"), agg.isSymOffExp().var.toChars());
return false;
}
@@ -5269,7 +5290,7 @@ public:
dinteger_t len = resolveArrayLength(agg);
if (ofs + indx >= len)
{
- e.error("pointer index `[%lld]` exceeds allocated memory block `[0..%lld]`", ofs + indx, len);
+ error(e.loc, "pointer index `[%lld]` exceeds allocated memory block `[0..%lld]`", ofs + indx, len);
return false;
}
}
@@ -5277,7 +5298,7 @@ public:
{
if (ofs + indx != 0)
{
- e.error("pointer index `[%lld]` lies outside memory block `[0..1]`", ofs + indx);
+ error(e.loc, "pointer index `[%lld]` lies outside memory block `[0..1]`", ofs + indx);
return false;
}
}
@@ -5291,7 +5312,7 @@ public:
return false;
if (e1.op == EXP.null_)
{
- e.error("cannot index null array `%s`", e.e1.toChars());
+ error(e.loc, "cannot index null array `%s`", e.e1.toChars());
return false;
}
if (auto ve = e1.isVectorExp())
@@ -5309,7 +5330,7 @@ public:
{
if (e1.op != EXP.arrayLiteral && e1.op != EXP.string_ && e1.op != EXP.slice && e1.op != EXP.vector)
{
- e.error("cannot determine length of `%s` at compile time", e.e1.toChars());
+ error(e.loc, "cannot determine length of `%s` at compile time", e.e1.toChars());
return false;
}
len = resolveArrayLength(e1);
@@ -5328,7 +5349,7 @@ public:
return false;
if (e2.op != EXP.int64)
{
- e.error("CTFE internal error: non-integral index `[%s]`", e.e2.toChars());
+ error(e.loc, "CTFE internal error: non-integral index `[%s]`", e.e2.toChars());
return false;
}
@@ -5341,7 +5362,7 @@ public:
if (index > iupr - ilwr)
{
- e.error("index %llu exceeds array length %llu", index, iupr - ilwr);
+ error(e.loc, "index %llu exceeds array length %llu", index, iupr - ilwr);
return false;
}
*pagg = e1.isSliceExp().e1;
@@ -5353,7 +5374,7 @@ public:
*pidx = e2.toInteger();
if (len <= *pidx)
{
- e.error("array index %lld is out of bounds `[0..%lld]`", *pidx, len);
+ error(e.loc, "array index %lld is out of bounds `[0..%lld]`", *pidx, len);
return false;
}
}
@@ -5411,7 +5432,7 @@ public:
{
assert(0); // does not reach here?
}
- e.error("cannot index null array `%s`", e.e1.toChars());
+ error(e.loc, "cannot index null array `%s`", e.e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5439,7 +5460,7 @@ public:
result = findKeyInAA(e.loc, e1.isAssocArrayLiteralExp(), e2);
if (!result)
{
- e.error("key `%s` not found in associative array `%s`", e2.toChars(), e.e1.toChars());
+ error(e.loc, "key `%s` not found in associative array `%s`", e2.toChars(), e.e1.toChars());
result = CTFEExp.cantexp;
}
return;
@@ -5467,7 +5488,7 @@ public:
return;
if (result.op == EXP.void_)
{
- e.error("`%s` is used before initialized", e.toChars());
+ error(e.loc, "`%s` is used before initialized", e.toChars());
errorSupplemental(result.loc, "originally uninitialized here");
result = CTFEExp.cantexp;
return;
@@ -5490,7 +5511,7 @@ public:
return;
if (e1.op == EXP.int64)
{
- e.error("cannot slice invalid pointer `%s` of value `%s`", e.e1.toChars(), e1.toChars());
+ error(e.loc, "cannot slice invalid pointer `%s` of value `%s`", e.e1.toChars(), e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5518,19 +5539,19 @@ public:
result.type = e.type;
return;
}
- e.error("cannot slice null pointer `%s`", e.e1.toChars());
+ error(e.loc, "cannot slice null pointer `%s`", e.e1.toChars());
result = CTFEExp.cantexp;
return;
}
if (agg.op == EXP.symbolOffset)
{
- e.error("slicing pointers to static variables is not supported in CTFE");
+ error(e.loc, "slicing pointers to static variables is not supported in CTFE");
result = CTFEExp.cantexp;
return;
}
if (agg.op != EXP.arrayLiteral && agg.op != EXP.string_)
{
- e.error("pointer `%s` cannot be sliced at compile time (it does not point to an array)", e.e1.toChars());
+ error(e.loc, "pointer `%s` cannot be sliced at compile time (it does not point to an array)", e.e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5539,7 +5560,7 @@ public:
//Type *pointee = ((TypePointer *)agg.type)->next;
if (sliceBoundsCheck(0, len, ilwr, iupr))
{
- e.error("pointer slice `[%lld..%lld]` exceeds allocated memory block `[0..%lld]`", ilwr, iupr, len);
+ error(e.loc, "pointer slice `[%lld..%lld]` exceeds allocated memory block `[0..%lld]`", ilwr, iupr, len);
result = CTFEExp.cantexp;
return;
}
@@ -5587,7 +5608,7 @@ public:
{
if (e1.op != EXP.arrayLiteral && e1.op != EXP.string_ && e1.op != EXP.null_ && e1.op != EXP.slice && e1.op != EXP.vector)
{
- e.error("cannot determine length of `%s` at compile time", e1.toChars());
+ error(e.loc, "cannot determine length of `%s` at compile time", e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5631,7 +5652,7 @@ public:
result = e1;
return;
}
- e1.error("slice `[%llu..%llu]` is out of bounds", ilwr, iupr);
+ error(e1.loc, "slice `[%llu..%llu]` is out of bounds", ilwr, iupr);
result = CTFEExp.cantexp;
return;
}
@@ -5643,7 +5664,7 @@ public:
uinteger_t up1 = se.upr.toInteger();
if (sliceBoundsCheck(0, up1 - lo1, ilwr, iupr))
{
- e.error("slice `[%llu..%llu]` exceeds array bounds `[0..%llu]`", ilwr, iupr, up1 - lo1);
+ error(e.loc, "slice `[%llu..%llu]` exceeds array bounds `[0..%llu]`", ilwr, iupr, up1 - lo1);
result = CTFEExp.cantexp;
return;
}
@@ -5660,7 +5681,7 @@ public:
{
if (sliceBoundsCheck(0, dollar, ilwr, iupr))
{
- e.error("slice `[%lld..%lld]` exceeds array bounds `[0..%lld]`", ilwr, iupr, dollar);
+ error(e.loc, "slice `[%lld..%lld]` exceeds array bounds `[0..%lld]`", ilwr, iupr, dollar);
result = CTFEExp.cantexp;
return;
}
@@ -5690,7 +5711,7 @@ public:
}
if (e2.op != EXP.assocArrayLiteral)
{
- e.error("`%s` cannot be interpreted at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be interpreted at compile time", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5772,7 +5793,7 @@ public:
if (CTFEExp.isCantExp(result))
{
- e.error("`%s` cannot be interpreted at compile time", e.toChars());
+ error(e.loc, "`%s` cannot be interpreted at compile time", e.toChars());
return;
}
// We know we still own it, because we interpreted both e1 and e2
@@ -5814,7 +5835,7 @@ public:
case Tclass:
if (result.op != EXP.classReference)
{
- e.error("`delete` on invalid class reference `%s`", result.toChars());
+ error(e.loc, "`delete` on invalid class reference `%s`", result.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5905,7 +5926,7 @@ public:
}
else if (ultimatePointee.ty != Tvoid && ultimateSrc.ty != Tvoid && !isSafePointerCast(elemtype, pointee))
{
- e.error("reinterpreting cast from `%s*` to `%s*` is not supported in CTFE", elemtype.toChars(), pointee.toChars());
+ error(e.loc, "reinterpreting cast from `%s*` to `%s*` is not supported in CTFE", elemtype.toChars(), pointee.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -5964,7 +5985,7 @@ public:
}
if (!isSafePointerCast(origType, pointee))
{
- e.error("using `void*` to reinterpret cast from `%s*` to `%s*` is not supported in CTFE", origType.toChars(), pointee.toChars());
+ error(e.loc, "using `void*` to reinterpret cast from `%s*` to `%s*` is not supported in CTFE", origType.toChars(), pointee.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6008,7 +6029,7 @@ public:
Type origType = (cast(SymbolExp)e1).var.type;
if (castBackFromVoid && !isSafePointerCast(origType, pointee))
{
- e.error("using `void*` to reinterpret cast from `%s*` to `%s*` is not supported in CTFE", origType.toChars(), pointee.toChars());
+ error(e.loc, "using `void*` to reinterpret cast from `%s*` to `%s*` is not supported in CTFE", origType.toChars(), pointee.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6025,7 +6046,7 @@ public:
e1 = interpretRegion(e1, istate);
if (e1.op != EXP.null_)
{
- e.error("pointer cast from `%s` to `%s` is not supported at compile time", e1.type.toChars(), e.to.toChars());
+ error(e.loc, "pointer cast from `%s` to `%s` is not supported at compile time", e1.type.toChars(), e.to.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6046,7 +6067,7 @@ public:
SliceExp se = e1.isSliceExp();
if (!isSafePointerCast(se.e1.type.nextOf(), e.to.nextOf()))
{
- e.error("array cast from `%s` to `%s` is not supported at compile time", se.e1.type.toChars(), e.to.toChars());
+ error(e.loc, "array cast from `%s` to `%s` is not supported at compile time", se.e1.type.toChars(), e.to.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6059,7 +6080,7 @@ public:
// types of identical size.
if ((e.to.ty == Tsarray || e.to.ty == Tarray) && (e1.type.ty == Tsarray || e1.type.ty == Tarray) && !isSafePointerCast(e1.type.nextOf(), e.to.nextOf()))
{
- e.error("array cast from `%s` to `%s` is not supported at compile time", e1.type.toChars(), e.to.toChars());
+ error(e.loc, "array cast from `%s` to `%s` is not supported at compile time", e1.type.toChars(), e.to.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6105,19 +6126,20 @@ public:
result = interpret(&ue, e.msg, istate);
if (exceptionOrCant(result))
return;
- if (StringExp se = result.isStringExp())
- e.error("%s", se.toStringz().ptr);
+ result = scrubReturnValue(e.loc, result);
+ if (StringExp se = result.toStringExp())
+ error(e.loc, "%s", se.toStringz().ptr);
else
- e.error("%s", result.toChars());
+ error(e.loc, "%s", result.toChars());
}
else
- e.error("`%s` failed", e.toChars());
+ error(e.loc, "`%s` failed", e.toChars());
result = CTFEExp.cantexp;
return;
}
else
{
- e.error("`%s` is not a compile time boolean expression", e1.toChars());
+ error(e.loc, "`%s` is not a compile time boolean expression", e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6195,7 +6217,7 @@ public:
{
if (soe.offset == 0 && soe.var.isFuncDeclaration())
return;
- e.error("cannot dereference pointer to static variable `%s` at compile time", soe.var.toChars());
+ error(e.loc, "cannot dereference pointer to static variable `%s` at compile time", soe.var.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6220,9 +6242,9 @@ public:
if (result.op != EXP.address)
{
if (result.op == EXP.null_)
- e.error("dereference of null pointer `%s`", e.e1.toChars());
+ error(e.loc, "dereference of null pointer `%s`", e.e1.toChars());
else
- e.error("dereference of invalid pointer `%s`", result.toChars());
+ error(e.loc, "dereference of invalid pointer `%s`", result.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6253,7 +6275,7 @@ public:
{
void notImplementedYet()
{
- e.error("`%s.%s` is not yet implemented at compile time", e.e1.toChars(), e.var.toChars());
+ error(e.loc, "`%s.%s` is not yet implemented at compile time", e.e1.toChars(), e.var.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6282,7 +6304,7 @@ public:
VarDeclaration v = e.var.isVarDeclaration();
if (!v)
{
- e.error("CTFE internal error: `%s`", e.toChars());
+ error(e.loc, "CTFE internal error: `%s`", e.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6290,9 +6312,9 @@ public:
if (ex.op == EXP.null_)
{
if (ex.type.toBasetype().ty == Tclass)
- e.error("class `%s` is `null` and cannot be dereferenced", e.e1.toChars());
+ error(e.loc, "class `%s` is `null` and cannot be dereferenced", e.e1.toChars());
else
- e.error("CTFE internal error: null this `%s`", e.e1.toChars());
+ error(e.loc, "CTFE internal error: null this `%s`", e.e1.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6335,7 +6357,7 @@ public:
}
if (i == -1)
{
- e.error("couldn't find field `%s` of type `%s` in `%s`", v.toChars(), e.type.toChars(), se.toChars());
+ error(e.loc, "couldn't find field `%s` of type `%s` in `%s`", v.toChars(), e.type.toChars(), se.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6363,7 +6385,7 @@ public:
result = (*se.elements)[i];
if (!result)
{
- e.error("internal compiler error: null field `%s`", v.toChars());
+ error(e.loc, "internal compiler error: null field `%s`", v.toChars());
result = CTFEExp.cantexp;
return;
}
@@ -6372,11 +6394,11 @@ public:
const s = vie.var.toChars();
if (v.overlapped)
{
- e.error("reinterpretation through overlapped field `%s` is not allowed in CTFE", s);
+ error(e.loc, "reinterpretation through overlapped field `%s` is not allowed in CTFE", s);
result = CTFEExp.cantexp;
return;
}
- e.error("cannot read uninitialized variable `%s` in CTFE", s);
+ error(e.loc, "cannot read uninitialized variable `%s` in CTFE", s);
result = CTFEExp.cantexp;
return;
}
@@ -6446,7 +6468,7 @@ public:
override void visit(VoidInitExp e)
{
- e.error("CTFE internal error: trying to read uninitialized variable");
+ error(e.loc, "CTFE internal error: trying to read uninitialized variable");
assert(0);
}
@@ -6474,7 +6496,7 @@ void interpretThrow(ref Expression result, Expression exp, const ref Loc loc, In
}
else
{
- exp.error("to be thrown `%s` must be non-null", exp.toChars());
+ error(exp.loc, "to be thrown `%s` must be non-null", exp.toChars());
result = ErrorExp.get();
}
}
@@ -7260,7 +7282,7 @@ private Expression foreachApplyUtf(UnionExp* pue, InterState* istate, Expression
auto ale = str.isArrayLiteralExp();
if (!se && !ale)
{
- str.error("CTFE internal error: cannot foreach `%s`", str.toChars());
+ error(str.loc, "CTFE internal error: cannot foreach `%s`", str.toChars());
return CTFEExp.cantexp;
}
Expressions args = Expressions(numParams);
@@ -7412,7 +7434,7 @@ private Expression foreachApplyUtf(UnionExp* pue, InterState* istate, Expression
}
if (errmsg)
{
- deleg.error("`%.*s`", cast(int)errmsg.length, errmsg.ptr);
+ error(deleg.loc, "`%.*s`", cast(int)errmsg.length, errmsg.ptr);
return CTFEExp.cantexp;
}
@@ -7716,6 +7738,6 @@ private void removeHookTraceImpl(ref CallExp ce, ref FuncDeclaration fd)
ce = ctfeEmplaceExp!CallExp(ce.loc, ctfeEmplaceExp!VarExp(ce.loc, fd, false), arguments);
- if (global.params.verbose)
+ if (global.params.v.verbose)
message("strip %s =>\n %s", oldCE.toChars(), ce.toChars());
}
diff --git a/gcc/d/dmd/dmacro.d b/gcc/d/dmd/dmacro.d
index 6fc23e9..6e6c4b1 100644
--- a/gcc/d/dmd/dmacro.d
+++ b/gcc/d/dmd/dmacro.d
@@ -13,11 +13,12 @@ module dmd.dmacro;
import core.stdc.ctype;
import core.stdc.string;
-import dmd.doc;
import dmd.common.outbuffer;
import dmd.root.rmem;
-extern (C++) struct MacroTable
+@trusted:
+
+struct MacroTable
{
/**********************************
* Define name=text macro.
@@ -26,7 +27,7 @@ extern (C++) struct MacroTable
* name = name of macro
* text = text of macro
*/
- extern (D) void define(const(char)[] name, const(char)[] text) nothrow pure @safe
+ void define(const(char)[] name, const(char)[] text) nothrow pure @safe
{
//printf("MacroTable::define('%.*s' = '%.*s')\n", cast(int)name.length, name.ptr, text.length, text.ptr);
if (auto table = name in mactab)
@@ -37,13 +38,16 @@ extern (C++) struct MacroTable
mactab[name] = new Macro(name, text);
}
+ alias fp_t = bool function(const(char)* p) @nogc nothrow pure;
+
/*****************************************************
* Look for macros in buf and expand them in place.
* Only look at the text in buf from start to pend.
*
* Returns: `true` on success, `false` when the recursion limit was reached
*/
- extern (D) bool expand(ref OutBuffer buf, size_t start, ref size_t pend, const(char)[] arg, int recursionLimit) nothrow pure
+ bool expand(ref OutBuffer buf, size_t start, ref size_t pend, const(char)[] arg, int recursionLimit,
+ fp_t isIdStart, fp_t isIdTail) nothrow pure
{
version (none)
{
@@ -101,7 +105,7 @@ extern (C++) struct MacroTable
end += marg.length - 2;
// Scan replaced text for further expansion
size_t mend = u + marg.length;
- const success = expand(buf, u, mend, null, recursionLimit);
+ const success = expand(buf, u, mend, null, recursionLimit, isIdStart, isIdTail);
if (!success)
return false;
end += mend - (u + marg.length);
@@ -119,7 +123,7 @@ extern (C++) struct MacroTable
end += -2 + 2 + marg.length + 2;
// Scan replaced text for further expansion
size_t mend = u + 2 + marg.length;
- const success = expand(buf, u + 2, mend, null, recursionLimit);
+ const success = expand(buf, u + 2, mend, null, recursionLimit, isIdStart, isIdTail);
if (!success)
return false;
end += mend - (u + 2 + marg.length);
@@ -149,7 +153,7 @@ extern (C++) struct MacroTable
/* Scan forward to find end of macro name and
* beginning of macro argument (marg).
*/
- for (v = u + 2; v < end; v += utfStride(p + v))
+ for (v = u + 2; v < end; v += utfStride(p[v]))
{
if (!isIdTail(p + v))
{
@@ -228,7 +232,7 @@ extern (C++) struct MacroTable
// Scan replaced text for further expansion
m.inuse++;
size_t mend = v + 1 + 2 + m.text.length + 2;
- const success = expand(buf, v + 1, mend, marg, recursionLimit);
+ const success = expand(buf, v + 1, mend, marg, recursionLimit, isIdStart, isIdTail);
if (!success)
return false;
end += mend - (v + 1 + 2 + m.text.length + 2);
@@ -260,7 +264,7 @@ extern (C++) struct MacroTable
private:
- extern (D) Macro* search(const(char)[] name) @nogc nothrow pure @safe
+ Macro* search(const(char)[] name) @nogc nothrow pure @safe
{
//printf("Macro::search(%.*s)\n", cast(int)name.length, name.ptr);
if (auto table = name in mactab)
@@ -299,7 +303,7 @@ struct Macro
* copy allocated with mem.xmalloc()
*/
-char[] memdup(const(char)[] p) nothrow pure @trusted
+char[] memdup(const(char)[] p) nothrow pure
{
size_t len = p.length;
return (cast(char*)memcpy(mem.xmalloc(len), p.ptr, len))[0 .. len];
@@ -424,3 +428,35 @@ Largstart:
//printf("extractArg%d('%.*s') = '%.*s'\n", n, cast(int)end, p, cast(int)marg.length, marg.ptr);
return v;
}
+
+/*****************************************
+ * Get number of UTF-8 code units in code point that starts with `c`
+ * Params:
+ * c = starting code unit
+ * Returns: number of UTF-8 code units (i.e. bytes), else 1 on invalid UTF start
+ */
+@safe
+int utfStride(char c) @nogc nothrow pure
+{
+ return
+ c < 0x80 ? 1 :
+ c < 0xC0 ? 1 : // invalid UTF start
+ c < 0xE0 ? 2 :
+ c < 0xF0 ? 3 :
+ c < 0xF8 ? 4 :
+ c < 0xFC ? 5 :
+ c < 0xFE ? 6 :
+ 1; // invalid UTF start
+}
+
+unittest
+{
+ assert(utfStride(0) == 1);
+ assert(utfStride(0x80) == 1);
+ assert(utfStride(0xC0) == 2);
+ assert(utfStride(0xE0) == 3);
+ assert(utfStride(0xF0) == 4);
+ assert(utfStride(0xF8) == 5);
+ assert(utfStride(0xFC) == 6);
+ assert(utfStride(0xFE) == 1);
+}
diff --git a/gcc/d/dmd/dmangle.d b/gcc/d/dmd/dmangle.d
index 9b72308..8fdb1ae 100644
--- a/gcc/d/dmd/dmangle.d
+++ b/gcc/d/dmd/dmangle.d
@@ -142,6 +142,7 @@ import dmd.declaration;
import dmd.dmodule;
import dmd.dsymbol;
import dmd.dtemplate;
+import dmd.errors;
import dmd.expression;
import dmd.func;
import dmd.globals;
@@ -821,7 +822,7 @@ public:
printf("\n");
}
if (!ti.tempdecl)
- ti.error("is not defined");
+ error(ti.loc, "%s `%s` is not defined", ti.kind, ti.toPrettyChars);
else
mangleParent(ti);
@@ -888,7 +889,7 @@ public:
buf.writeByte('V');
if (ea.op == EXP.tuple)
{
- ea.error("sequence is not a valid template value argument");
+ error(ea.loc, "sequence is not a valid template value argument");
continue;
}
// Now that we know it is not an alias, we MUST obtain a value
@@ -926,7 +927,7 @@ public:
}
if (!d.type || !d.type.deco)
{
- ti.error("forward reference of %s `%s`", d.kind(), d.toChars());
+ error(ti.loc, "%s `%s` forward reference of %s `%s`", ti.kind, ti.toPrettyChars, d.kind(), d.toChars());
continue;
}
}
@@ -982,7 +983,8 @@ public:
////////////////////////////////////////////////////////////////////////////
override void visit(Expression e)
{
- e.error("expression `%s` is not a valid template value argument", e.toChars());
+ if (!e.type.isTypeError())
+ error(e.loc, "expression `%s` is not a valid template value argument", e.toChars());
}
override void visit(IntegerExp e)
@@ -1040,7 +1042,7 @@ public:
{
dchar c;
if (const s = utf_decodeWchar(slice, u, c))
- e.error("%.*s", cast(int)s.length, s.ptr);
+ error(e.loc, "%.*s", cast(int)s.length, s.ptr);
else
tmp.writeUTF8(c);
}
@@ -1054,7 +1056,7 @@ public:
foreach (c; slice)
{
if (!utf_isValidDchar(c))
- e.error("invalid UCS-32 char \\U%08x", c);
+ error(e.loc, "invalid UCS-32 char \\U%08x", c);
else
tmp.writeUTF8(c);
}
@@ -1300,7 +1302,7 @@ extern (D) void toBuffer(ref OutBuffer buf, const(char)[] id, Dsymbol s)
{
const len = id.length;
if (buf.length + len >= 8 * 1024 * 1024) // 8 megs ought be enough for anyone
- s.error("excessive length %llu for symbol, possible recursive expansion?", cast(ulong)(buf.length + len));
+ error(s.loc, "%s `%s` excessive length %llu for symbol, possible recursive expansion?", s.kind, s.toPrettyChars, cast(ulong)(buf.length + len));
else
{
buf.print(len);
@@ -1422,7 +1424,7 @@ extern (D) const(char)[] externallyMangledIdentifier(Declaration d)
(d.isVarDeclaration() && d.isDataseg() && d.storage_class & STC.extern_))))
{
if (linkage != LINK.d && d.localNum)
- d.error("the same declaration cannot be in multiple scopes with non-D linkage");
+ error(d.loc, "%s `%s` the same declaration cannot be in multiple scopes with non-D linkage", d.kind, d.toPrettyChars);
final switch (linkage)
{
@@ -1438,7 +1440,7 @@ extern (D) const(char)[] externallyMangledIdentifier(Declaration d)
return p.toDString();
}
case LINK.default_:
- d.error("forward declaration");
+ error(d.loc, "%s `%s` forward declaration", d.kind, d.toPrettyChars);
return d.ident.toString();
case LINK.system:
assert(0);
diff --git a/gcc/d/dmd/dmodule.d b/gcc/d/dmd/dmodule.d
index 4a2e15c..2026303 100644
--- a/gcc/d/dmd/dmodule.d
+++ b/gcc/d/dmd/dmodule.d
@@ -468,7 +468,8 @@ extern (C++) final class Module : Package
!FileName.equalsExt(srcfilename, dd_ext))
{
- error("source file name '%.*s' must have .%.*s extension",
+ error(loc, "%s `%s` source file name '%.*s' must have .%.*s extension",
+ kind, toPrettyChars,
cast(int)srcfilename.length, srcfilename.ptr,
cast(int)mars_ext.length, mars_ext.ptr);
fatal();
@@ -528,7 +529,7 @@ extern (C++) final class Module : Package
if (!m.read(loc))
return null;
- if (global.params.verbose)
+ if (global.params.v.verbose)
{
OutBuffer buf;
foreach (pid; packages)
@@ -593,7 +594,8 @@ extern (C++) final class Module : Package
}
if (FileName.equals(docfilename, srcfile.toString()))
{
- error("source file and output file have same name '%s'", srcfile.toChars());
+ error(loc, "%s `%s` source file and output file have same name '%s'",
+ kind, toPrettyChars, srcfile.toChars());
fatal();
}
return FileName(docfilename);
@@ -780,7 +782,9 @@ extern (C++) final class Module : Package
{
filetype = FileType.c;
+ global.compileEnv.masm = target.os == Target.OS.Windows && !target.omfobj; // Microsoft inline assembler format
scope p = new CParser!AST(this, buf, cast(bool) docfile, global.errorSink, target.c, &defines, &global.compileEnv);
+ global.compileEnv.masm = false;
p.nextToken();
checkCompiledImport();
members = p.parseModule();
@@ -791,7 +795,7 @@ extern (C++) final class Module : Package
{
const bool doUnittests = global.params.useUnitTests || global.params.ddoc.doOutput || global.params.dihdr.doOutput;
scope p = new Parser!AST(this, buf, cast(bool) docfile, global.errorSink, &global.compileEnv, doUnittests);
- p.transitionIn = global.params.vin;
+ p.transitionIn = global.params.v.vin;
p.nextToken();
p.parseModuleDeclaration();
md = p.md;
@@ -850,7 +854,7 @@ extern (C++) final class Module : Package
/* Check to see if module name is a valid identifier
*/
if (!Identifier.isValidIdentifier(this.ident.toChars()))
- error("has non-identifier characters in filename, use module declaration instead");
+ error(loc, "%s `%s` has non-identifier characters in filename, use module declaration instead", kind, toPrettyChars);
}
// Insert module into the symbol table
Dsymbol s = this;
@@ -903,11 +907,11 @@ extern (C++) final class Module : Package
if (Module mprev = prev.isModule())
{
if (!FileName.equals(srcname, mprev.srcfile.toChars()))
- error(loc, "from file %s conflicts with another module %s from file %s", srcname, mprev.toChars(), mprev.srcfile.toChars());
+ error(loc, "%s `%s` from file %s conflicts with another module %s from file %s", kind, toPrettyChars, srcname, mprev.toChars(), mprev.srcfile.toChars());
else if (isRoot() && mprev.isRoot())
- error(loc, "from file %s is specified twice on the command line", srcname);
+ error(loc, "%s `%s` from file %s is specified twice on the command line", kind, toPrettyChars, srcname);
else
- error(loc, "from file %s must be imported with 'import %s;'", srcname, toPrettyChars());
+ error(loc, "%s `%s` from file %s must be imported with 'import %s;'", kind, toPrettyChars, srcname, toPrettyChars());
// https://issues.dlang.org/show_bug.cgi?id=14446
// Return previously parsed module to avoid AST duplication ICE.
return mprev;
@@ -918,7 +922,7 @@ extern (C++) final class Module : Package
if (isPackageFile)
amodules.push(this); // Add to global array of all modules
else
- error(md ? md.loc : loc, "from file %s conflicts with package name %s", srcname, pkg.toChars());
+ error(md ? md.loc : loc, "%s `%s` from file %s conflicts with package name %s", kind, toPrettyChars, srcname, pkg.toChars());
}
else
assert(global.errors);
@@ -939,7 +943,7 @@ extern (C++) final class Module : Package
return; // already done
if (filetype == FileType.ddoc)
{
- error("is a Ddoc file, cannot import it");
+ error(loc, "%s `%s` is a Ddoc file, cannot import it", kind, toPrettyChars);
return;
}
@@ -1024,11 +1028,11 @@ extern (C++) final class Module : Package
const slice = se.peekString();
if (slice.length)
{
- deprecation(loc, "is deprecated - %.*s", cast(int)slice.length, slice.ptr);
+ deprecation(loc, "%s `%s` is deprecated - %.*s", kind, toPrettyChars, cast(int)slice.length, slice.ptr);
return;
}
}
- deprecation(loc, "is deprecated");
+ deprecation(loc, "%s `%s` is deprecated", kind, toPrettyChars);
}
}
@@ -1253,7 +1257,7 @@ extern (C++) final class Module : Package
// Back end
int doppelganger; // sub-module
Symbol* cov; // private uint[] __coverage;
- uint* covb; // bit array of valid code line numbers
+ uint[] covb; // bit array of valid code line numbers
Symbol* sictor; // module order independent constructor
Symbol* sctor; // module constructor
Symbol* sdtor; // module destructor
@@ -1477,7 +1481,8 @@ private const(char)[] processSource (const(ubyte)[] src, Module mod)
if (buf.length & 3)
{
- mod.error("odd length of UTF-32 char source %llu", cast(ulong) buf.length);
+ .error(mod.loc, "%s `%s` odd length of UTF-32 char source %llu",
+ mod.kind, mod.toPrettyChars, cast(ulong) buf.length);
return null;
}
@@ -1493,7 +1498,7 @@ private const(char)[] processSource (const(ubyte)[] src, Module mod)
{
if (u > 0x10FFFF)
{
- mod.error("UTF-32 value %08x greater than 0x10FFFF", u);
+ .error(mod.loc, "%s `%s` UTF-32 value %08x greater than 0x10FFFF", mod.kind, mod.toPrettyChars, u);
return null;
}
dbuf.writeUTF8(u);
@@ -1523,7 +1528,7 @@ private const(char)[] processSource (const(ubyte)[] src, Module mod)
if (buf.length & 1)
{
- mod.error("odd length of UTF-16 char source %llu", cast(ulong) buf.length);
+ .error(mod.loc, "%s `%s` odd length of UTF-16 char source %llu", mod.kind, mod.toPrettyChars, cast(ulong) buf.length);
return null;
}
@@ -1543,13 +1548,13 @@ private const(char)[] processSource (const(ubyte)[] src, Module mod)
i++;
if (i >= eBuf.length)
{
- mod.error("surrogate UTF-16 high value %04x at end of file", u);
+ .error(mod.loc, "%s `%s` surrogate UTF-16 high value %04x at end of file", mod.kind, mod.toPrettyChars, u);
return null;
}
const u2 = readNext(&eBuf[i]);
if (u2 < 0xDC00 || 0xE000 <= u2)
{
- mod.error("surrogate UTF-16 low value %04x out of range", u2);
+ .error(mod.loc, "%s `%s` surrogate UTF-16 low value %04x out of range", mod.kind, mod.toPrettyChars, u2);
return null;
}
u = (u - 0xD7C0) << 10;
@@ -1557,12 +1562,12 @@ private const(char)[] processSource (const(ubyte)[] src, Module mod)
}
else if (u >= 0xDC00 && u <= 0xDFFF)
{
- mod.error("unpaired surrogate UTF-16 value %04x", u);
+ .error(mod.loc, "%s `%s` unpaired surrogate UTF-16 value %04x", mod.kind, mod.toPrettyChars, u);
return null;
}
else if (u == 0xFFFE || u == 0xFFFF)
{
- mod.error("illegal UTF-16 value %04x", u);
+ .error(mod.loc, "%s `%s` illegal UTF-16 value %04x", mod.kind, mod.toPrettyChars, u);
return null;
}
dbuf.writeUTF8(u);
@@ -1621,7 +1626,8 @@ private const(char)[] processSource (const(ubyte)[] src, Module mod)
// It's UTF-8
if (buf[0] >= 0x80)
{
- mod.error("source file must start with BOM or ASCII character, not \\x%02X", buf[0]);
+ auto loc = mod.getLoc();
+ .error(loc, "%s `%s` source file must start with BOM or ASCII character, not \\x%02X", mod.kind, mod.toPrettyChars, buf[0]);
return null;
}
diff --git a/gcc/d/dmd/doc.d b/gcc/d/dmd/doc.d
index 887fd6c..b1a4c2f 100644
--- a/gcc/d/dmd/doc.d
+++ b/gcc/d/dmd/doc.d
@@ -52,9 +52,11 @@ import dmd.root.rmem;
import dmd.root.string;
import dmd.root.utf;
import dmd.tokens;
-import dmd.utils;
import dmd.visitor;
+private:
+
+public
struct Escape
{
const(char)[][char.max] strings;
@@ -94,7 +96,7 @@ struct Escape
/***********************************************************
*/
-private class Section
+class Section
{
const(char)[] name;
const(char)[] body_;
@@ -105,7 +107,7 @@ private class Section
assert(0);
}
- void write(Loc loc, DocComment* dc, Scope* sc, Dsymbols* a, OutBuffer* buf)
+ void write(Loc loc, DocComment* dc, Scope* sc, Dsymbols* a, ref OutBuffer buf)
{
assert(a.length);
if (name.length)
@@ -151,16 +153,16 @@ private class Section
size_t o = buf.length;
buf.write(body_);
escapeStrayParenthesis(loc, buf, o, true, sc.eSink);
- highlightText(sc, a, loc, *buf, o);
+ highlightText(sc, a, loc, buf, o);
buf.writestring(")");
}
}
/***********************************************************
*/
-private final class ParamSection : Section
+final class ParamSection : Section
{
- override void write(Loc loc, DocComment* dc, Scope* sc, Dsymbols* a, OutBuffer* buf)
+ override void write(Loc loc, DocComment* dc, Scope* sc, Dsymbols* a, ref OutBuffer buf)
{
assert(a.length);
Dsymbol s = (*a)[0]; // test
@@ -241,7 +243,7 @@ private final class ParamSection : Section
}
else if (fparam && fparam.type && fparam.ident)
{
- .toCBuffer(fparam.type, buf, fparam.ident, &hgs);
+ toCBuffer(fparam.type, buf, fparam.ident, hgs);
}
else
{
@@ -257,7 +259,7 @@ private final class ParamSection : Section
buf.write(namestart[0 .. namelen]);
}
escapeStrayParenthesis(loc, buf, o, true, sc.eSink);
- highlightCode(sc, a, *buf, o);
+ highlightCode(sc, a, buf, o);
}
buf.writestring(")");
buf.writestring("$(DDOC_PARAM_DESC ");
@@ -265,7 +267,7 @@ private final class ParamSection : Section
size_t o = buf.length;
buf.write(textstart[0 .. textlen]);
escapeStrayParenthesis(loc, buf, o, true, sc.eSink);
- highlightText(sc, a, loc, *buf, o);
+ highlightText(sc, a, loc, buf, o);
}
buf.writestring(")");
}
@@ -317,19 +319,19 @@ private final class ParamSection : Section
/***********************************************************
*/
-private final class MacroSection : Section
+final class MacroSection : Section
{
- override void write(Loc loc, DocComment* dc, Scope* sc, Dsymbols* a, OutBuffer* buf)
+ override void write(Loc loc, DocComment* dc, Scope* sc, Dsymbols* a, ref OutBuffer buf)
{
//printf("MacroSection::write()\n");
DocComment.parseMacros(dc.escapetable, *dc.pmacrotable, body_);
}
}
-private alias Sections = Array!(Section);
+alias Sections = Array!(Section);
// Workaround for missing Parameter instance for variadic params. (it's unnecessary to instantiate one).
-private bool isCVariadicParameter(Dsymbols* a, const(char)[] p) @safe
+bool isCVariadicParameter(Dsymbols* a, const(char)[] p) @safe
{
foreach (member; *a)
{
@@ -340,7 +342,7 @@ private bool isCVariadicParameter(Dsymbols* a, const(char)[] p) @safe
return false;
}
-private Dsymbol getEponymousMember(TemplateDeclaration td) @safe
+Dsymbol getEponymousMember(TemplateDeclaration td) @safe
{
if (!td.onemember)
return null;
@@ -355,7 +357,7 @@ private Dsymbol getEponymousMember(TemplateDeclaration td) @safe
return null;
}
-private TemplateDeclaration getEponymousParent(Dsymbol s) @safe
+TemplateDeclaration getEponymousParent(Dsymbol s) @safe
{
if (!s.parent)
return null;
@@ -363,40 +365,46 @@ private TemplateDeclaration getEponymousParent(Dsymbol s) @safe
return (td && getEponymousMember(td)) ? td : null;
}
-private immutable ddoc_default = import("default_ddoc_theme." ~ ddoc_ext);
-private immutable ddoc_decl_s = "$(DDOC_DECL ";
-private immutable ddoc_decl_e = ")\n";
-private immutable ddoc_decl_dd_s = "$(DDOC_DECL_DD ";
-private immutable ddoc_decl_dd_e = ")\n";
+immutable ddoc_default = import("default_ddoc_theme." ~ ddoc_ext);
+immutable ddoc_decl_s = "$(DDOC_DECL ";
+immutable ddoc_decl_e = ")\n";
+immutable ddoc_decl_dd_s = "$(DDOC_DECL_DD ";
+immutable ddoc_decl_dd_e = ")\n";
/****************************************************
+ * Generate Ddoc file for Module m.
+ * Params:
+ * m = Module
+ * ddoctext_ptr = combined text of .ddoc files for macro definitions
+ * ddoctext_length = extant of ddoctext_ptr
+ * datetime = charz returned by ctime()
+ * eSink = send error messages to eSink
+ * outbuf = append the Ddoc text to this
*/
-extern(C++) void gendocfile(Module m, ErrorSink eSink)
+public
+extern(C++) void gendocfile(Module m, const char* ddoctext_ptr, size_t ddoctext_length, const char* datetime, ErrorSink eSink, ref OutBuffer outbuf)
{
- __gshared OutBuffer mbuf;
- __gshared int mbuf_done;
- OutBuffer buf;
- //printf("Module::gendocfile()\n");
- if (!mbuf_done) // if not already read the ddoc files
- {
- mbuf_done = 1;
- // Use our internal default
- mbuf.writestring(ddoc_default);
- // Override with DDOCFILE specified in the sc.ini file
- char* p = getenv("DDOCFILE");
- if (p)
- global.params.ddoc.files.shift(p);
- // Override with the ddoc macro files from the command line
- for (size_t i = 0; i < global.params.ddoc.files.length; i++)
- {
- auto buffer = readFile(m.loc, global.params.ddoc.files[i]);
- // BUG: convert file contents to UTF-8 before use
- const data = buffer.data;
- //printf("file: '%.*s'\n", cast(int)data.length, data.ptr);
- mbuf.write(data);
- }
- }
- DocComment.parseMacros(m.escapetable, m.macrotable, mbuf[]);
+ gendocfile(m, ddoctext_ptr[0 .. ddoctext_length], datetime, eSink, outbuf);
+}
+
+/****************************************************
+ * Generate Ddoc text for Module `m` and append it to `outbuf`.
+ * Params:
+ * m = Module
+ * ddoctext = combined text of .ddoc files for macro definitions
+ * datetime = charz returned by ctime()
+ * eSink = send error messages to eSink
+ * outbuf = append the Ddoc text to this
+ */
+public
+void gendocfile(Module m, const char[] ddoctext, const char* datetime, ErrorSink eSink, ref OutBuffer outbuf)
+{
+ // Load internal default macros first
+ DocComment.parseMacros(m.escapetable, m.macrotable, ddoc_default[]);
+
+ // Ddoc files override default macros
+ DocComment.parseMacros(m.escapetable, m.macrotable, ddoctext);
+
Scope* sc = Scope.createGlobal(m, eSink); // create root scope
DocComment* dc = DocComment.parse(m, m.comment);
dc.pmacrotable = &m.macrotable;
@@ -409,14 +417,9 @@ extern(C++) void gendocfile(Module m, ErrorSink eSink)
m.macrotable.define("TITLE", p);
}
// Set time macros
- {
- time_t t;
- time(&t);
- char* p = ctime(&t);
- p = mem.xstrdup(p);
- m.macrotable.define("DATETIME", p.toDString());
- m.macrotable.define("YEAR", p[20 .. 20 + 4]);
- }
+ m.macrotable.define("DATETIME", datetime[0 .. 26]);
+ m.macrotable.define("YEAR", datetime[20 .. 20 + 4]);
+
const srcfilename = m.srcfile.toString();
m.macrotable.define("SRCFILENAME", srcfilename);
const docfilename = m.docfile.toString();
@@ -426,6 +429,8 @@ extern(C++) void gendocfile(Module m, ErrorSink eSink)
dc.copyright.nooutput = 1;
m.macrotable.define("COPYRIGHT", dc.copyright.body_);
}
+
+ OutBuffer buf;
if (m.filetype == FileType.ddoc)
{
const ploc = m.md ? &m.md.loc : &m.loc;
@@ -440,7 +445,7 @@ extern(C++) void gendocfile(Module m, ErrorSink eSink)
if (dc.macros)
{
commentlen = dc.macros.name.ptr - m.comment;
- dc.macros.write(loc, dc, sc, &a, &buf);
+ dc.macros.write(loc, dc, sc, &a, buf);
}
buf.write(m.comment[0 .. commentlen]);
highlightText(sc, &a, loc, buf, 0);
@@ -449,73 +454,47 @@ extern(C++) void gendocfile(Module m, ErrorSink eSink)
{
Dsymbols a;
a.push(m);
- dc.writeSections(sc, &a, &buf);
+ dc.writeSections(sc, &a, buf);
emitMemberComments(m, buf, sc);
}
//printf("BODY= '%.*s'\n", cast(int)buf.length, buf.data);
m.macrotable.define("BODY", buf[]);
+
OutBuffer buf2;
buf2.writestring("$(DDOC)");
size_t end = buf2.length;
- const success = m.macrotable.expand(buf2, 0, end, null, global.recursionLimit);
+ // Expand buf in place with macro expansions
+ const success = m.macrotable.expand(buf2, 0, end, null, global.recursionLimit, &isIdStart, &isIdTail);
if (!success)
eSink.error(Loc.initial, "DDoc macro expansion limit exceeded; more than %d expansions.", global.recursionLimit);
- version (all)
+ /* Remove all the escape sequences from buf,
+ * and make CR-LF the newline.
+ */
+ const slice = buf2[];
+ outbuf.reserve(slice.length);
+ auto p = slice.ptr;
+ for (size_t j = 0; j < slice.length; j++)
{
- /* Remove all the escape sequences from buf2,
- * and make CR-LF the newline.
- */
+ char c = p[j];
+ if (c == 0xFF && j + 1 < slice.length)
{
- const slice = buf2[];
- buf.setsize(0);
- buf.reserve(slice.length);
- auto p = slice.ptr;
- for (size_t j = 0; j < slice.length; j++)
- {
- char c = p[j];
- if (c == 0xFF && j + 1 < slice.length)
- {
- j++;
- continue;
- }
- if (c == '\n')
- buf.writeByte('\r');
- else if (c == '\r')
- {
- buf.writestring("\r\n");
- if (j + 1 < slice.length && p[j + 1] == '\n')
- {
- j++;
- }
- continue;
- }
- buf.writeByte(c);
- }
+ j++;
+ continue;
}
- writeFile(m.loc, m.docfile.toString(), buf[]);
- }
- else
- {
- /* Remove all the escape sequences from buf2
- */
+ if (c == '\n')
+ outbuf.writeByte('\r');
+ else if (c == '\r')
{
- size_t i = 0;
- char* p = buf2.data;
- for (size_t j = 0; j < buf2.length; j++)
+ outbuf.writestring("\r\n");
+ if (j + 1 < slice.length && p[j + 1] == '\n')
{
- if (p[j] == 0xFF && j + 1 < buf2.length)
- {
- j++;
- continue;
- }
- p[i] = p[j];
- i++;
+ j++;
}
- buf2.setsize(i);
+ continue;
}
- writeFile(m.loc, m.docfile.toString(), buf2[]);
+ outbuf.writeByte(c);
}
}
@@ -526,11 +505,12 @@ extern(C++) void gendocfile(Module m, ErrorSink eSink)
* to preserve text literally. This also means macros in the
* text won't be expanded.
*/
-void escapeDdocString(OutBuffer* buf, size_t start)
+public
+void escapeDdocString(ref OutBuffer buf, size_t start)
{
for (size_t u = start; u < buf.length; u++)
{
- char c = (*buf)[u];
+ char c = buf[u];
switch (c)
{
case '$':
@@ -568,14 +548,14 @@ void escapeDdocString(OutBuffer* buf, size_t start)
* directly preceeded by a backslash with $(LPAREN) or $(RPAREN) instead of
* counting them as stray parentheses
*/
-private void escapeStrayParenthesis(Loc loc, OutBuffer* buf, size_t start, bool respectBackslashEscapes, ErrorSink eSink)
+private void escapeStrayParenthesis(Loc loc, ref OutBuffer buf, size_t start, bool respectBackslashEscapes, ErrorSink eSink)
{
uint par_open = 0;
char inCode = 0;
bool atLineStart = true;
for (size_t u = start; u < buf.length; u++)
{
- char c = (*buf)[u];
+ char c = buf[u];
switch (c)
{
case '(':
@@ -619,7 +599,7 @@ private void escapeStrayParenthesis(Loc loc, OutBuffer* buf, size_t start, bool
// Issue 15465: don't try to escape unbalanced parens inside code
// blocks.
int numdash = 1;
- for (++u; u < buf.length && (*buf)[u] == c; ++u)
+ for (++u; u < buf.length && buf[u] == c; ++u)
++numdash;
--u;
if (c == '`' || (atLineStart && numdash >= 3))
@@ -635,14 +615,14 @@ private void escapeStrayParenthesis(Loc loc, OutBuffer* buf, size_t start, bool
// replace backslash-escaped parens with their macros
if (!inCode && respectBackslashEscapes && u+1 < buf.length)
{
- if ((*buf)[u+1] == '(' || (*buf)[u+1] == ')')
+ if (buf[u+1] == '(' || buf[u+1] == ')')
{
- const paren = (*buf)[u+1] == '(' ? "$(LPAREN)" : "$(RPAREN)";
+ const paren = buf[u+1] == '(' ? "$(LPAREN)" : "$(RPAREN)";
buf.remove(u, 2); //remove the \)
buf.insert(u, paren); //insert this instead
u += 8; //skip over newly inserted macro
}
- else if ((*buf)[u+1] == '\\')
+ else if (buf[u+1] == '\\')
++u;
}
break;
@@ -657,7 +637,7 @@ private void escapeStrayParenthesis(Loc loc, OutBuffer* buf, size_t start, bool
for (size_t u = buf.length; u > start;)
{
u--;
- char c = (*buf)[u];
+ char c = buf[u];
switch (c)
{
case ')':
@@ -683,14 +663,14 @@ private void escapeStrayParenthesis(Loc loc, OutBuffer* buf, size_t start, bool
// Basically, this is to skip over things like private{} blocks in a struct or
// class definition that don't add any components to the qualified name.
-private Scope* skipNonQualScopes(Scope* sc) @safe
+Scope* skipNonQualScopes(Scope* sc) @safe
{
while (sc && !sc.scopesym)
sc = sc.enclosing;
return sc;
}
-private bool emitAnchorName(ref OutBuffer buf, Dsymbol s, Scope* sc, bool includeParent)
+bool emitAnchorName(ref OutBuffer buf, Dsymbol s, Scope* sc, bool includeParent)
{
if (!s || s.isPackage() || s.isModule())
return false;
@@ -721,7 +701,7 @@ private bool emitAnchorName(ref OutBuffer buf, Dsymbol s, Scope* sc, bool includ
return true;
}
-private void emitAnchor(ref OutBuffer buf, Dsymbol s, Scope* sc, bool forHeader = false)
+void emitAnchor(ref OutBuffer buf, Dsymbol s, Scope* sc, bool forHeader = false)
{
Identifier ident;
{
@@ -846,7 +826,7 @@ private void emitAnchor(ref OutBuffer buf, Dsymbol s, Scope* sc, bool forHeader
/******************************* emitComment **********************************/
/** Get leading indentation from 'src' which represents lines of code. */
-private size_t getCodeIndent(const(char)* src)
+size_t getCodeIndent(const(char)* src)
{
while (src && (*src == '\r' || *src == '\n'))
++src; // skip until we find the first non-empty line
@@ -860,7 +840,7 @@ private size_t getCodeIndent(const(char)* src)
}
/** Recursively expand template mixin member docs into the scope. */
-private void expandTemplateMixinComments(TemplateMixin tm, ref OutBuffer buf, Scope* sc)
+void expandTemplateMixinComments(TemplateMixin tm, ref OutBuffer buf, Scope* sc)
{
if (!tm.semanticRun)
tm.dsymbolSemantic(sc);
@@ -879,7 +859,7 @@ private void expandTemplateMixinComments(TemplateMixin tm, ref OutBuffer buf, Sc
}
}
-private void emitMemberComments(ScopeDsymbol sds, ref OutBuffer buf, Scope* sc)
+void emitMemberComments(ScopeDsymbol sds, ref OutBuffer buf, Scope* sc)
{
if (!sds.members)
return;
@@ -920,14 +900,14 @@ private void emitMemberComments(ScopeDsymbol sds, ref OutBuffer buf, Scope* sc)
buf.writestring(")");
}
-private void emitVisibility(ref OutBuffer buf, Import i)
+void emitVisibility(ref OutBuffer buf, Import i)
{
// imports are private by default, which is different from other declarations
// so they should explicitly show their visibility
emitVisibility(buf, i.visibility);
}
-private void emitVisibility(ref OutBuffer buf, Declaration d)
+void emitVisibility(ref OutBuffer buf, Declaration d)
{
auto vis = d.visibility;
if (vis.kind != Visibility.Kind.undefined && vis.kind != Visibility.Kind.public_)
@@ -936,13 +916,13 @@ private void emitVisibility(ref OutBuffer buf, Declaration d)
}
}
-private void emitVisibility(ref OutBuffer buf, Visibility vis)
+void emitVisibility(ref OutBuffer buf, Visibility vis)
{
- visibilityToBuffer(&buf, vis);
+ visibilityToBuffer(buf, vis);
buf.writeByte(' ');
}
-private void emitComment(Dsymbol s, ref OutBuffer buf, Scope* sc)
+void emitComment(Dsymbol s, ref OutBuffer buf, Scope* sc)
{
extern (C++) final class EmitComment : Visitor
{
@@ -1037,7 +1017,7 @@ private void emitComment(Dsymbol s, ref OutBuffer buf, Scope* sc)
// Put the ddoc comment as the document 'description'
buf.writestring(ddoc_decl_dd_s);
{
- dc.writeSections(sc, &dc.a, buf);
+ dc.writeSections(sc, &dc.a, *buf);
if (ScopeDsymbol sds = dc.a[0].isScopeDsymbol())
emitMemberComments(sds, *buf, sc);
}
@@ -1226,7 +1206,7 @@ private void emitComment(Dsymbol s, ref OutBuffer buf, Scope* sc)
s.accept(v);
}
-private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
+void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
{
extern (C++) final class ToDocBuffer : Visitor
{
@@ -1246,7 +1226,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
//printf("Dsymbol::toDocbuffer() %s\n", s.toChars());
HdrGenState hgs;
hgs.ddoc = true;
- .toCBuffer(s, buf, &hgs);
+ toCBuffer(s, *buf, hgs);
}
void prefix(Dsymbol s)
@@ -1296,7 +1276,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
HdrGenState hgs;
hgs.ddoc = true;
emitVisibility(*buf, i);
- .toCBuffer(i, buf, &hgs);
+ toCBuffer(i, *buf, hgs);
}
override void visit(Declaration d)
@@ -1315,10 +1295,10 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
Type origType = d.originalType ? d.originalType : d.type;
if (origType.ty == Tfunction)
{
- functionToBufferFull(cast(TypeFunction)origType, buf, d.ident, &hgs, td);
+ functionToBufferFull(cast(TypeFunction)origType, *buf, d.ident, &hgs, td);
}
else
- .toCBuffer(origType, buf, d.ident, &hgs);
+ toCBuffer(origType, *buf, d.ident, hgs);
}
else
buf.writestring(d.ident.toString());
@@ -1331,7 +1311,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
{
if (i)
buf.writestring(", ");
- toCBuffer((*td.origParameters)[i], buf, &hgs);
+ toCBuffer((*td.origParameters)[i], *buf, hgs);
}
}
buf.writeByte(')');
@@ -1345,7 +1325,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
buf.writestring("$(DDOC_CONSTRAINT ");
}
- .toCBuffer(td.constraint, buf, &hgs);
+ toCBuffer(td.constraint, *buf, hgs);
if (noFuncDecl)
{
@@ -1505,7 +1485,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
else
{
HdrGenState hgs;
- .toCBuffer(bc.type, buf, null, &hgs);
+ toCBuffer(bc.type, *buf, null, hgs);
}
}
buf.writestring(";\n");
@@ -1520,7 +1500,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
{
buf.writestring(": $(DDOC_ENUM_BASETYPE ");
HdrGenState hgs;
- .toCBuffer(ed.memtype, buf, null, &hgs);
+ toCBuffer(ed.memtype, *buf, null, hgs);
buf.writestring(")");
}
buf.writestring(";\n");
@@ -1540,6 +1520,7 @@ private void toDocBuffer(Dsymbol s, ref OutBuffer buf, Scope* sc)
/***********************************************************
*/
+public
struct DocComment
{
Sections sections; // Section*[]
@@ -1872,7 +1853,7 @@ struct DocComment
}
}
- void writeSections(Scope* sc, Dsymbols* a, OutBuffer* buf)
+ void writeSections(Scope* sc, Dsymbols* a, ref OutBuffer buf)
{
assert(a.length);
//printf("DocComment::writeSections()\n");
@@ -1897,7 +1878,7 @@ struct DocComment
size_t o = buf.length;
buf.write(sec.body_);
escapeStrayParenthesis(loc, buf, o, true, sc.eSink);
- highlightText(sc, a, loc, *buf, o);
+ highlightText(sc, a, loc, buf, o);
buf.writestring(")");
}
else
@@ -1928,7 +1909,7 @@ struct DocComment
buf.writestring("----\n");
buf.writestring(codedoc);
buf.writestring("----\n");
- highlightText(sc, a, loc, *buf, o);
+ highlightText(sc, a, loc, buf, o);
}
buf.writestring(")");
}
@@ -1948,7 +1929,7 @@ struct DocComment
/*****************************************
* Return true if comment consists entirely of "ditto".
*/
-private bool isDitto(const(char)* comment)
+bool isDitto(const(char)* comment)
{
if (comment)
{
@@ -1962,13 +1943,13 @@ private bool isDitto(const(char)* comment)
/**********************************************
* Skip white space.
*/
-private const(char)* skipwhitespace(const(char)* p)
+const(char)* skipwhitespace(const(char)* p)
{
return skipwhitespace(p.toDString).ptr;
}
/// Ditto
-private const(char)[] skipwhitespace(const(char)[] p) @safe
+const(char)[] skipwhitespace(const(char)[] p) @safe
{
foreach (idx, char c; p)
{
@@ -1993,7 +1974,7 @@ private const(char)[] skipwhitespace(const(char)[] p) @safe
* chars = the characters to skip; order is unimportant
* Returns: the index after skipping characters.
*/
-private size_t skipChars(ref OutBuffer buf, size_t i, string chars) @safe
+size_t skipChars(ref OutBuffer buf, size_t i, string chars) @safe
{
Outer:
foreach (j, c; buf[][i..$])
@@ -2028,7 +2009,7 @@ unittest {
* r = the string to replace `c` with
* Returns: `s` with `c` replaced with `r`
*/
-private inout(char)[] replaceChar(inout(char)[] s, char c, string r) pure @safe
+inout(char)[] replaceChar(inout(char)[] s, char c, string r) pure @safe
{
int count = 0;
foreach (char sc; s)
@@ -2070,7 +2051,7 @@ unittest
* s = the string to lowercase
* Returns: the lowercase version of the string or the original if already lowercase
*/
-private string toLowercase(string s) pure @safe
+string toLowercase(string s) pure @safe
{
string lower;
foreach (size_t i; 0..s.length)
@@ -2112,7 +2093,7 @@ unittest
* to = the index within `buf` to stop counting at, exclusive
* Returns: the indent
*/
-private int getMarkdownIndent(ref OutBuffer buf, size_t from, size_t to) @safe
+int getMarkdownIndent(ref OutBuffer buf, size_t from, size_t to) @safe
{
const slice = buf[];
if (to > slice.length)
@@ -2158,7 +2139,7 @@ size_t skiptoident(ref OutBuffer buf, size_t i) @safe
/************************************************
* Scan forward past end of identifier.
*/
-private size_t skippastident(ref OutBuffer buf, size_t i) @safe
+size_t skippastident(ref OutBuffer buf, size_t i) @safe
{
const slice = buf[];
while (i < slice.length)
@@ -2188,7 +2169,7 @@ private size_t skippastident(ref OutBuffer buf, size_t i) @safe
* Scan forward past end of an identifier that might
* contain dots (e.g. `abc.def`)
*/
-private size_t skipPastIdentWithDots(ref OutBuffer buf, size_t i) @safe
+size_t skipPastIdentWithDots(ref OutBuffer buf, size_t i) @safe
{
const slice = buf[];
bool lastCharWasDot;
@@ -2250,7 +2231,7 @@ private size_t skipPastIdentWithDots(ref OutBuffer buf, size_t i) @safe
* i if not a URL
* index just past it if it is a URL
*/
-private size_t skippastURL(ref OutBuffer buf, size_t i)
+size_t skippastURL(ref OutBuffer buf, size_t i)
{
const slice = buf[][i .. $];
size_t j;
@@ -2295,7 +2276,7 @@ Lno:
* i = an index within `buf`. If `i` is after `iAt` then it gets
* reduced by the length of the removed macro.
*/
-private void removeBlankLineMacro(ref OutBuffer buf, ref size_t iAt, ref size_t i)
+void removeBlankLineMacro(ref OutBuffer buf, ref size_t iAt, ref size_t i)
{
if (!iAt)
return;
@@ -2320,7 +2301,7 @@ private void removeBlankLineMacro(ref OutBuffer buf, ref size_t iAt, ref size_t
* loc = the current location within the file
* Returns: whether a thematic break was replaced
*/
-private bool replaceMarkdownThematicBreak(ref OutBuffer buf, ref size_t i, size_t iLineStart, const ref Loc loc)
+bool replaceMarkdownThematicBreak(ref OutBuffer buf, ref size_t i, size_t iLineStart, const ref Loc loc)
{
const slice = buf[];
@@ -2356,7 +2337,7 @@ private bool replaceMarkdownThematicBreak(ref OutBuffer buf, ref size_t i, size_
* the detected heading level from 1 to 6, or
* 0 if not at an ATX heading
*/
-private int detectAtxHeadingLevel(ref OutBuffer buf, const size_t i) @safe
+int detectAtxHeadingLevel(ref OutBuffer buf, const size_t i) @safe
{
const iHeadingStart = i;
const iAfterHashes = skipChars(buf, i, "#");
@@ -2380,7 +2361,7 @@ private int detectAtxHeadingLevel(ref OutBuffer buf, const size_t i) @safe
* buf = an OutBuffer containing the DDoc
* i = the index within `buf` to start looking for a suffix at
*/
-private void removeAnyAtxHeadingSuffix(ref OutBuffer buf, size_t i)
+void removeAnyAtxHeadingSuffix(ref OutBuffer buf, size_t i)
{
size_t j = i;
size_t iSuffixStart = 0;
@@ -2425,7 +2406,7 @@ private void removeAnyAtxHeadingSuffix(ref OutBuffer buf, size_t i)
* headingLevel = the level (1-6) of heading to end. Is set to `0` when this
* function ends.
*/
-private void endMarkdownHeading(ref OutBuffer buf, size_t iStart, ref size_t iEnd, const ref Loc loc, ref int headingLevel)
+void endMarkdownHeading(ref OutBuffer buf, size_t iStart, ref size_t iEnd, const ref Loc loc, ref int headingLevel)
{
char[5] heading = "$(H0 ";
heading[3] = cast(char) ('0' + headingLevel);
@@ -2446,7 +2427,7 @@ private void endMarkdownHeading(ref OutBuffer buf, size_t iStart, ref size_t iEn
* quoteLevel = the current quote level. Is set to `0` when this function ends.
* Returns: the amount that `i` was moved
*/
-private size_t endAllMarkdownQuotes(ref OutBuffer buf, size_t i, ref int quoteLevel)
+size_t endAllMarkdownQuotes(ref OutBuffer buf, size_t i, ref int quoteLevel)
{
const length = quoteLevel;
for (; quoteLevel > 0; --quoteLevel)
@@ -2468,7 +2449,7 @@ private size_t endAllMarkdownQuotes(ref OutBuffer buf, size_t i, ref int quoteLe
* `0` when this function ends.
* Returns: the amount that `i` was moved
*/
-private size_t endAllListsAndQuotes(ref OutBuffer buf, ref size_t i, ref MarkdownList[] nestedLists, ref int quoteLevel, out int quoteMacroLevel)
+size_t endAllListsAndQuotes(ref OutBuffer buf, ref size_t i, ref MarkdownList[] nestedLists, ref int quoteLevel, out int quoteMacroLevel)
{
quoteMacroLevel = 0;
const i0 = i;
@@ -2487,7 +2468,7 @@ private size_t endAllListsAndQuotes(ref OutBuffer buf, ref size_t i, ref Markdow
* downToLevel = the length within `inlineDelimiters`` to reduce emphasis to
* Returns: the number of characters added to the buffer by the replacements
*/
-private size_t replaceMarkdownEmphasis(ref OutBuffer buf, const ref Loc loc, ref MarkdownDelimiter[] inlineDelimiters, int downToLevel = 0)
+size_t replaceMarkdownEmphasis(ref OutBuffer buf, const ref Loc loc, ref MarkdownDelimiter[] inlineDelimiters, int downToLevel = 0)
{
size_t replaceEmphasisPair(ref MarkdownDelimiter start, ref MarkdownDelimiter end)
{
@@ -2566,7 +2547,7 @@ private size_t replaceMarkdownEmphasis(ref OutBuffer buf, const ref Loc loc, ref
/****************************************************
*/
-private bool isIdentifier(Dsymbols* a, const(char)[] s) @safe
+bool isIdentifier(Dsymbols* a, const(char)[] s) @safe
{
foreach (member; *a)
{
@@ -2608,7 +2589,7 @@ private bool isIdentifier(Dsymbols* a, const(char)[] s) @safe
/****************************************************
*/
-private bool isKeyword(const(char)[] str) @safe
+bool isKeyword(const(char)[] str) @safe
{
immutable string[3] table = ["true", "false", "null"];
foreach (s; table)
@@ -2621,7 +2602,7 @@ private bool isKeyword(const(char)[] str) @safe
/****************************************************
*/
-private TypeFunction isTypeFunction(Dsymbol s) @safe
+TypeFunction isTypeFunction(Dsymbol s) @safe
{
FuncDeclaration f = s.isFuncDeclaration();
/* f.type may be NULL for template members.
@@ -2637,7 +2618,7 @@ private TypeFunction isTypeFunction(Dsymbol s) @safe
/****************************************************
*/
-private Parameter isFunctionParameter(Dsymbol s, const(char)[] str) @safe
+Parameter isFunctionParameter(Dsymbol s, const(char)[] str) @safe
{
TypeFunction tf = isTypeFunction(s);
if (tf && tf.parameterList.parameters)
@@ -2655,7 +2636,7 @@ private Parameter isFunctionParameter(Dsymbol s, const(char)[] str) @safe
/****************************************************
*/
-private Parameter isFunctionParameter(Dsymbols* a, const(char)[] p) @safe
+Parameter isFunctionParameter(Dsymbols* a, const(char)[] p) @safe
{
foreach (Dsymbol sym; *a)
{
@@ -2670,7 +2651,7 @@ private Parameter isFunctionParameter(Dsymbols* a, const(char)[] p) @safe
/****************************************************
*/
-private Parameter isEponymousFunctionParameter(Dsymbols *a, const(char)[] p) @safe
+Parameter isEponymousFunctionParameter(Dsymbols *a, const(char)[] p) @safe
{
foreach (Dsymbol dsym; *a)
{
@@ -2718,7 +2699,7 @@ private Parameter isEponymousFunctionParameter(Dsymbols *a, const(char)[] p) @sa
/****************************************************
*/
-private TemplateParameter isTemplateParameter(Dsymbols* a, const(char)* p, size_t len)
+TemplateParameter isTemplateParameter(Dsymbols* a, const(char)* p, size_t len)
{
for (size_t i = 0; i < a.length; i++)
{
@@ -2744,7 +2725,7 @@ private TemplateParameter isTemplateParameter(Dsymbols* a, const(char)* p, size_
* Return true if str is a reserved symbol name
* that starts with a double underscore.
*/
-private bool isReservedName(const(char)[] str) @safe
+bool isReservedName(const(char)[] str) @safe
{
immutable string[] table =
[
@@ -2791,7 +2772,7 @@ private bool isReservedName(const(char)[] str) @safe
/****************************************************
* A delimiter for Markdown inline content like emphasis and links.
*/
-private struct MarkdownDelimiter
+struct MarkdownDelimiter
{
size_t iStart; /// the index where this delimiter starts
int count; /// the length of this delimeter's start sequence
@@ -2811,7 +2792,7 @@ private struct MarkdownDelimiter
/****************************************************
* Info about a Markdown list.
*/
-private struct MarkdownList
+struct MarkdownList
{
string orderedStart; /// an optional start number--if present then the list starts at this number
size_t iStart; /// the index where the list item starts
@@ -3028,7 +3009,7 @@ private struct MarkdownList
/****************************************************
* A Markdown link.
*/
-private struct MarkdownLink
+struct MarkdownLink
{
string href; /// the link destination
string title; /// an optional title for the link
@@ -3607,7 +3588,7 @@ private struct MarkdownLink
/**************************************************
* A set of Markdown link references.
*/
-private struct MarkdownLinkReferences
+struct MarkdownLinkReferences
{
MarkdownLink[string] references; // link references keyed by normalized label
MarkdownLink[string] symbols; // link symbols keyed by name
@@ -3872,7 +3853,7 @@ private struct MarkdownLinkReferences
}
}
-private enum TableColumnAlignment
+enum TableColumnAlignment
{
none,
left,
@@ -3893,7 +3874,7 @@ private enum TableColumnAlignment
* columnAlignments = alignments to populate for each column
* Returns: the index of the end of the parsed delimiter, or `0` if not found
*/
-private size_t parseTableDelimiterRow(ref OutBuffer buf, const size_t iStart, bool inQuote, ref TableColumnAlignment[] columnAlignments) @safe
+size_t parseTableDelimiterRow(ref OutBuffer buf, const size_t iStart, bool inQuote, ref TableColumnAlignment[] columnAlignments) @safe
{
size_t i = skipChars(buf, iStart, inQuote ? ">| \t" : "| \t");
while (i < buf.length && buf[i] != '\r' && buf[i] != '\n')
@@ -3945,7 +3926,7 @@ private size_t parseTableDelimiterRow(ref OutBuffer buf, const size_t iStart, bo
* columnAlignments = the parsed alignments for each column
* Returns: the number of characters added by starting the table, or `0` if unchanged
*/
-private size_t startTable(ref OutBuffer buf, size_t iStart, size_t iEnd, const ref Loc loc, bool inQuote, ref MarkdownDelimiter[] inlineDelimiters, out TableColumnAlignment[] columnAlignments)
+size_t startTable(ref OutBuffer buf, size_t iStart, size_t iEnd, const ref Loc loc, bool inQuote, ref MarkdownDelimiter[] inlineDelimiters, out TableColumnAlignment[] columnAlignments)
{
const iDelimiterRowEnd = parseTableDelimiterRow(buf, iEnd + 1, inQuote, columnAlignments);
if (iDelimiterRowEnd)
@@ -3981,7 +3962,7 @@ private size_t startTable(ref OutBuffer buf, size_t iStart, size_t iEnd, const r
* delta = the number of characters added by replacing the row, or `0` if unchanged
* Returns: `true` if a table row was found and replaced
*/
-private bool replaceTableRow(ref OutBuffer buf, size_t iStart, size_t iEnd, const ref Loc loc, ref MarkdownDelimiter[] inlineDelimiters, TableColumnAlignment[] columnAlignments, bool headerRow, out size_t delta)
+bool replaceTableRow(ref OutBuffer buf, size_t iStart, size_t iEnd, const ref Loc loc, ref MarkdownDelimiter[] inlineDelimiters, TableColumnAlignment[] columnAlignments, bool headerRow, out size_t delta)
{
delta = 0;
@@ -4108,7 +4089,7 @@ private bool replaceTableRow(ref OutBuffer buf, size_t iStart, size_t iEnd, cons
* columnAlignments = alignments for each column; upon return is set to length `0`
* Returns: the number of characters added by ending the table, or `0` if unchanged
*/
-private size_t endTable(ref OutBuffer buf, size_t i, ref TableColumnAlignment[] columnAlignments)
+size_t endTable(ref OutBuffer buf, size_t i, ref TableColumnAlignment[] columnAlignments)
{
if (!columnAlignments.length)
return 0;
@@ -4130,7 +4111,7 @@ private size_t endTable(ref OutBuffer buf, size_t i, ref TableColumnAlignment[]
* columnAlignments = alignments for each column; upon return is set to length `0`
* Returns: the number of characters added by replacing the row, or `0` if unchanged
*/
-private size_t endRowAndTable(ref OutBuffer buf, size_t iStart, size_t iEnd, const ref Loc loc, ref MarkdownDelimiter[] inlineDelimiters, ref TableColumnAlignment[] columnAlignments)
+size_t endRowAndTable(ref OutBuffer buf, size_t iStart, size_t iEnd, const ref Loc loc, ref MarkdownDelimiter[] inlineDelimiters, ref TableColumnAlignment[] columnAlignments)
{
size_t delta;
replaceTableRow(buf, iStart, iEnd, loc, inlineDelimiters, columnAlignments, false, delta);
@@ -4148,7 +4129,7 @@ private size_t endRowAndTable(ref OutBuffer buf, size_t iStart, size_t iEnd, con
* buf = an OutBuffer containing the DDoc
* offset = the index within buf to start highlighting
*/
-private void highlightText(Scope* sc, Dsymbols* a, Loc loc, ref OutBuffer buf, size_t offset)
+void highlightText(Scope* sc, Dsymbols* a, Loc loc, ref OutBuffer buf, size_t offset)
{
const incrementLoc = loc.linnum == 0 ? 1 : 0;
loc.linnum = loc.linnum + incrementLoc;
@@ -4417,7 +4398,7 @@ private void highlightText(Scope* sc, Dsymbols* a, Loc loc, ref OutBuffer buf, s
codebuf.write(buf[iCodeStart + count .. i]);
// escape the contents, but do not perform highlighting except for DDOC_PSYMBOL
highlightCode(sc, a, codebuf, 0);
- escapeStrayParenthesis(loc, &codebuf, 0, false, sc.eSink);
+ escapeStrayParenthesis(loc, codebuf, 0, false, sc.eSink);
buf.remove(iCodeStart, i - iCodeStart + count); // also trimming off the current `
immutable pre = "$(DDOC_BACKQUOTED ";
i = buf.insert(iCodeStart, pre);
@@ -4626,7 +4607,7 @@ private void highlightText(Scope* sc, Dsymbols* a, Loc loc, ref OutBuffer buf, s
highlightCode2(sc, a, codebuf, 0);
else
codebuf.remove(codebuf.length-1, 1); // remove the trailing 0 byte
- escapeStrayParenthesis(loc, &codebuf, 0, false, sc.eSink);
+ escapeStrayParenthesis(loc, codebuf, 0, false, sc.eSink);
buf.remove(iCodeStart, i - iCodeStart);
i = buf.insert(iCodeStart, codebuf[]);
i = buf.insert(i, ")\n");
@@ -5002,7 +4983,7 @@ private void highlightText(Scope* sc, Dsymbols* a, Loc loc, ref OutBuffer buf, s
/**************************************************
* Highlight code for DDOC section.
*/
-private void highlightCode(Scope* sc, Dsymbol s, ref OutBuffer buf, size_t offset)
+void highlightCode(Scope* sc, Dsymbol s, ref OutBuffer buf, size_t offset)
{
auto imp = s.isImport();
if (imp && imp.aliases.length > 0)
@@ -5037,7 +5018,7 @@ private void highlightCode(Scope* sc, Dsymbol s, ref OutBuffer buf, size_t offse
/****************************************************
*/
-private void highlightCode(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t offset)
+void highlightCode(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t offset)
{
//printf("highlightCode(a = '%s')\n", a.toChars());
bool resolvedTemplateParameters = false;
@@ -5119,7 +5100,7 @@ private void highlightCode(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t off
size_t lastOffset = parametersBuf.length;
- .toCBuffer(tp, &parametersBuf, &hgs);
+ toCBuffer(tp, parametersBuf, hgs);
paramLens[parami] = parametersBuf.length - lastOffset;
}
@@ -5163,7 +5144,7 @@ private void highlightCode(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t off
/****************************************
*/
-private void highlightCode3(Scope* sc, ref OutBuffer buf, const(char)* p, const(char)* pend)
+void highlightCode3(Scope* sc, ref OutBuffer buf, const(char)* p, const(char)* pend)
{
for (; p < pend; p++)
{
@@ -5178,7 +5159,7 @@ private void highlightCode3(Scope* sc, ref OutBuffer buf, const(char)* p, const(
/**************************************************
* Highlight code for CODE section.
*/
-private void highlightCode2(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t offset)
+void highlightCode2(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t offset)
{
scope eSinkNull = new ErrorSinkNull();
@@ -5236,7 +5217,7 @@ private void highlightCode2(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t of
* https://issues.dlang.org/show_bug.cgi?id=7715
* https://issues.dlang.org/show_bug.cgi?id=10519
*/
- escapeDdocString(&res, o);
+ escapeDdocString(res, o);
res.writeByte(')');
}
else
@@ -5252,7 +5233,7 @@ private void highlightCode2(Scope* sc, Dsymbols* a, ref OutBuffer buf, size_t of
/****************************************
* Determine if p points to the start of a "..." parameter identifier.
*/
-private bool isCVariadicArg(const(char)[] p) @nogc nothrow pure @safe
+bool isCVariadicArg(const(char)[] p) @nogc nothrow pure @safe
{
return p.length >= 3 && p[0 .. 3] == "...";
}
@@ -5260,6 +5241,7 @@ private bool isCVariadicArg(const(char)[] p) @nogc nothrow pure @safe
/****************************************
* Determine if p points to the start of an identifier.
*/
+@trusted
bool isIdStart(const(char)* p) @nogc nothrow pure
{
dchar c = *p;
@@ -5279,6 +5261,7 @@ bool isIdStart(const(char)* p) @nogc nothrow pure
/****************************************
* Determine if p points to the rest of an identifier.
*/
+@trusted
bool isIdTail(const(char)* p) @nogc nothrow pure
{
dchar c = *p;
@@ -5298,7 +5281,7 @@ bool isIdTail(const(char)* p) @nogc nothrow pure
/****************************************
* Determine if p points to the indentation space.
*/
-private bool isIndentWS(const(char)* p) @nogc nothrow pure @safe
+bool isIndentWS(const(char)* p) @nogc nothrow pure @safe
{
return (*p == ' ') || (*p == '\t');
}
@@ -5316,7 +5299,7 @@ int utfStride(const(char)* p) @nogc nothrow pure
return cast(int)i;
}
-private inout(char)* stripLeadingNewlines(inout(char)* s) @nogc nothrow pure
+inout(char)* stripLeadingNewlines(inout(char)* s) @nogc nothrow pure
{
while (s && *s == '\n' || *s == '\r')
s++;
diff --git a/gcc/d/dmd/doc.h b/gcc/d/dmd/doc.h
index 669e308..562427f 100644
--- a/gcc/d/dmd/doc.h
+++ b/gcc/d/dmd/doc.h
@@ -13,4 +13,5 @@
class Module;
class ErrorSink;
-void gendocfile(Module *m, ErrorSink *eSink);
+void gendocfile(Module *m, const char *ddoctext_ptr, size_t ddoctext_length,
+ const char *datetime, ErrorSink *eSink, OutBuffer &outbuf);
diff --git a/gcc/d/dmd/dscope.d b/gcc/d/dmd/dscope.d
index c2c0628..981e093 100644
--- a/gcc/d/dmd/dscope.d
+++ b/gcc/d/dmd/dscope.d
@@ -169,6 +169,7 @@ extern (C++) struct Scope
sc.scopesym = new ScopeDsymbol();
sc.scopesym.symtab = new DsymbolTable();
sc.eSink = eSink;
+ assert(eSink);
// Add top level package as member of this global scope
Dsymbol m = _module;
while (m.parent)
diff --git a/gcc/d/dmd/dstruct.d b/gcc/d/dmd/dstruct.d
index 49b9841..56aad3e 100644
--- a/gcc/d/dmd/dstruct.d
+++ b/gcc/d/dmd/dstruct.d
@@ -105,6 +105,7 @@ extern (C++) void semanticTypeInfo(Scope* sc, Type t)
if (!sc) // inline may request TypeInfo.
{
Scope scx;
+ scx.eSink = global.errorSink;
scx._module = sd.getModule();
getTypeInfoType(sd.loc, t, &scx);
sd.requestTypeInfo = true;
@@ -272,7 +273,7 @@ extern (C++) class StructDeclaration : AggregateDeclaration
{
// .stringof is always defined (but may be hidden by some other symbol)
if(ident != Id.stringof && !(flags & IgnoreErrors) && semanticRun < PASS.semanticdone)
- error("is forward referenced when looking for `%s`", ident.toChars());
+ .error(loc, "%s `%s` is forward referenced when looking for `%s`", kind, toPrettyChars, ident.toChars());
return null;
}
diff --git a/gcc/d/dmd/dsymbol.d b/gcc/d/dmd/dsymbol.d
index 0fa4dbc..1f4a466 100644
--- a/gcc/d/dmd/dsymbol.d
+++ b/gcc/d/dmd/dsymbol.d
@@ -375,79 +375,6 @@ extern (C++) class Dsymbol : ASTNode
return '`' ~ cstr.toDString() ~ "`\0";
}
- static if (__VERSION__ < 2092)
- {
- final void error(const ref Loc loc, const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.error, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
-
- final void error(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- const loc = getLoc();
- .verrorReport(loc, format, ap, ErrorKind.error, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
-
- final void deprecation(const ref Loc loc, const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.deprecation, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
-
- final void deprecation(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- const loc = getLoc();
- .verrorReport(loc, format, ap, ErrorKind.deprecation, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
- }
- else
- {
- pragma(printf) final void error(const ref Loc loc, const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.error, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
-
- pragma(printf) final void error(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- const loc = getLoc();
- .verrorReport(loc, format, ap, ErrorKind.error, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
-
- pragma(printf) final void deprecation(const ref Loc loc, const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.deprecation, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
-
- pragma(printf) final void deprecation(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- const loc = getLoc();
- .verrorReport(loc, format, ap, ErrorKind.deprecation, kind(), prettyFormatHelper().ptr);
- va_end(ap);
- }
- }
-
final bool checkDeprecated(const ref Loc loc, Scope* sc)
{
if (global.params.useDeprecated == DiagnosticReporting.off)
@@ -470,9 +397,9 @@ extern (C++) class Dsymbol : ASTNode
break;
}
if (message)
- deprecation(loc, "is deprecated - %s", message);
+ deprecation(loc, "%s `%s` is deprecated - %s", kind, toPrettyChars, message);
else
- deprecation(loc, "is deprecated");
+ deprecation(loc, "%s `%s` is deprecated", kind, toPrettyChars);
if (auto ti = sc.parent ? sc.parent.isInstantiated() : null)
ti.printInstantiationTrace(Classification.deprecation);
@@ -886,7 +813,7 @@ extern (C++) class Dsymbol : ASTNode
if (ident == Id.__sizeof ||
!(sc && sc.flags & SCOPE.Cfile) && (ident == Id.__xalignof || ident == Id._mangleof))
{
- error("`.%s` property cannot be redefined", ident.toChars());
+ .error(loc, "%s `%s` `.%s` property cannot be redefined", kind, toPrettyChars, ident.toChars());
errors = true;
}
}
@@ -1026,7 +953,7 @@ extern (C++) class Dsymbol : ASTNode
*/
uinteger_t size(const ref Loc loc)
{
- error("symbol `%s` has no size", toChars());
+ .error(loc, "%s `%s` symbol `%s` has no size", kind, toPrettyChars, toChars());
return SIZE_INVALID;
}
@@ -1776,7 +1703,7 @@ public:
}
else
{
- s1.error(s1.loc, "conflicts with %s `%s` at %s", s2.kind(), s2.toPrettyChars(), s2.locToChars());
+ .error(s1.loc, "%s `%s` conflicts with %s `%s` at %s", s1.kind, s1.toPrettyChars, s2.kind(), s2.toPrettyChars(), s2.locToChars());
}
}
@@ -1801,6 +1728,7 @@ public:
if (!tfgetmembers)
{
Scope sc;
+ sc.eSink = global.errorSink;
auto parameters = new Parameters();
Parameters* p = new Parameter(STC.in_, Type.tchar.constOf().arrayOf(), null, null);
parameters.push(p);
@@ -2139,7 +2067,7 @@ extern (C++) final class ArrayScopeSymbol : ScopeDsymbol
*/
if (exp.op == EXP.array && (cast(ArrayExp)exp).arguments.length != 1)
{
- exp.error("`%s` only defines opDollar for one dimension", ad.toChars());
+ error(exp.loc, "`%s` only defines opDollar for one dimension", ad.toChars());
return null;
}
Declaration d = s.isDeclaration();
@@ -2148,7 +2076,7 @@ extern (C++) final class ArrayScopeSymbol : ScopeDsymbol
}
e = e.expressionSemantic(sc);
if (!e.type)
- exp.error("`%s` has no value", e.toChars());
+ error(exp.loc, "`%s` has no value", e.toChars());
t = e.type.toBasetype();
if (t && t.ty == Tfunction)
e = new CallExp(e.loc, e);
diff --git a/gcc/d/dmd/dsymbol.h b/gcc/d/dmd/dsymbol.h
index 96fa8fd..0278975 100644
--- a/gcc/d/dmd/dsymbol.h
+++ b/gcc/d/dmd/dsymbol.h
@@ -205,10 +205,6 @@ public:
const char *locToChars();
bool equals(const RootObject * const o) const override;
bool isAnonymous() const;
- void error(const Loc &loc, const char *format, ...);
- void error(const char *format, ...);
- void deprecation(const Loc &loc, const char *format, ...);
- void deprecation(const char *format, ...);
bool checkDeprecated(const Loc &loc, Scope *sc);
Module *getModule();
bool isCsymbol();
diff --git a/gcc/d/dmd/dsymbolsem.d b/gcc/d/dmd/dsymbolsem.d
index 378d3e6..65c0795 100644
--- a/gcc/d/dmd/dsymbolsem.d
+++ b/gcc/d/dmd/dsymbolsem.d
@@ -83,7 +83,7 @@ else version = MARS;
enum LOG = false;
-private uint setMangleOverride(Dsymbol s, const(char)[] sym)
+package uint setMangleOverride(Dsymbol s, const(char)[] sym)
{
if (s.isFuncDeclaration() || s.isVarDeclaration())
{
@@ -205,7 +205,7 @@ const(char)* getMessage(DeprecatedDeclaration dd)
if (auto se = dd.msg.toStringExp())
dd.msgstr = se.toStringz().ptr;
else
- dd.msg.error("compile time constant expected, not `%s`", dd.msg.toChars());
+ error(dd.msg.loc, "compile time constant expected, not `%s`", dd.msg.toChars());
}
return dd.msgstr;
}
@@ -289,7 +289,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
override void visit(Dsymbol dsym)
{
- dsym.error("%p has no semantic routine", dsym);
+ .error(dsym.loc, "%s `%s` %p has no semantic routine", dsym.kind, dsym.toPrettyChars, dsym);
}
override void visit(ScopeDsymbol) { }
@@ -467,7 +467,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if (dsym.storage_class & STC.extern_ && dsym._init)
- dsym.error("extern symbols cannot have initializers");
+ .error(dsym.loc, "%s `%s` extern symbols cannot have initializers", dsym.kind, dsym.toPrettyChars);
AggregateDeclaration ad = dsym.isThis();
if (ad)
@@ -553,16 +553,17 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (inferred)
{
- dsym.error("- type `%s` is inferred from initializer `%s`, and variables cannot be of type `void`", dsym.type.toChars(), dsym._init.toChars());
+ .error(dsym.loc, "%s `%s` - type `%s` is inferred from initializer `%s`, and variables cannot be of type `void`",
+ dsym.kind, dsym.toPrettyChars, dsym.type.toChars(), toChars(dsym._init));
}
else
- dsym.error("- variables cannot be of type `void`");
+ .error(dsym.loc, "%s `%s` - variables cannot be of type `void`", dsym.kind, dsym.toPrettyChars);
dsym.type = Type.terror;
tb = dsym.type;
}
if (tb.ty == Tfunction)
{
- dsym.error("cannot be declared to be a function");
+ .error(dsym.loc, "%s `%s` cannot be declared to be a function", dsym.kind, dsym.toPrettyChars);
dsym.type = Type.terror;
tb = dsym.type;
}
@@ -572,7 +573,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
// or when the variable is defined externally
if (!ts.sym.members && !(dsym.storage_class & (STC.ref_ | STC.extern_)))
{
- dsym.error("- no definition of struct `%s`", ts.toChars());
+ .error(dsym.loc, "%s `%s` - no definition of struct `%s`", dsym.kind, dsym.toPrettyChars, ts.toChars());
// Explain why the definition is required when it's part of another type
if (!dsym.type.isTypeStruct())
@@ -590,7 +591,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
}
if ((dsym.storage_class & STC.auto_) && !inferred)
- dsym.error("- storage class `auto` has no effect if type is not inferred, did you mean `scope`?");
+ .error(dsym.loc, "%s `%s` - storage class `auto` has no effect if type is not inferred, did you mean `scope`?", dsym.kind, dsym.toPrettyChars);
if (auto tt = tb.isTypeTuple())
{
@@ -762,12 +763,12 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (StorageClass stc = dsym.storage_class & (STC.synchronized_ | STC.override_ | STC.abstract_ | STC.final_))
{
if (stc == STC.final_)
- dsym.error("cannot be `final`, perhaps you meant `const`?");
+ .error(dsym.loc, "%s `%s` cannot be `final`, perhaps you meant `const`?", dsym.kind, dsym.toPrettyChars);
else
{
OutBuffer buf;
- stcToBuffer(&buf, stc);
- dsym.error("cannot be `%s`", buf.peekChars());
+ stcToBuffer(buf, stc);
+ .error(dsym.loc, "%s `%s` cannot be `%s`", dsym.kind, dsym.toPrettyChars, buf.peekChars());
}
dsym.storage_class &= ~stc; // strip off
}
@@ -783,8 +784,8 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (stc)
{
OutBuffer buf;
- stcToBuffer(&buf, stc);
- dsym.error("cannot be `scope` and `%s`", buf.peekChars());
+ stcToBuffer(buf, stc);
+ .error(dsym.loc, "%s `%s` cannot be `scope` and `%s`", dsym.kind, dsym.toPrettyChars, buf.peekChars());
}
else if (dsym.isMember())
{
@@ -809,7 +810,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
AggregateDeclaration aad = parent.isAggregateDeclaration();
if (aad)
{
- if (global.params.vfield && dsym.storage_class & (STC.const_ | STC.immutable_) && dsym._init && !dsym._init.isVoidInitializer())
+ if (global.params.v.field && dsym.storage_class & (STC.const_ | STC.immutable_) && dsym._init && !dsym._init.isVoidInitializer())
{
const(char)* s = (dsym.storage_class & STC.immutable_) ? "immutable" : "const";
message(dsym.loc, "`%s.%s` is `%s` field", ad.toPrettyChars(), dsym.toChars(), s);
@@ -850,7 +851,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
AggregateDeclaration ad2 = ti.tempdecl.isMember();
if (ad2 && dsym.storage_class != STC.undefined_)
{
- dsym.error("- cannot use template to add field to aggregate `%s`", ad2.toChars());
+ .error(dsym.loc, "%s `%s` - cannot use template to add field to aggregate `%s`", dsym.kind, dsym.toPrettyChars, ad2.toChars());
}
}
}
@@ -873,14 +874,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if ((dsym.storage_class & (STC.ref_ | STC.parameter | STC.foreach_ | STC.temp | STC.result)) == STC.ref_ && dsym.ident != Id.This)
{
- dsym.error("- only parameters, functions and `foreach` declarations can be `ref`");
+ .error(dsym.loc, "%s `%s` - only parameters, functions and `foreach` declarations can be `ref`", dsym.kind, dsym.toPrettyChars);
}
if (dsym.type.hasWild())
{
if (dsym.storage_class & (STC.static_ | STC.extern_ | STC.gshared | STC.manifest | STC.field) || dsym.isDataseg())
{
- dsym.error("- only parameters or stack-based variables can be `inout`");
+ .error(dsym.loc, "%s `%s` - only parameters or stack-based variables can be `inout`", dsym.kind, dsym.toPrettyChars);
}
FuncDeclaration func = sc.func;
if (func)
@@ -898,7 +899,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if (!isWild)
{
- dsym.error("- `inout` variables can only be declared inside `inout` functions");
+ .error(dsym.loc, "%s `%s` - `inout` variables can only be declared inside `inout` functions", dsym.kind, dsym.toPrettyChars);
}
}
}
@@ -918,7 +919,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
}
else
- dsym.error("- default construction is disabled for type `%s`", dsym.type.toChars());
+ .error(dsym.loc, "%s `%s` - default construction is disabled for type `%s`", dsym.kind, dsym.toPrettyChars, dsym.type.toChars());
}
}
@@ -927,7 +928,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (dsym.storage_class & (STC.field | STC.out_ | STC.ref_ | STC.static_ | STC.manifest | STC.gshared) || !fd)
{
- dsym.error("globals, statics, fields, manifest constants, ref and out parameters cannot be `scope`");
+ .error(dsym.loc, "%s `%s` globals, statics, fields, manifest constants, ref and out parameters cannot be `scope`", dsym.kind, dsym.toPrettyChars);
}
// @@@DEPRECATED_2.097@@@ https://dlang.org/deprecate.html#scope%20as%20a%20type%20constraint
@@ -936,7 +937,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!(dsym.storage_class & STC.scope_))
{
if (!(dsym.storage_class & STC.parameter) && dsym.ident != Id.withSym)
- dsym.error("reference to `scope class` must be `scope`");
+ .error(dsym.loc, "%s `%s` reference to `scope class` must be `scope`", dsym.kind, dsym.toPrettyChars);
}
}
@@ -973,13 +974,13 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (dsym._init)
{ } // remember we had an explicit initializer
else if (dsym.storage_class & STC.manifest)
- dsym.error("- manifest constants must have initializers");
+ .error(dsym.loc, "%s `%s` - manifest constants must have initializers", dsym.kind, dsym.toPrettyChars);
// Don't allow non-extern, non-__gshared variables to be interfaced with C++
if (dsym._linkage == LINK.cpp && !(dsym.storage_class & (STC.ctfe | STC.extern_ | STC.gshared)) && dsym.isDataseg())
{
const char* p = (dsym.storage_class & STC.shared_) ? "shared" : "static";
- dsym.error("cannot have `extern(C++)` linkage because it is `%s`", p);
+ .error(dsym.loc, "%s `%s` cannot have `extern(C++)` linkage because it is `%s`", dsym.kind, dsym.toPrettyChars, p);
errorSupplemental(dsym.loc, "perhaps declare it as `__gshared` instead");
dsym.errors = true;
}
@@ -1001,7 +1002,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
//printf("Providing default initializer for '%s'\n", dsym.toChars());
if (sz == SIZE_INVALID && dsym.type.ty != Terror)
- dsym.error("- size of type `%s` is invalid", dsym.type.toChars());
+ .error(dsym.loc, "%s `%s` - size of type `%s` is invalid", dsym.kind, dsym.toPrettyChars, dsym.type.toChars());
Type tv = dsym.type;
while (tv.ty == Tsarray) // Don't skip Tenum
@@ -1036,7 +1037,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if (dsym.type.baseElemOf().ty == Tvoid)
{
- dsym.error("of type `%s` does not have a default initializer", dsym.type.toChars());
+ .error(dsym.loc, "%s `%s` of type `%s` does not have a default initializer", dsym.kind, dsym.toPrettyChars, dsym.type.toChars());
}
else if (auto e = dsym.type.defaultInit(dsym.loc))
{
@@ -1057,7 +1058,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
dsym._init.isVoidInitializer() &&
!(dsym.storage_class & STC.field))
{
- dsym.error("- incomplete array type must have initializer");
+ .error(dsym.loc, "%s `%s` - incomplete array type must have initializer", dsym.kind, dsym.toPrettyChars);
}
ExpInitializer ei = dsym._init.isExpInitializer();
@@ -1089,7 +1090,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
e = dsym._init.initializerToExpression(null, (sc.flags & SCOPE.Cfile) != 0);
if (!e)
{
- dsym.error("is not a static and cannot have static initializer");
+ .error(dsym.loc, "%s `%s` is not a static and cannot have static initializer", dsym.kind, dsym.toPrettyChars);
e = ErrorExp.get();
}
}
@@ -1121,11 +1122,9 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (ne.member && !(ne.member.storage_class & STC.scope_))
{
import dmd.escape : setUnsafeDIP1000;
- const inSafeFunc = sc.func && sc.func.isSafeBypassingInference();
+ const inSafeFunc = sc.func && sc.func.isSafeBypassingInference(); // isSafeBypassingInference may call setUnsafe().
if (sc.setUnsafeDIP1000(false, dsym.loc, "`scope` allocation of `%s` requires that constructor be annotated with `scope`", dsym))
errorSupplemental(ne.member.loc, "is the location of the constructor");
- else if (global.params.obsolete && inSafeFunc)
- warningSupplemental(ne.member.loc, "is the location of the constructor");
}
ne.onstack = 1;
dsym.onstack = true;
@@ -1251,7 +1250,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
// The only allowable initializer is a (non-copy) constructor
if (ei.exp.isLvalue())
- dsym.error("of type struct `%s` uses `this(this)`, which is not allowed in static initialization", tb2.toChars());
+ .error(dsym.loc, "%s `%s` of type struct `%s` uses `this(this)`, which is not allowed in static initialization", dsym.kind, dsym.toPrettyChars, tb2.toChars());
}
}
}
@@ -1288,7 +1287,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
// currently disabled because of std.stdio.stdin, stdout and stderr
if (dsym.isDataseg() && !(dsym.storage_class & STC.extern_))
- dsym.error("static storage variables cannot have destructors");
+ .error(dsym.loc, "%s `%s` static storage variables cannot have destructors", dsym.kind, dsym.toPrettyChars);
}
}
@@ -1321,11 +1320,11 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
return;
if (!(global.params.bitfields || sc.flags & SCOPE.Cfile))
- dsym.error("use -preview=bitfields for bitfield support");
+ .error(dsym.loc, "%s `%s` use -preview=bitfields for bitfield support", dsym.kind, dsym.toPrettyChars);
if (!dsym.parent.isStructDeclaration() && !dsym.parent.isClassDeclaration())
{
- dsym.error("- bit-field must be member of struct, union, or class");
+ .error(dsym.loc, "%s `%s` - bit-field must be member of struct, union, or class", dsym.kind, dsym.toPrettyChars);
}
sc = sc.startCTFE();
@@ -1335,18 +1334,18 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!dsym.type.isintegral())
{
// C11 6.7.2.1-5
- width.error("bit-field type `%s` is not an integer type", dsym.type.toChars());
+ error(width.loc, "bit-field type `%s` is not an integer type", dsym.type.toChars());
dsym.errors = true;
}
if (!width.isIntegerExp())
{
- width.error("bit-field width `%s` is not an integer constant", dsym.width.toChars());
+ error(width.loc, "bit-field width `%s` is not an integer constant", dsym.width.toChars());
dsym.errors = true;
}
const uwidth = width.toInteger(); // uwidth is unsigned
if (uwidth == 0 && !dsym.isAnonymous())
{
- width.error("bit-field `%s` has zero width", dsym.toChars());
+ error(width.loc, "bit-field `%s` has zero width", dsym.toChars());
dsym.errors = true;
}
const sz = dsym.type.size();
@@ -1355,7 +1354,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
const max_width = sz * 8;
if (uwidth > max_width)
{
- width.error("width `%lld` of bit-field `%s` does not fit in type `%s`", cast(long)uwidth, dsym.toChars(), dsym.type.toChars());
+ error(width.loc, "width `%lld` of bit-field `%s` does not fit in type `%s`", cast(long)uwidth, dsym.toChars(), dsym.type.toChars());
dsym.errors = true;
}
dsym.fieldWidth = cast(uint)uwidth;
@@ -1455,7 +1454,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
import dmd.access : symbolIsVisible;
if (!symbolIsVisible(sc, sym) && !sym.errors)
{
- imp.mod.error(imp.loc, "member `%s` is not visible from module `%s`",
+ .error(imp.loc, "%s `%s` member `%s` is not visible from module `%s`", imp.mod.kind, imp.mod.toPrettyChars,
imp.names[i].toChars(), sc._module.toChars());
sym.errors = true;
}
@@ -1470,9 +1469,9 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
// https://issues.dlang.org/show_bug.cgi?id=23908
// Don't suggest symbols from the importer's module
if (s && s.parent != importer)
- imp.mod.error(imp.loc, "import `%s` not found, did you mean %s `%s`?", imp.names[i].toChars(), s.kind(), s.toPrettyChars());
+ .error(imp.loc, "%s `%s` import `%s` not found, did you mean %s `%s`?", imp.mod.kind, imp.mod.toPrettyChars, imp.names[i].toChars(), s.kind(), s.toPrettyChars());
else
- imp.mod.error(imp.loc, "import `%s` not found", imp.names[i].toChars());
+ .error(imp.loc, "%s `%s` import `%s` not found", imp.mod.kind, imp.mod.toPrettyChars, imp.names[i].toChars());
ad.type = Type.terror;
}
}
@@ -1512,11 +1511,11 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
ob.writestring(") : ");
// use visibility instead of sc.visibility because it couldn't be
// resolved yet, see the comment above
- visibilityToBuffer(ob, imp.visibility);
+ visibilityToBuffer(*ob, imp.visibility);
ob.writeByte(' ');
if (imp.isstatic)
{
- stcToBuffer(ob, STC.static_);
+ stcToBuffer(*ob, STC.static_);
ob.writeByte(' ');
}
ob.writestring(": ");
@@ -1623,12 +1622,12 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
e = se;
if (!se.len)
{
- pd.error("- zero-length string not allowed for mangled name");
+ .error(pd.loc, "%s `%s` - zero-length string not allowed for mangled name", pd.kind, pd.toPrettyChars);
return null;
}
if (se.sz != 1)
{
- pd.error("- mangled name characters can only be of type `char`");
+ .error(pd.loc, "%s `%s` - mangled name characters can only be of type `char`", pd.kind, pd.toPrettyChars);
return null;
}
version (all)
@@ -1651,18 +1650,18 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
else
{
- pd.error("char 0x%02x not allowed in mangled name", c);
+ .error(pd.loc, "%s `%s` char 0x%02x not allowed in mangled name", pd.kind, pd.toPrettyChars, c);
break;
}
}
if (const msg = utf_decodeChar(slice, i, c))
{
- pd.error("%.*s", cast(int)msg.length, msg.ptr);
+ .error(pd.loc, "%s `%s` %.*s", pd.kind, pd.toPrettyChars, cast(int)msg.length, msg.ptr);
break;
}
if (!isUniAlpha(c))
{
- pd.error("char `0x%04x` not allowed in mangled name", c);
+ .error(pd.loc, "%s `%s` char `0x%04x` not allowed in mangled name", pd.kind, pd.toPrettyChars, c);
break;
}
}
@@ -1714,7 +1713,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
e = se;
}
else
- e.error("must be a string");
+ error(e.loc, "must be a string");
}
if (agg)
{
@@ -1731,7 +1730,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
else if (auto td = s.isTemplateDeclaration())
{
- pd.error("cannot apply to a template declaration");
+ .error(pd.loc, "%s `%s` cannot apply to a template declaration", pd.kind, pd.toPrettyChars);
errorSupplemental(pd.loc, "use `template Class(Args...){ pragma(mangle, \"other_name\") class Class {} }`");
}
else if (auto se = verifyMangleString((*pd.args)[0]))
@@ -1739,7 +1738,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
const name = (cast(const(char)[])se.peekData()).xarraydup;
uint cnt = setMangleOverride(s, name);
if (cnt > 1)
- pd.error("can only apply to a single declaration");
+ .error(pd.loc, "%s `%s` can only apply to a single declaration", pd.kind, pd.toPrettyChars);
}
}
}
@@ -1748,7 +1747,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (pd.decl)
{
- pd.error("is missing a terminating `;`");
+ .error(pd.loc, "%s `%s` is missing a terminating `;`", pd.kind, pd.toPrettyChars);
declarations();
// do them anyway, to avoid segfaults.
}
@@ -1761,14 +1760,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (pd.ident == Id.linkerDirective)
{
if (!pd.args || pd.args.length != 1)
- pd.error("one string argument expected for pragma(linkerDirective)");
+ .error(pd.loc, "%s `%s` one string argument expected for pragma(linkerDirective)", pd.kind, pd.toPrettyChars);
else
{
auto se = semanticString(sc, (*pd.args)[0], "linker directive");
if (!se)
return noDeclarations();
(*pd.args)[0] = se;
- if (global.params.verbose)
+ if (global.params.v.verbose)
message("linkopt %.*s", cast(int)se.len, se.peekString().ptr);
}
return noDeclarations();
@@ -1787,7 +1786,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
else if (pd.ident == Id.lib)
{
if (!pd.args || pd.args.length != 1)
- pd.error("string expected for library name");
+ .error(pd.loc, "%s `%s` string expected for library name", pd.kind, pd.toPrettyChars);
else
{
auto se = semanticString(sc, (*pd.args)[0], "library name");
@@ -1796,7 +1795,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
(*pd.args)[0] = se;
auto name = se.peekString().xarraydup;
- if (global.params.verbose)
+ if (global.params.v.verbose)
message("library %s", name.ptr);
if (global.params.moduleDeps.buffer && !global.params.moduleDeps.name)
{
@@ -1831,8 +1830,8 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
pd.args = new Expressions();
if (pd.args.length == 0 || pd.args.length > 2)
{
- pd.error(pd.args.length == 0 ? "- string expected for mangled name"
- : "expected 1 or 2 arguments");
+ .error(pd.loc, pd.args.length == 0 ? "%s `%s` - string expected for mangled name"
+ : "%s `%s` expected 1 or 2 arguments", pd.kind, pd.toPrettyChars);
pd.args.setDim(1);
(*pd.args)[0] = ErrorExp.get(); // error recovery
}
@@ -1841,7 +1840,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
else if (pd.ident == Id.crt_constructor || pd.ident == Id.crt_destructor)
{
if (pd.args && pd.args.length != 0)
- pd.error("takes no argument");
+ .error(pd.loc, "%s `%s` takes no argument", pd.kind, pd.toPrettyChars);
else
{
immutable isCtor = pd.ident == Id.crt_constructor;
@@ -1874,14 +1873,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if (recurse(pd, isCtor) > 1)
- pd.error("can only apply to a single declaration");
+ .error(pd.loc, "%s `%s` can only apply to a single declaration", pd.kind, pd.toPrettyChars);
}
return declarations();
}
else if (pd.ident == Id.printf || pd.ident == Id.scanf)
{
if (pd.args && pd.args.length != 0)
- pd.error("takes no argument");
+ .error(pd.loc, "%s `%s` takes no argument", pd.kind, pd.toPrettyChars);
return declarations();
}
else if (!global.params.ignoreUnsupportedPragmas)
@@ -1890,7 +1889,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
return declarations();
}
- if (!global.params.verbose)
+ if (!global.params.v.verbose)
return declarations();
/* Print unrecognized pragmas
@@ -1946,7 +1945,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
const bool doUnittests = global.params.useUnitTests || global.params.ddoc.doOutput || global.params.dihdr.doOutput;
auto loc = adjustLocForMixin(str, cd.loc, global.params.mixinOut);
scope p = new Parser!ASTCodegen(loc, sc._module, str, false, global.errorSink, &global.compileEnv, doUnittests);
- p.transitionIn = global.params.vin;
+ p.transitionIn = global.params.v.vin;
p.nextToken();
auto d = p.parseDeclDefs(0);
@@ -1955,7 +1954,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (p.token.value != TOK.endOfFile)
{
- cd.error("incomplete mixin declaration `%s`", str.ptr);
+ .error(cd.loc, "%s `%s` incomplete mixin declaration `%s`", cd.kind, cd.toPrettyChars, str.ptr);
return null;
}
return d;
@@ -1992,7 +1991,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
const sident = se.toStringz();
if (!sident.length || !Identifier.isValidIdentifier(sident))
{
- ns.exp.error("expected valid identifier for C++ namespace but got `%.*s`",
+ error(ns.exp.loc, "expected valid identifier for C++ namespace but got `%.*s`",
cast(int)sident.length, sident.ptr);
return null;
}
@@ -2030,7 +2029,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
return; // An error happened in `identFromSE`
}
else
- ns.exp.error("`%s`: index %llu is not a string constant, it is a `%s`",
+ error(ns.exp.loc, "`%s`: index %llu is not a string constant, it is a `%s`",
ns.exp.toChars(), cast(ulong) d, ns.exp.type.toChars());
}
}
@@ -2040,8 +2039,8 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
else if (ns.exp.isTypeExp() && ns.exp.isTypeExp().type.toBasetype().isTypeTuple())
{
}
- else
- ns.exp.error("compile time string constant (or sequence) expected, not `%s`",
+ else if (!ns.exp.type.isTypeError())
+ error(ns.exp.loc, "compile time string constant (or sequence) expected, not `%s`",
ns.exp.toChars());
attribSemantic(ns);
}
@@ -2198,7 +2197,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if (ed.memtype.ty == Tvoid)
{
- ed.error("base type must not be `void`");
+ .error(ed.loc, "%s `%s` base type must not be `void`", ed.kind, ed.toPrettyChars);
ed.memtype = Type.terror;
}
if (ed.memtype.ty == Terror)
@@ -2219,7 +2218,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (ed.members.length == 0)
{
- ed.error("enum `%s` must have at least one member", ed.toChars());
+ .error(ed.loc, "%s `%s` enum `%s` must have at least one member", ed.kind, ed.toPrettyChars, ed.toChars());
ed.errors = true;
ed.semanticRun = PASS.semanticdone;
return;
@@ -2310,13 +2309,13 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!ie)
{
// C11 6.7.2.2-2
- em.error("enum member must be an integral constant expression, not `%s` of type `%s`", e.toChars(), e.type.toChars());
+ .error(em.loc, "%s `%s` enum member must be an integral constant expression, not `%s` of type `%s`", em.kind, em.toPrettyChars, e.toChars(), e.type.toChars());
return errorReturn(em);
}
if (ed.memtype && !ir.contains(getIntRange(ie)))
{
// C11 6.7.2.2-2
- em.error("enum member value `%s` does not fit in `%s`", e.toChars(), commonType.toChars());
+ .error(em.loc, "%s `%s` enum member value `%s` does not fit in `%s`", em.kind, em.toPrettyChars, e.toChars(), commonType.toChars());
return errorReturn(em);
}
nextValue = ie.toInteger();
@@ -2333,7 +2332,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
Expression max = getProperty(commonType, null, em.loc, Id.max, 0);
if (nextValue == max.toInteger())
{
- em.error("initialization with `%s+1` causes overflow for type `%s`", max.toChars(), commonType.toChars());
+ .error(em.loc, "%s `%s` initialization with `%s+1` causes overflow for type `%s`", em.kind, em.toPrettyChars, max.toChars(), commonType.toChars());
return errorReturn(em);
}
nextValue += 1;
@@ -2393,7 +2392,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
return;
if (em.semanticRun == PASS.semantic)
{
- em.error("circular reference to `enum` member");
+ .error(em.loc, "%s `%s` circular reference to `enum` member", em.kind, em.toPrettyChars);
return errorReturn();
}
assert(em.ed);
@@ -2528,9 +2527,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!em.ed.isAnonymous())
em.ed.memtype = t;
}
+ const errors = global.startGagging();
Expression e = new IntegerExp(em.loc, 0, t);
e = e.ctfeInterpret();
-
+ if (global.endGagging(errors))
+ {
+ error(em.loc, "cannot generate 0 value of type `%s` for `%s`",
+ t.toChars(), em.toChars());
+ }
// save origValue for better json output
em.origValue = e;
@@ -2564,7 +2568,9 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (emprev.errors)
return errorReturn();
+ auto errors = global.startGagging();
Expression eprev = emprev.value;
+ assert(eprev);
// .toHeadMutable() due to https://issues.dlang.org/show_bug.cgi?id=18645
Type tprev = eprev.type.toHeadMutable().equals(em.ed.type.toHeadMutable())
? em.ed.memtype
@@ -2578,28 +2584,47 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
emax = emax.expressionSemantic(sc);
emax = emax.ctfeInterpret();
- // Set value to (eprev + 1).
- // But first check that (eprev != emax)
- assert(eprev);
+ // check that (eprev != emax)
Expression e = new EqualExp(EXP.equal, em.loc, eprev, emax);
e = e.expressionSemantic(sc);
e = e.ctfeInterpret();
+ if (global.endGagging(errors))
+ {
+ // display an introductory error before showing what actually failed
+ error(em.loc, "cannot check `%s` value for overflow", em.toPrettyChars());
+ // rerun to show errors
+ Expression e2 = DotIdExp.create(em.ed.loc, new TypeExp(em.ed.loc, tprev), Id.max);
+ e2 = e2.expressionSemantic(sc);
+ e2 = e2.ctfeInterpret();
+ e2 = new EqualExp(EXP.equal, em.loc, eprev, e2);
+ e2 = e2.expressionSemantic(sc);
+ e2 = e2.ctfeInterpret();
+ }
+ // now any errors are for generating a value
if (e.toInteger())
{
auto mt = em.ed.memtype;
if (!mt)
mt = eprev.type;
- em.error("initialization with `%s.%s+1` causes overflow for type `%s`",
+ .error(em.loc, "%s `%s` initialization with `%s.%s+1` causes overflow for type `%s`", em.kind, em.toPrettyChars,
emprev.ed.toChars(), emprev.toChars(), mt.toChars());
return errorReturn();
}
-
+ errors = global.startGagging();
// Now set e to (eprev + 1)
e = new AddExp(em.loc, eprev, IntegerExp.literal!1);
e = e.expressionSemantic(sc);
e = e.castTo(sc, eprev.type);
e = e.ctfeInterpret();
-
+ if (global.endGagging(errors))
+ {
+ error(em.loc, "cannot generate value for `%s`", em.toPrettyChars());
+ // rerun to show errors
+ Expression e2 = new AddExp(em.loc, eprev, IntegerExp.literal!1);
+ e2 = e2.expressionSemantic(sc);
+ e2 = e2.castTo(sc, eprev.type);
+ e2 = e2.ctfeInterpret();
+ }
// save origValue (without cast) for better json output
if (e.op != EXP.error) // avoid duplicate diagnostics
{
@@ -2619,7 +2644,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
etest = etest.ctfeInterpret();
if (etest.toInteger())
{
- em.error("has inexact value due to loss of precision");
+ .error(em.loc, "%s `%s` has inexact value due to loss of precision", em.kind, em.toPrettyChars);
return errorReturn();
}
}
@@ -2714,7 +2739,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if (i + 1 != tempdecl.parameters.length && tp.isTemplateTupleParameter())
{
- tempdecl.error("template sequence parameter must be the last one");
+ .error(tempdecl.loc, "%s `%s` template sequence parameter must be the last one", tempdecl.kind, tempdecl.toPrettyChars);
tempdecl.errors = true;
}
}
@@ -2901,7 +2926,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
else
assert(0);
}
- tm.error("recursive mixin instantiation");
+ .error(tm.loc, "%s `%s` recursive mixin instantiation", tm.kind, tm.toPrettyChars);
return;
Lcontinue:
@@ -2958,7 +2983,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (++nest > global.recursionLimit)
{
global.gag = 0; // ensure error message gets printed
- tm.error("recursive expansion");
+ .error(tm.loc, "%s `%s` recursive expansion", tm.kind, tm.toPrettyChars);
fatal();
}
@@ -2986,7 +3011,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
// Give additional context info if error occurred during instantiation
if (global.errors != errorsave)
{
- tm.error("error instantiating");
+ .error(tm.loc, "%s `%s` error instantiating", tm.kind, tm.toPrettyChars);
tm.errors = true;
}
@@ -3214,7 +3239,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!fd.type.isTypeError())
{
- fd.error("`%s` must be a function instead of `%s`", fd.toChars(), fd.type.toChars());
+ .error(fd.loc, "%s `%s` `%s` must be a function instead of `%s`", fd.kind, fd.toPrettyChars, fd.toChars(), fd.type.toChars());
fd.type = Type.terror;
}
fd.errors = true;
@@ -3351,8 +3376,8 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!tf.isNaked() && !(funcdecl.isThis() || funcdecl.isNested()))
{
OutBuffer buf;
- MODtoBuffer(&buf, tf.mod);
- funcdecl.error("without `this` cannot be `%s`", buf.peekChars());
+ MODtoBuffer(buf, tf.mod);
+ .error(funcdecl.loc, "%s `%s` without `this` cannot be `%s`", funcdecl.kind, funcdecl.toPrettyChars, buf.peekChars());
tf.mod = 0; // remove qualifiers
}
@@ -3401,11 +3426,11 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
const idStr = funcdecl.isCrtCtor ? "crt_constructor" : "crt_destructor";
if (f.nextOf().ty != Tvoid)
- funcdecl.error("must return `void` for `pragma(%s)`", idStr.ptr);
+ .error(funcdecl.loc, "%s `%s` must return `void` for `pragma(%s)`", funcdecl.kind, funcdecl.toPrettyChars, idStr.ptr);
if (funcdecl._linkage != LINK.c && f.parameterList.length != 0)
- funcdecl.error("must be `extern(C)` for `pragma(%s)` when taking parameters", idStr.ptr);
+ .error(funcdecl.loc, "%s `%s` must be `extern(C)` for `pragma(%s)` when taking parameters", funcdecl.kind, funcdecl.toPrettyChars, idStr.ptr);
if (funcdecl.isThis())
- funcdecl.error("cannot be a non-static member function for `pragma(%s)`", idStr.ptr);
+ .error(funcdecl.loc, "%s `%s` cannot be a non-static member function for `pragma(%s)`", funcdecl.kind, funcdecl.toPrettyChars, idStr.ptr);
}
if (funcdecl.overnext && funcdecl.isCsymbol())
@@ -3419,7 +3444,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
auto fn = fnext.type.isTypeFunction();
if (!fn || !cFuncEquivalence(f, fn))
{
- funcdecl.error("redeclaration with different type");
+ .error(funcdecl.loc, "%s `%s` redeclaration with different type", funcdecl.kind, funcdecl.toPrettyChars);
//printf("t1: %s\n", f.toChars());
//printf("t2: %s\n", fn.toChars());
}
@@ -3427,7 +3452,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
if ((funcdecl.storage_class & STC.auto_) && !f.isref && !funcdecl.inferRetType)
- funcdecl.error("storage class `auto` has no effect if return type is not inferred");
+ .error(funcdecl.loc, "%s `%s` storage class `auto` has no effect if return type is not inferred", funcdecl.kind, funcdecl.toPrettyChars);
if (f.isreturn && !funcdecl.needThis() && !funcdecl.isNested())
{
@@ -3435,7 +3460,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
* the 'return' applies
*/
if (sc.scopesym && sc.scopesym.isAggregateDeclaration())
- funcdecl.error("`static` member has no `this` to which `return` can apply");
+ .error(funcdecl.loc, "%s `%s` `static` member has no `this` to which `return` can apply", funcdecl.kind, funcdecl.toPrettyChars);
else
error(funcdecl.loc, "top-level function `%s` has no `this` to which `return` can apply", funcdecl.toChars());
}
@@ -3449,20 +3474,20 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
sfunc = visibilityToChars(funcdecl.visibility.kind);
else
sfunc = "final";
- funcdecl.error("`%s` functions cannot be `abstract`", sfunc);
+ .error(funcdecl.loc, "%s `%s` `%s` functions cannot be `abstract`", funcdecl.kind, funcdecl.toPrettyChars, sfunc);
}
if (funcdecl.isOverride() && !funcdecl.isVirtual() && !funcdecl.isFuncLiteralDeclaration())
{
Visibility.Kind kind = funcdecl.visible().kind;
if ((kind == Visibility.Kind.private_ || kind == Visibility.Kind.package_) && funcdecl.isMember())
- funcdecl.error("`%s` method is not virtual and cannot override", visibilityToChars(kind));
+ .error(funcdecl.loc, "%s `%s` `%s` method is not virtual and cannot override", funcdecl.kind, funcdecl.toPrettyChars, visibilityToChars(kind));
else
- funcdecl.error("cannot override a non-virtual function");
+ .error(funcdecl.loc, "%s `%s` cannot override a non-virtual function", funcdecl.kind, funcdecl.toPrettyChars);
}
if (funcdecl.isAbstract() && funcdecl.isFinalFunc())
- funcdecl.error("cannot be both `final` and `abstract`");
+ .error(funcdecl.loc, "%s `%s` cannot be both `final` and `abstract`", funcdecl.kind, funcdecl.toPrettyChars);
if (funcdecl.printf || funcdecl.scanf)
{
@@ -3473,15 +3498,15 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
funcdecl.storage_class |= STC.abstract_;
if (funcdecl.isCtorDeclaration() || funcdecl.isPostBlitDeclaration() || funcdecl.isDtorDeclaration() || funcdecl.isInvariantDeclaration() || funcdecl.isNewDeclaration() || funcdecl.isDelete())
- funcdecl.error("constructors, destructors, postblits, invariants, new and delete functions are not allowed in interface `%s`", id.toChars());
+ .error(funcdecl.loc, "%s `%s` constructors, destructors, postblits, invariants, new and delete functions are not allowed in interface `%s`", funcdecl.kind, funcdecl.toPrettyChars, id.toChars());
if (funcdecl.fbody && funcdecl.isVirtual())
- funcdecl.error("function body only allowed in `final` functions in interface `%s`", id.toChars());
+ .error(funcdecl.loc, "%s `%s` function body only allowed in `final` functions in interface `%s`", funcdecl.kind, funcdecl.toPrettyChars, id.toChars());
}
if (UnionDeclaration ud = parent.isUnionDeclaration())
{
if (funcdecl.isPostBlitDeclaration() || funcdecl.isDtorDeclaration() || funcdecl.isInvariantDeclaration())
- funcdecl.error("destructors, postblits and invariants are not allowed in union `%s`", ud.toChars());
+ .error(funcdecl.loc, "%s `%s` destructors, postblits and invariants are not allowed in union `%s`", funcdecl.kind, funcdecl.toPrettyChars, ud.toChars());
}
if (StructDeclaration sd = parent.isStructDeclaration())
@@ -3539,7 +3564,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
/* If same name function exists in base class but 'this' is auto return,
* cannot find index of base class's vtbl[] to override.
*/
- funcdecl.error("return type inference is not supported if may override base class function");
+ .error(funcdecl.loc, "%s `%s` return type inference is not supported if may override base class function", funcdecl.kind, funcdecl.toPrettyChars);
}
/* Find index of existing function in base class's vtbl[] to override
@@ -3567,7 +3592,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
f2 = f2.overloadExactMatch(funcdecl.type);
if (f2 && f2.isFinalFunc() && f2.visible().kind != Visibility.Kind.private_)
- funcdecl.error("cannot override `final` function `%s`", f2.toPrettyChars());
+ .error(funcdecl.loc, "%s `%s` cannot override `final` function `%s`", funcdecl.kind, funcdecl.toPrettyChars, f2.toPrettyChars());
}
}
}
@@ -3657,7 +3682,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
/* the derived class cd doesn't have its vtbl[] allocated yet.
* https://issues.dlang.org/show_bug.cgi?id=21008
*/
- funcdecl.error("circular reference to class `%s`", cd.toChars());
+ .error(funcdecl.loc, "%s `%s` circular reference to class `%s`", funcdecl.kind, funcdecl.toPrettyChars, cd.toChars());
funcdecl.errors = true;
return;
}
@@ -3673,7 +3698,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
auto vtf = getFunctionType(fdv);
if (vtf.trust > TRUST.system && f.trust == TRUST.system)
- funcdecl.error("cannot override `@safe` method `%s` with a `@system` attribute",
+ .error(funcdecl.loc, "%s `%s` cannot override `@safe` method `%s` with a `@system` attribute", funcdecl.kind, funcdecl.toPrettyChars,
fdv.toPrettyChars);
if (fdc.toParent() == parent)
@@ -3694,7 +3719,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
// This function overrides fdv
if (fdv.isFinalFunc())
- funcdecl.error("cannot override `final` function `%s`", fdv.toPrettyChars());
+ .error(funcdecl.loc, "%s `%s` cannot override `final` function `%s`", funcdecl.kind, funcdecl.toPrettyChars, fdv.toPrettyChars());
if (!funcdecl.isOverride())
{
@@ -3720,7 +3745,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
bool fdcmixin = fdc.parent.isClassDeclaration() !is null;
if (thismixin == fdcmixin)
{
- funcdecl.error("multiple overrides of same function");
+ .error(funcdecl.loc, "%s `%s` multiple overrides of same function", funcdecl.kind, funcdecl.toPrettyChars);
}
/*
* https://issues.dlang.org/show_bug.cgi?id=711
@@ -3854,7 +3879,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (!funcdecl.tintro.nextOf().equals(ti.nextOf()) && !funcdecl.tintro.nextOf().isBaseOf(ti.nextOf(), null) && !ti.nextOf().isBaseOf(funcdecl.tintro.nextOf(), null))
{
- funcdecl.error("incompatible covariant types `%s` and `%s`", funcdecl.tintro.toChars(), ti.toChars());
+ .error(funcdecl.loc, "%s `%s` incompatible covariant types `%s` and `%s`", funcdecl.kind, funcdecl.toPrettyChars, funcdecl.tintro.toChars(), ti.toChars());
}
}
else
@@ -3889,7 +3914,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
OutBuffer buf;
auto fd = s.isFuncDeclaration();
- functionToBufferFull(cast(TypeFunction)(funcdecl.type), &buf,
+ functionToBufferFull(cast(TypeFunction)(funcdecl.type), buf,
new Identifier(funcdecl.toPrettyChars()), &hgs, null);
const(char)* funcdeclToChars = buf.peekChars();
@@ -3912,7 +3937,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
else
{
- functionToBufferFull(cast(TypeFunction)(fd.type), &buf1,
+ functionToBufferFull(cast(TypeFunction)(fd.type), buf1,
new Identifier(fd.toPrettyChars()), &hgs, null);
error(funcdecl.loc, "function `%s` does not override any function, did you mean to override `%s`?",
@@ -3927,7 +3952,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
}
else
- funcdecl.error("does not override any function");
+ .error(funcdecl.loc, "%s `%s` does not override any function", funcdecl.kind, funcdecl.toPrettyChars);
}
L2:
@@ -3949,7 +3974,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
f2 = f2.overloadExactMatch(funcdecl.type);
if (f2 && f2.isFinalFunc() && f2.visible().kind != Visibility.Kind.private_)
- funcdecl.error("cannot override `final` function `%s.%s`", b.sym.toChars(), f2.toPrettyChars());
+ .error(funcdecl.loc, "%s `%s` cannot override `final` function `%s.%s`", funcdecl.kind, funcdecl.toPrettyChars, b.sym.toChars(), f2.toPrettyChars());
}
}
}
@@ -3970,7 +3995,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
else if (funcdecl.isOverride() && !parent.isTemplateInstance())
- funcdecl.error("`override` only applies to class member functions");
+ .error(funcdecl.loc, "%s `%s` `override` only applies to class member functions", funcdecl.kind, funcdecl.toPrettyChars);
if (auto ti = parent.isTemplateInstance)
{
@@ -3985,7 +4010,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
Ldone:
if (!funcdecl.fbody && !funcdecl.allowsContractWithoutBody())
- funcdecl.error("`in` and `out` contracts can only appear without a body when they are virtual interface functions or abstract");
+ .error(funcdecl.loc, "%s `%s` `in` and `out` contracts can only appear without a body when they are virtual interface functions or abstract", funcdecl.kind, funcdecl.toPrettyChars);
/* Do not allow template instances to add virtual functions
* to a class.
@@ -4007,7 +4032,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
ClassDeclaration cd = ti.tempdecl.isClassMember();
if (cd)
{
- funcdecl.error("cannot use template to add virtual function to class `%s`", cd.toChars());
+ .error(funcdecl.loc, "%s `%s` cannot use template to add virtual function to class `%s`", funcdecl.kind, funcdecl.toPrettyChars, cd.toChars());
}
}
}
@@ -4029,7 +4054,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
funcdecl._scope.setNoFree();
__gshared bool printedMain = false; // semantic might run more than once
- if (global.params.verbose && !printedMain)
+ if (global.params.v.verbose && !printedMain)
{
const(char)* type = funcdecl.isMain() ? "main" : funcdecl.isWinMain() ? "winmain" : funcdecl.isDllMain() ? "dllmain" : cast(const(char)*)null;
Module mod = sc._module;
@@ -4147,8 +4172,8 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (ctd.fbody || !(ctd.storage_class & STC.disable))
{
- ctd.error("default constructor for structs only allowed " ~
- "with `@disable`, no body, and no parameters");
+ .error(ctd.loc, "%s `%s` default constructor for structs only allowed " ~
+ "with `@disable`, no body, and no parameters", ctd.kind, ctd.toPrettyChars);
ctd.storage_class |= STC.disable;
ctd.fbody = null;
}
@@ -4161,13 +4186,13 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (ctd.storage_class & STC.disable)
{
- ctd.error("is marked `@disable`, so it cannot have default "~
- "arguments for all parameters.");
+ .error(ctd.loc, "%s `%s` is marked `@disable`, so it cannot have default "~
+ "arguments for all parameters.", ctd.kind, ctd.toPrettyChars);
errorSupplemental(ctd.loc, "Use `@disable this();` if you want to disable default initialization.");
}
else
- ctd.error("all parameters have default arguments, "~
- "but structs cannot have default constructors.");
+ .error(ctd.loc, "%s `%s` all parameters have default arguments, "~
+ "but structs cannot have default constructors.", ctd.kind, ctd.toPrettyChars);
}
else if ((dim == 1 || (dim > 1 && tf.parameterList[1].defaultArg)))
{
@@ -4345,7 +4370,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
e = doAtomicOp("+=", v.ident, IntegerExp.literal!(1));
if (e is null)
{
- scd.error("shared static constructor within a template require `core.atomic : atomicOp` to be present");
+ .error(scd.loc, "%s `%s` shared static constructor within a template requires `core.atomic : atomicOp` to be present", scd.kind, scd.toPrettyChars);
return;
}
}
@@ -4441,7 +4466,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
e = doAtomicOp("-=", v.ident, IntegerExp.literal!(1));
if (e is null)
{
- sdd.error("shared static destructo within a template require `core.atomic : atomicOp` to be present");
+ .error(sdd.loc, "%s `%s` shared static destructor within a template requires `core.atomic : atomicOp` to be present", sdd.kind, sdd.toPrettyChars);
return;
}
}
@@ -4642,7 +4667,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
sd.storage_class |= sc.stc;
if (sd.storage_class & STC.abstract_)
- sd.error("structs, unions cannot be `abstract`");
+ .error(sd.loc, "%s `%s` structs, unions cannot be `abstract`", sd.kind, sd.toPrettyChars);
sd.userAttribDecl = sc.userAttribDecl;
@@ -4688,7 +4713,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (sd.type.ty != Terror)
{
- sd.error(sd.loc, "circular or forward reference");
+ .error(sd.loc, "%s `%s` circular or forward reference", sd.kind, sd.toPrettyChars);
sd.errors = true;
sd.type = Type.terror;
}
@@ -4762,7 +4787,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (fcall && fcall.isStatic())
{
- sd.error(fcall.loc, "`static opCall` is hidden by constructors and can never be called");
+ .error(fcall.loc, "%s `%s` `static opCall` is hidden by constructors and can never be called", sd.kind, sd.toPrettyChars);
errorSupplemental(fcall.loc, "Please use a factory method instead, or replace all constructors with `static opCall`.");
}
}
@@ -4786,7 +4811,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
printf("type = %d sym = %p, %s\n", sd.type.ty, sym, sym.toPrettyChars());
}
// https://issues.dlang.org/show_bug.cgi?id=19024
- sd.error("already exists at %s. Perhaps in another function with the same name?", sym.loc.toChars());
+ .error(sd.loc, "%s `%s` already exists at %s. Perhaps in another function with the same name?", sd.kind, sd.toPrettyChars, sym.loc.toChars());
}
}
@@ -4877,7 +4902,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
cldec.storage_class |= sc.stc;
if (cldec.storage_class & STC.auto_)
- cldec.error("storage class `auto` is invalid when declaring a class, did you mean to use `scope`?");
+ .error(cldec.loc, "%s `%s` storage class `auto` is invalid when declaring a class, did you mean to use `scope`?", cldec.kind, cldec.toPrettyChars);
if (cldec.storage_class & STC.scope_)
cldec.stack = true;
if (cldec.storage_class & STC.abstract_)
@@ -4974,7 +4999,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!tc)
{
if (b.type != Type.terror)
- cldec.error("base type must be `class` or `interface`, not `%s`", b.type.toChars());
+ .error(cldec.loc, "%s `%s` base type must be `class` or `interface`, not `%s`", cldec.kind, cldec.toPrettyChars, b.type.toChars());
cldec.baseclasses.remove(0);
goto L7;
}
@@ -4994,7 +5019,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (cdb == cldec)
{
- cldec.error("circular inheritance");
+ .error(cldec.loc, "%s `%s` circular inheritance", cldec.kind, cldec.toPrettyChars);
cldec.baseclasses.remove(0);
goto L7;
}
@@ -5038,14 +5063,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (multiClassError == 0)
{
- error(cldec.loc,"`%s`: base class must be specified first, " ~
+ .error(cldec.loc,"`%s`: base class must be specified first, " ~
"before any interfaces.", cldec.toPrettyChars());
multiClassError += 1;
}
else if (multiClassError >= 1)
{
if(multiClassError == 1)
- error(cldec.loc,"`%s`: multiple class inheritance is not supported." ~
+ .error(cldec.loc, "`%s`: multiple class inheritance is not supported." ~
" Use multiple interface inheritance and/or composition.", cldec.toPrettyChars());
multiClassError += 1;
@@ -5073,7 +5098,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
BaseClass* b2 = (*cldec.baseclasses)[j];
if (b2.sym == tc.sym)
{
- cldec.error("inherits from duplicate interface `%s`", b2.sym.toChars());
+ .error(cldec.loc, "%s `%s` inherits from duplicate interface `%s`", cldec.kind, cldec.toPrettyChars, b2.sym.toChars());
cldec.baseclasses.remove(i);
continue BCLoop;
}
@@ -5117,7 +5142,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
void badObjectDotD()
{
- cldec.error("missing or corrupt object.d");
+ .error(cldec.loc, "%s `%s` missing or corrupt object.d", cldec.kind, cldec.toPrettyChars);
fatal();
}
@@ -5141,7 +5166,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (cldec.baseClass)
{
if (cldec.baseClass.storage_class & STC.final_)
- cldec.error("cannot inherit from class `%s` because it is `final`", cldec.baseClass.toChars());
+ .error(cldec.loc, "%s `%s` cannot inherit from class `%s` because it is `final`", cldec.kind, cldec.toPrettyChars, cldec.baseClass.toChars());
// Inherit properties from base class
if (cldec.baseClass.isCOMclass())
@@ -5149,7 +5174,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (cldec.baseClass.isCPPclass())
cldec.classKind = ClassKind.cpp;
if (cldec.classKind != cldec.baseClass.classKind)
- cldec.error("with %s linkage cannot inherit from class `%s` with %s linkage",
+ .error(cldec.loc, "%s `%s` with %s linkage cannot inherit from class `%s` with %s linkage", cldec.kind, cldec.toPrettyChars,
cldec.classKind.toChars(), cldec.baseClass.toChars(), cldec.baseClass.classKind.toChars());
if (cldec.baseClass.stack)
@@ -5167,7 +5192,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
cldec.com = true;
if (cldec.classKind == ClassKind.cpp && !b.sym.isCPPinterface())
{
- error(cldec.loc, "C++ class `%s` cannot implement D interface `%s`",
+ .error(cldec.loc, "C++ class `%s` cannot implement D interface `%s`",
cldec.toPrettyChars(), b.sym.toPrettyChars());
}
}
@@ -5229,7 +5254,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (cldec.classKind == ClassKind.cpp && cldec.baseClass.vtbl.length == 0)
{
- cldec.error("C++ base class `%s` needs at least one virtual function", cldec.baseClass.toChars());
+ .error(cldec.loc, "%s `%s` C++ base class `%s` needs at least one virtual function", cldec.kind, cldec.toPrettyChars, cldec.baseClass.toChars());
}
// Copy vtbl[] from base class
@@ -5255,7 +5280,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
// Use the base class's 'this' member
if (cldec.storage_class & STC.static_)
- cldec.error("static class cannot inherit from nested class `%s`", cldec.baseClass.toChars());
+ .error(cldec.loc, "%s `%s` static class cannot inherit from nested class `%s`", cldec.kind, cldec.toPrettyChars, cldec.baseClass.toChars());
if (cldec.toParentLocal() != cldec.baseClass.toParentLocal() &&
(!cldec.toParentLocal() ||
!cldec.baseClass.toParentLocal().getType() ||
@@ -5263,14 +5288,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (cldec.toParentLocal())
{
- cldec.error("is nested within `%s`, but super class `%s` is nested within `%s`",
+ .error(cldec.loc, "%s `%s` is nested within `%s`, but super class `%s` is nested within `%s`", cldec.kind, cldec.toPrettyChars,
cldec.toParentLocal().toChars(),
cldec.baseClass.toChars(),
cldec.baseClass.toParentLocal().toChars());
}
else
{
- cldec.error("is not nested, but super class `%s` is nested within `%s`",
+ .error(cldec.loc, "%s `%s` is not nested, but super class `%s` is nested within `%s`", cldec.kind, cldec.toPrettyChars,
cldec.baseClass.toChars(),
cldec.baseClass.toParentLocal().toChars());
}
@@ -5284,14 +5309,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
{
if (cldec.toParent2() && cldec.toParent2() != cldec.toParentLocal())
{
- cldec.error("needs the frame pointer of `%s`, but super class `%s` needs the frame pointer of `%s`",
+ .error(cldec.loc, "%s `%s` needs the frame pointer of `%s`, but super class `%s` needs the frame pointer of `%s`", cldec.kind, cldec.toPrettyChars,
cldec.toParent2().toChars(),
cldec.baseClass.toChars(),
cldec.baseClass.toParent2().toChars());
}
else
{
- cldec.error("doesn't need a frame pointer, but super class `%s` needs the frame pointer of `%s`",
+ .error(cldec.loc, "%s `%s` doesn't need a frame pointer, but super class `%s` needs the frame pointer of `%s`", cldec.kind, cldec.toPrettyChars,
cldec.baseClass.toChars(),
cldec.baseClass.toParent2().toChars());
}
@@ -5386,7 +5411,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
}
else
{
- cldec.error("cannot implicitly generate a default constructor when base class `%s` is missing a default constructor",
+ .error(cldec.loc, "%s `%s` cannot implicitly generate a default constructor when base class `%s` is missing a default constructor", cldec.kind, cldec.toPrettyChars,
cldec.baseClass.toPrettyChars());
}
}
@@ -5410,7 +5435,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (auto f = hasIdentityOpAssign(cldec, sc2))
{
if (!(f.storage_class & STC.disable))
- cldec.error(f.loc, "identity assignment operator overload is illegal");
+ .error(f.loc, "%s `%s` identity assignment operator overload is illegal", cldec.kind, cldec.toPrettyChars);
}
cldec.inv = buildInv(cldec, sc2);
@@ -5430,7 +5455,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
cldec.isAbstract(); // recalculate
if (cldec.isabstract != isabstractsave)
{
- cldec.error("cannot infer `abstract` attribute due to circular dependencies");
+ .error(cldec.loc, "%s `%s` cannot infer `abstract` attribute due to circular dependencies", cldec.kind, cldec.toPrettyChars);
}
}
@@ -5443,7 +5468,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
printf("this = %p %s\n", cldec, cldec.toPrettyChars());
printf("type = %d sym = %p, %s\n", cldec.type.ty, cd, cd.toPrettyChars());
}
- cldec.error("already exists at %s. Perhaps in another function with the same name?", cd.loc.toChars());
+ .error(cldec.loc, "%s `%s` already exists at %s. Perhaps in another function with the same name?", cldec.kind, cldec.toPrettyChars, cd.loc.toChars());
}
if (global.errors != errors || (cldec.baseClass && cldec.baseClass.errors))
@@ -5464,7 +5489,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!vd.isThisDeclaration() &&
vd.visible() >= Visibility(Visibility.Kind.public_))
{
- vd.error("Field members of a `synchronized` class cannot be `%s`",
+ .error(vd.loc, "%s `%s` Field members of a `synchronized` class cannot be `%s`", vd.kind, vd.toPrettyChars,
visibilityToChars(vd.visible().kind));
}
}
@@ -5626,7 +5651,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
if (!tc || !tc.sym.isInterfaceDeclaration())
{
if (b.type != Type.terror)
- idec.error("base type must be `interface`, not `%s`", b.type.toChars());
+ .error(idec.loc, "%s `%s` base type must be `interface`, not `%s`", idec.kind, idec.toPrettyChars, b.type.toChars());
idec.baseclasses.remove(i);
continue;
}
@@ -5637,14 +5662,14 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
BaseClass* b2 = (*idec.baseclasses)[j];
if (b2.sym == tc.sym)
{
- idec.error("inherits from duplicate interface `%s`", b2.sym.toChars());
+ .error(idec.loc, "%s `%s` inherits from duplicate interface `%s`", idec.kind, idec.toPrettyChars, b2.sym.toChars());
idec.baseclasses.remove(i);
continue BCLoop;
}
}
if (tc.sym == idec || idec.isBaseOf2(tc.sym))
{
- idec.error("circular inheritance of interface");
+ .error(idec.loc, "%s `%s` circular inheritance of interface", idec.kind, idec.toPrettyChars);
idec.baseclasses.remove(i);
continue;
}
@@ -5907,7 +5932,7 @@ void templateInstanceSemantic(TemplateInstance tempinst, Scope* sc, ArgumentList
auto ungag = Ungag(global.gag);
if (!tempinst.gagged)
global.gag = 0;
- tempinst.error(tempinst.loc, "recursive template expansion");
+ .error(tempinst.loc, "%s `%s` recursive template expansion", tempinst.kind, tempinst.toPrettyChars);
if (tempinst.gagged)
tempinst.semanticRun = PASS.initial;
else
@@ -5965,7 +5990,7 @@ void templateInstanceSemantic(TemplateInstance tempinst, Scope* sc, ArgumentList
// If tempdecl is a mixin, disallow it
if (tempdecl.ismixin)
{
- tempinst.error("mixin templates are not regular templates");
+ .error(tempinst.loc, "%s `%s` mixin templates are not regular templates", tempinst.kind, tempinst.toPrettyChars);
goto Lerror;
}
@@ -6186,7 +6211,8 @@ void templateInstanceSemantic(TemplateInstance tempinst, Scope* sc, ArgumentList
Scope* _scope = tempdecl._scope;
if (tempdecl.semanticRun == PASS.initial)
{
- tempinst.error("template instantiation `%s` forward references template declaration `%s`", tempinst.toChars(), tempdecl.toChars());
+ .error(tempinst.loc, "%s `%s` template instantiation `%s` forward references template declaration `%s`",
+ tempinst.kind, tempinst.toPrettyChars, tempinst.toChars(), tempdecl.toChars());
return;
}
@@ -6430,7 +6456,7 @@ void templateInstanceSemantic(TemplateInstance tempinst, Scope* sc, ArgumentList
if (++nest > global.recursionLimit)
{
global.gag = 0; // ensure error message gets printed
- tempinst.error("recursive expansion");
+ .error(tempinst.loc, "%s `%s` recursive expansion", tempinst.kind, tempinst.toPrettyChars);
fatal();
}
}
@@ -6478,7 +6504,7 @@ Laftersemantic:
if (!tempinst.errors)
{
if (!tempdecl.literal)
- tempinst.error(tempinst.loc, "error instantiating");
+ .error(tempinst.loc, "%s `%s` error instantiating", tempinst.kind, tempinst.toPrettyChars);
if (tempinst.tinst)
tempinst.tinst.printInstantiationTrace();
}
@@ -6688,7 +6714,7 @@ void aliasSemantic(AliasDeclaration ds, Scope* sc)
return errorRet();
if (s == ds)
{
- ds.error("cannot resolve");
+ .error(ds.loc, "%s `%s` cannot resolve", ds.kind, ds.toPrettyChars);
return errorRet();
}
if (!s || !s.isEnumMember())
@@ -6719,7 +6745,7 @@ void aliasSemantic(AliasDeclaration ds, Scope* sc)
if (!s)
{
if (e.op != EXP.error)
- ds.error("cannot alias an expression `%s`", e.toChars());
+ .error(ds.loc, "%s `%s` cannot alias an expression `%s`", ds.kind, ds.toPrettyChars, e.toChars());
return errorRet();
}
}
@@ -6775,7 +6801,7 @@ private void aliasAssignSemantic(AliasAssign ds, Scope* sc)
Dsymbol as = sc.search(ds.loc, ds.ident, &scopesym);
if (!as)
{
- ds.error("undefined identifier `%s`", ds.ident.toChars());
+ .error(ds.loc, "%s `%s` undefined identifier `%s`", ds.kind, ds.toPrettyChars, ds.ident.toChars());
return null;
}
if (as.errors)
@@ -6784,13 +6810,13 @@ private void aliasAssignSemantic(AliasAssign ds, Scope* sc)
auto ad = as.isAliasDeclaration();
if (!ad)
{
- ds.error("identifier `%s` must be an alias declaration", as.toChars());
+ .error(ds.loc, "%s `%s` identifier `%s` must be an alias declaration", ds.kind, ds.toPrettyChars, as.toChars());
return null;
}
if (ad.overnext)
{
- ds.error("cannot reassign overloaded alias");
+ error(ds.loc, "%s `%s` cannot reassign overloaded alias", ds.kind, ds.toPrettyChars);
return null;
}
@@ -6800,12 +6826,12 @@ private void aliasAssignSemantic(AliasAssign ds, Scope* sc)
{
if (!adParent)
adParent = ds.toParent();
- error(ds.loc, "`%s` must have same parent `%s` as alias `%s`", ds.ident.toChars(), adParent.toChars(), ad.toChars());
+ .error(ds.loc, "`%s` must have same parent `%s` as alias `%s`", ds.ident.toChars(), adParent.toChars(), ad.toChars());
return null;
}
if (!adParent.isTemplateInstance())
{
- ds.error("must be a member of a template");
+ .error(ds.loc, "%s `%s` must be a member of a template", ds.kind, ds.toPrettyChars);
return null;
}
@@ -6886,7 +6912,7 @@ private void aliasAssignSemantic(AliasAssign ds, Scope* sc)
return errorRet();
if (s == aliassym)
{
- ds.error("cannot resolve");
+ .error(ds.loc, "%s `%s` cannot resolve", ds.kind, ds.toPrettyChars);
return errorRet();
}
@@ -6918,7 +6944,7 @@ private void aliasAssignSemantic(AliasAssign ds, Scope* sc)
if (!s)
{
if (e.op != EXP.error)
- ds.error("cannot alias an expression `%s`", e.toChars());
+ .error(ds.loc, "%s `%s` cannot alias an expression `%s`", ds.kind, ds.toPrettyChars, e.toChars());
return errorRet();
}
}
@@ -7151,7 +7177,7 @@ bool determineFields(AggregateDeclaration ad)
if (ad == tvs.sym)
{
const(char)* psz = (v.type.toBasetype().ty == Tsarray) ? "static array of " : "";
- ad.error("cannot have field `%s` with %ssame struct type", v.toChars(), psz);
+ .error(ad.loc, "%s `%s` cannot have field `%s` with %ssame struct type", ad.kind, ad.toPrettyChars, v.toChars(), psz);
ad.type = Type.terror;
ad.errors = true;
return 1;
diff --git a/gcc/d/dmd/dtemplate.d b/gcc/d/dmd/dtemplate.d
index e492c7e..23d1140 100644
--- a/gcc/d/dmd/dtemplate.d
+++ b/gcc/d/dmd/dtemplate.d
@@ -750,7 +750,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
{
if (i)
buf.writestring(", ");
- .toCBuffer(tp, &buf, &hgs);
+ toCBuffer(tp, buf, hgs);
}
buf.writeByte(')');
@@ -768,7 +768,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
constraint)
{
buf.writestring(" if (");
- .toCBuffer(constraint, &buf, &hgs);
+ toCBuffer(constraint, buf, hgs);
buf.writeByte(')');
}
@@ -865,14 +865,14 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
if (!fparam.ident)
continue;
// don't add it, if it has no name
- auto v = new VarDeclaration(loc, fparam.type, fparam.ident, null);
+ auto v = new VarDeclaration(fparam.loc, fparam.type, fparam.ident, null);
fparam.storageClass |= STC.parameter;
v.storage_class = fparam.storageClass;
v.dsymbolSemantic(scx);
if (!ti.symtab)
ti.symtab = new DsymbolTable();
if (!scx.insert(v))
- error("parameter `%s.%s` is already defined", toChars(), v.toChars());
+ .error(loc, "%s `%s` parameter `%s.%s` is already defined", kind, toPrettyChars, toChars(), v.toChars());
else
v.parent = fd;
}
@@ -916,7 +916,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
import dmd.staticcond;
// there will be a full tree view in verbose mode, and more compact list in the usual
- const full = global.params.verbose;
+ const full = global.params.v.verbose;
uint count;
const msg = visualizeStaticCondition(constraint, lastConstraint, lastConstraintNegs[], full, count);
scope (exit)
@@ -1769,7 +1769,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
if (m2 < matchTiargs)
matchTiargs = m2; // pick worst match
if (!(*dedtypes)[i].equals(oded))
- error("specialization not allowed for deduced parameter `%s`", tparam.ident.toChars());
+ .error(loc, "%s `%s` specialization not allowed for deduced parameter `%s`", kind, toPrettyChars, kind, toPrettyChars, tparam.ident.toChars());
}
else
{
@@ -2147,7 +2147,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
if (m2 < matchTiargs)
matchTiargs = m2; // pick worst match
if (!(*dedtypes)[i].equals(oded))
- error("specialization not allowed for deduced parameter `%s`", tparam.ident.toChars());
+ .error(loc, "%s `%s` specialization not allowed for deduced parameter `%s`", kind, toPrettyChars, tparam.ident.toChars());
}
else
{
@@ -2194,7 +2194,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
if (m2 < matchTiargs)
matchTiargs = m2; // pick worst match
if (!(*dedtypes)[i].equals(oded))
- error("specialization not allowed for deduced parameter `%s`", tparam.ident.toChars());
+ .error(loc, "%s `%s` specialization not allowed for deduced parameter `%s`", kind, toPrettyChars, tparam.ident.toChars());
}
}
oded = declareParameter(paramscope, tparam, oded);
@@ -2346,7 +2346,7 @@ extern (C++) final class TemplateDeclaration : ScopeDsymbol
}
if (!sc.insert(d))
- error("declaration `%s` is already defined", tp.ident.toChars());
+ .error(loc, "%s `%s` declaration `%s` is already defined", kind, toPrettyChars, tp.ident.toChars());
d.dsymbolSemantic(sc);
/* So the caller's o gets updated with the result of semantic() being run on o
*/
@@ -5629,7 +5629,8 @@ extern (C++) final class TemplateValueParameter : TemplateParameter
// i.e: `template T(int arg = T)`
// Raise error now before calling resolveProperties otherwise we'll
// start looping on the expansion of the template instance.
- sc.tinst.tempdecl.error("recursive template expansion");
+ auto td = sc.tinst.tempdecl;
+ .error(td.loc, "%s `%s` recursive template expansion", td.kind, td.toPrettyChars);
return ErrorExp.get();
}
}
@@ -5993,7 +5994,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
}
if (!inst)
{
- error("cannot resolve forward reference");
+ .error(loc, "%s `%s` cannot resolve forward reference", kind, toPrettyChars);
errors = true;
return this;
}
@@ -6024,14 +6025,14 @@ extern (C++) class TemplateInstance : ScopeDsymbol
override const(char)* toChars() const
{
OutBuffer buf;
- toCBufferInstance(this, &buf);
+ toCBufferInstance(this, buf);
return buf.extractChars();
}
override final const(char)* toPrettyCharsHelper()
{
OutBuffer buf;
- toCBufferInstance(this, &buf, true);
+ toCBufferInstance(this, buf, true);
return buf.extractChars();
}
@@ -6039,17 +6040,17 @@ extern (C++) class TemplateInstance : ScopeDsymbol
* Given an error instantiating the TemplateInstance,
* give the nested TemplateInstance instantiations that got
* us here. Those are a list threaded into the nested scopes.
+ * Params:
+ * cl = classification of this trace as printing either errors or deprecations
+ * max_shown = maximum number of trace elements printed (controlled with -v/-verror-limit)
*/
- extern(D) final void printInstantiationTrace(Classification cl = Classification.error)
+ extern(D) final void printInstantiationTrace(Classification cl = Classification.error,
+ const(uint) max_shown = global.params.v.errorSupplementCount())
{
if (global.gag)
return;
// Print full trace for verbose mode, otherwise only short traces
- const(uint) max_shown = !global.params.verbose ?
- (global.params.errorSupplementLimit ? global.params.errorSupplementLimit : uint.max)
- : uint.max;
-
const(char)* format = "instantiated from here: `%s`";
// This returns a function pointer
@@ -6058,11 +6059,9 @@ extern (C++) class TemplateInstance : ScopeDsymbol
{
case Classification.error:
return &errorSupplemental;
- case Classification.warning:
- return &warningSupplemental;
case Classification.deprecation:
return &deprecationSupplemental;
- case Classification.gagged, Classification.tip:
+ case Classification.gagged, Classification.tip, Classification.warning:
assert(0);
}
}();
@@ -6458,9 +6457,9 @@ extern (C++) class TemplateInstance : ScopeDsymbol
{
s = sc.search_correct(id);
if (s)
- error("template `%s` is not defined, did you mean %s?", id.toChars(), s.toChars());
+ .error(loc, "%s `%s` template `%s` is not defined, did you mean %s?", kind, toPrettyChars, id.toChars(), s.toChars());
else
- error("template `%s` is not defined", id.toChars());
+ .error(loc, "%s `%s` template `%s` is not defined", kind, toPrettyChars, id.toChars());
return false;
}
static if (LOG)
@@ -6528,7 +6527,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
}
if (td.semanticRun == PASS.initial)
{
- error("`%s` forward references template declaration `%s`",
+ .error(loc, "%s `%s` `%s` forward references template declaration `%s`", kind, toPrettyChars,
toChars(), td.toChars());
return 1;
}
@@ -6583,7 +6582,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
}
if (!s)
{
- error("template `%s` is not defined", id.toChars());
+ .error(loc, "%s `%s` template `%s` is not defined", kind, toPrettyChars, id.toChars());
return false;
}
}
@@ -6654,7 +6653,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
}
else
{
- error("`%s` is not a template declaration, it is a %s", id.toChars(), s.kind());
+ .error(loc, "%s `%s` `%s` is not a template declaration, it is a %s", kind, toPrettyChars, id.toChars(), s.kind());
return false;
}
}
@@ -6972,7 +6971,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
tdtypes.setDim(tempdecl.parameters.length);
if (!tempdecl.matchWithInstance(sc, this, &tdtypes, argumentList, 2))
{
- error("incompatible arguments for template instantiation");
+ .error(loc, "%s `%s` incompatible arguments for template instantiation", kind, toPrettyChars);
return false;
}
// TODO: Normalizing tiargs for https://issues.dlang.org/show_bug.cgi?id=7469 is necessary?
@@ -7056,10 +7055,10 @@ extern (C++) class TemplateInstance : ScopeDsymbol
if (td_ambig)
{
- .error(loc, "%s `%s.%s` matches more than one template declaration:\n%s: `%s`\nand\n%s: `%s`",
- td_best.kind(), td_best.parent.toPrettyChars(), td_best.ident.toChars(),
- td_best.loc.toChars(), td_best.toChars(),
- td_ambig.loc.toChars(), td_ambig.toChars());
+ .error(loc, "%s `%s.%s` matches more than one template declaration:",
+ td_best.kind(), td_best.parent.toPrettyChars(), td_best.ident.toChars());
+ .errorSupplemental(td_best.loc, "`%s`\nand:", td_best.toChars());
+ .errorSupplemental(td_ambig.loc, "`%s`", td_ambig.toChars());
return false;
}
if (td_best)
@@ -7132,13 +7131,13 @@ extern (C++) class TemplateInstance : ScopeDsymbol
const cmsg = tdecl.getConstraintEvalError(tip);
if (cmsg)
{
- error("%s `%s`\n%s", msg, tmsg, cmsg);
+ .error(loc, "%s `%s` %s `%s`\n%s", kind, toPrettyChars, msg, tmsg, cmsg);
if (tip)
.tip(tip);
}
else
{
- error("%s `%s`", msg, tmsg);
+ .error(loc, "%s `%s` %s `%s`", kind, toPrettyChars, msg, tmsg);
if (tdecl.parameters.length == tiargs.length)
{
@@ -7292,7 +7291,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
}
if (td.semanticRun == PASS.initial)
{
- error("`%s` forward references template declaration `%s`", toChars(), td.toChars());
+ .error(loc, "%s `%s` `%s` forward references template declaration `%s`", kind, toPrettyChars, toChars(), td.toChars());
return 1;
}
}
@@ -7372,7 +7371,8 @@ extern (C++) class TemplateInstance : ScopeDsymbol
// Emulate Expression.toMangleBuffer call that had exist in TemplateInstance.genIdent.
if (ea.op != EXP.int64 && ea.op != EXP.float64 && ea.op != EXP.complex80 && ea.op != EXP.null_ && ea.op != EXP.string_ && ea.op != EXP.arrayLiteral && ea.op != EXP.assocArrayLiteral && ea.op != EXP.structLiteral)
{
- ea.error("expression `%s` is not a valid template value argument", ea.toChars());
+ if (!ea.type.isTypeError())
+ .error(ea.loc, "%s `%s` expression `%s` is not a valid template value argument", kind, toPrettyChars, ea.toChars());
errors = true;
}
}
@@ -7427,7 +7427,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
goto L1;
}
}
- error("`%s` is nested in both `%s` and `%s`", toChars(), enclosing.toChars(), dparent.toChars());
+ .error(loc, "%s `%s` `%s` is nested in both `%s` and `%s`", kind, toPrettyChars, toChars(), enclosing.toChars(), dparent.toChars());
errors = true;
}
L1:
@@ -7613,7 +7613,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
if (++nest > global.recursionLimit)
{
global.gag = 0; // ensure error message gets printed
- error("recursive expansion exceeded allowed nesting limit");
+ .error(loc, "%s `%s` recursive expansion exceeded allowed nesting limit", kind, toPrettyChars);
fatal();
}
@@ -7630,7 +7630,7 @@ extern (C++) class TemplateInstance : ScopeDsymbol
if (++nest > global.recursionLimit)
{
global.gag = 0; // ensure error message gets printed
- error("recursive expansion exceeded allowed nesting limit");
+ .error(loc, "%s `%s` recursive expansion exceeded allowed nesting limit", kind, toPrettyChars);
fatal();
}
@@ -7825,7 +7825,7 @@ extern (C++) final class TemplateMixin : TemplateInstance
override const(char)* toChars() const
{
OutBuffer buf;
- toCBufferInstance(this, &buf);
+ toCBufferInstance(this, buf);
return buf.extractChars();
}
@@ -7840,7 +7840,7 @@ extern (C++) final class TemplateMixin : TemplateInstance
tqual.resolve(loc, sc, e, t, s);
if (!s)
{
- error("is not defined");
+ .error(loc, "%s `%s` is not defined", kind, toPrettyChars);
return false;
}
s = s.toAlias();
@@ -7868,7 +7868,7 @@ extern (C++) final class TemplateMixin : TemplateInstance
}
if (!tempdecl)
{
- error("- `%s` is a %s, not a template", s.toChars(), s.kind());
+ .error(loc, "%s `%s` - `%s` is a %s, not a template", kind, toPrettyChars, s.toChars(), s.kind());
return false;
}
}
@@ -8272,6 +8272,7 @@ MATCH matchArg(TemplateParameter tp, Scope* sc, RootObject oarg, size_t i, Templ
*/
if (tap.specType)
{
+ tap.specType = typeSemantic(tap.specType, tap.loc, sc);
Declaration d = (cast(Dsymbol)sa).isDeclaration();
if (!d)
return matchArgNoMatch();
@@ -8455,12 +8456,12 @@ struct TemplateStats
{
if (ts.allInstances is null)
ts.allInstances = new TemplateInstances();
- if (global.params.vtemplatesListInstances)
+ if (global.params.v.templatesListInstances)
ts.allInstances.push(cast() ti);
}
// message(ti.loc, "incInstance %p %p", td, ti);
- if (!global.params.vtemplates)
+ if (!global.params.v.templates)
return;
if (!td)
return;
@@ -8484,7 +8485,7 @@ struct TemplateStats
const TemplateInstance ti)
{
// message(ti.loc, "incUnique %p %p", td, ti);
- if (!global.params.vtemplates)
+ if (!global.params.v.templates)
return;
if (!td)
return;
@@ -8513,7 +8514,7 @@ extern (C++) void printTemplateStats()
}
}
- if (!global.params.vtemplates)
+ if (!global.params.v.templates)
return;
Array!(TemplateDeclarationStats) sortedStats;
@@ -8527,7 +8528,7 @@ extern (C++) void printTemplateStats()
foreach (const ref ss; sortedStats[])
{
- if (global.params.vtemplatesListInstances &&
+ if (global.params.v.templatesListInstances &&
ss.ts.allInstances)
{
message(ss.td.loc,
diff --git a/gcc/d/dmd/dtoh.d b/gcc/d/dmd/dtoh.d
index 6a7442a..f906ee1 100644
--- a/gcc/d/dmd/dtoh.d
+++ b/gcc/d/dmd/dtoh.d
@@ -23,6 +23,7 @@ import dmd.dsymbol;
import dmd.errors;
import dmd.globals;
import dmd.hdrgen;
+import dmd.id;
import dmd.identifier;
import dmd.location;
import dmd.root.filename;
@@ -199,7 +200,8 @@ struct _d_dynamicArray final
else
{
const(char)[] name = FileName.combine(global.params.cxxhdr.dir, global.params.cxxhdr.name);
- writeFile(Loc.initial, name, buf[]);
+ if (!writeFile(Loc.initial, name, buf[]))
+ return fatal();
}
}
@@ -1045,6 +1047,10 @@ public:
{
debug (Debug_DtoH) mixin(traceVisit!ad);
+ // Declared in object.d but already included in `#include`s
+ if (ad.ident == Id._size_t || ad.ident == Id._ptrdiff_t)
+ return;
+
if (!shouldEmitAndMarkVisited(ad))
return;
@@ -2327,7 +2333,12 @@ public:
{
//printf("%s %d\n", p.defaultArg.toChars, p.defaultArg.op);
buf.writestring(" = ");
+ // Always emit the FQN (fully qualified name) of a symbol for the default argument,
+ // to avoid generating an ambiguous assignment.
+ auto save = adparent;
+ adparent = null;
printExpressionFor(p.type, p.defaultArg);
+ adparent = save;
}
}
@@ -2636,7 +2647,7 @@ public:
import dmd.hdrgen;
// Hex floating point literals were introduced in C++ 17
const allowHex = global.params.cplusplus >= CppStdRevision.cpp17;
- floatToBuffer(e.type, e.value, buf, allowHex);
+ floatToBuffer(e.type, e.value, *buf, allowHex);
}
}
@@ -3205,6 +3216,21 @@ const(char*) keywordClass(const Identifier ident)
if (global.params.cplusplus >= CppStdRevision.cpp20)
return "keyword in C++20";
return null;
+ case "restrict":
+ case "_Alignas":
+ case "_Alignof":
+ case "_Atomic":
+ case "_Bool":
+ //case "_Complex": // handled above in C++
+ case "_Generic":
+ case "_Imaginary":
+ case "_Noreturn":
+ case "_Static_assert":
+ case "_Thread_local":
+ case "_assert":
+ case "_import":
+ //case "__...": handled in default case below
+ return "Keyword in C";
default:
// Identifiers starting with __ are reserved
diff --git a/gcc/d/dmd/dversion.d b/gcc/d/dmd/dversion.d
index 0945b54..aa22532 100644
--- a/gcc/d/dmd/dversion.d
+++ b/gcc/d/dmd/dversion.d
@@ -20,6 +20,7 @@ import dmd.dmodule;
import dmd.dscope;
import dmd.dsymbol;
import dmd.dsymbolsem;
+import dmd.errors;
import dmd.globals;
import dmd.identifier;
import dmd.location;
@@ -77,14 +78,14 @@ extern (C++) final class DebugSymbol : Dsymbol
{
if (!m)
{
- error("declaration must be at module level");
+ .error(loc, "%s `%s` declaration must be at module level", kind, toPrettyChars);
errors = true;
}
else
{
if (findCondition(m.debugidsNot, ident))
{
- error("defined after use");
+ .error(loc, "%s `%s` defined after use", kind, toPrettyChars);
errors = true;
}
if (!m.debugids)
@@ -96,7 +97,7 @@ extern (C++) final class DebugSymbol : Dsymbol
{
if (!m)
{
- error("level declaration must be at module level");
+ .error(loc, "%s `%s` level declaration must be at module level", kind, toPrettyChars);
errors = true;
}
else
@@ -172,14 +173,14 @@ extern (C++) final class VersionSymbol : Dsymbol
VersionCondition.checkReserved(loc, ident.toString());
if (!m)
{
- error("declaration must be at module level");
+ .error(loc, "%s `%s` declaration must be at module level", kind, toPrettyChars);
errors = true;
}
else
{
if (findCondition(m.versionidsNot, ident))
{
- error("defined after use");
+ .error(loc, "%s `%s` defined after use", kind, toPrettyChars);
errors = true;
}
if (!m.versionids)
@@ -191,7 +192,7 @@ extern (C++) final class VersionSymbol : Dsymbol
{
if (!m)
{
- error("level declaration must be at module level");
+ .error(loc, "%s `%s` level declaration must be at module level", kind, toPrettyChars);
errors = true;
}
else
diff --git a/gcc/d/dmd/errors.h b/gcc/d/dmd/errors.h
index c6b5975..759ad27 100644
--- a/gcc/d/dmd/errors.h
+++ b/gcc/d/dmd/errors.h
@@ -14,6 +14,7 @@
struct Loc;
+// Constants used to discriminate kinds of error messages.
enum class ErrorKind
{
warning = 0,
@@ -43,7 +44,7 @@ D_ATTRIBUTE_FORMAT(1, 2) void message(const char *format, ...);
D_ATTRIBUTE_FORMAT(2, 3) void message(const Loc& loc, const char *format, ...);
D_ATTRIBUTE_FORMAT(1, 2) void tip(const char *format, ...);
-D_ATTRIBUTE_FORMAT(2, 0) void verrorReport(const Loc& loc, const char *format, va_list ap, const char *p1 = NULL, const char *p2 = NULL);
+D_ATTRIBUTE_FORMAT(2, 0) void verrorReport(const Loc& loc, const char *format, va_list ap, ErrorKind kind, const char *p1 = NULL, const char *p2 = NULL);
D_ATTRIBUTE_FORMAT(2, 0) void verrorReportSupplemental(const Loc& loc, const char* format, va_list ap, ErrorKind kind);
#if defined(__GNUC__) || defined(__clang__)
diff --git a/gcc/d/dmd/errorsink.d b/gcc/d/dmd/errorsink.d
index e14829e..ce23517 100644
--- a/gcc/d/dmd/errorsink.d
+++ b/gcc/d/dmd/errorsink.d
@@ -62,6 +62,7 @@ class ErrorSinkNull : ErrorSink
/*****************************************
* Simplest implementation, just sends messages to stderr.
+ * See also: ErrorSinkCompiler.
*/
class ErrorSinkStderr : ErrorSink
{
diff --git a/gcc/d/dmd/escape.d b/gcc/d/dmd/escape.d
index f817a4e..8562e2e 100644
--- a/gcc/d/dmd/escape.d
+++ b/gcc/d/dmd/escape.d
@@ -37,6 +37,8 @@ import dmd.tokens;
import dmd.visitor;
import dmd.arraytypes;
+private:
+
/// Groups global state for escape checking together
package(dmd) struct EscapeState
{
@@ -69,6 +71,7 @@ package(dmd) struct EscapeState
* Returns:
* `true` if error
*/
+public
bool checkMutableArguments(Scope* sc, FuncDeclaration fd, TypeFunction tf,
Expression ethis, Expressions* arguments, bool gag)
{
@@ -179,7 +182,7 @@ bool checkMutableArguments(Scope* sc, FuncDeclaration fd, TypeFunction tf,
const(char)* msg = eb.isMutable && eb2.isMutable
? "more than one mutable reference %s `%s` in arguments to `%s()`"
: "mutable and const references %s `%s` in arguments to `%s()`";
- error((*arguments)[i].loc, msg,
+ sc.eSink.error((*arguments)[i].loc, msg,
referenceVerb,
v.toChars(),
fd ? fd.toPrettyChars() : "indirectly");
@@ -226,6 +229,7 @@ bool checkMutableArguments(Scope* sc, FuncDeclaration fd, TypeFunction tf,
* Returns:
* `true` if any elements escaped
*/
+public
bool checkArrayLiteralEscape(Scope *sc, ArrayLiteralExp ae, bool gag)
{
bool errors;
@@ -249,6 +253,7 @@ bool checkArrayLiteralEscape(Scope *sc, ArrayLiteralExp ae, bool gag)
* Returns:
* `true` if any elements escaped
*/
+public
bool checkAssocArrayLiteralEscape(Scope *sc, AssocArrayLiteralExp ae, bool gag)
{
bool errors;
@@ -274,6 +279,7 @@ bool checkAssocArrayLiteralEscape(Scope *sc, AssocArrayLiteralExp ae, bool gag)
* v = parameter that was not inferred
* recursionLimit = recursion limit for printing the reason
*/
+private
void printScopeFailure(E)(E printFunc, VarDeclaration v, int recursionLimit)
{
recursionLimit--;
@@ -316,12 +322,13 @@ void printScopeFailure(E)(E printFunc, VarDeclaration v, int recursionLimit)
* Returns:
* `true` if pointers to the stack can escape via assignment
*/
+public
bool checkParamArgumentEscape(Scope* sc, FuncDeclaration fdc, Identifier parId, VarDeclaration vPar, STC parStc, Expression arg, bool assertmsg, bool gag)
{
enum log = false;
- if (log) printf("checkParamArgumentEscape(arg: %s par: %s)\n",
+ if (log) printf("checkParamArgumentEscape(arg: %s par: %s parSTC: %llx)\n",
arg ? arg.toChars() : "null",
- parId ? parId.toChars() : "null");
+ parId ? parId.toChars() : "null", parStc);
//printf("type = %s, %d\n", arg.type.toChars(), arg.type.hasPointers());
if (!arg.type.hasPointers())
@@ -334,7 +341,7 @@ bool checkParamArgumentEscape(Scope* sc, FuncDeclaration fdc, Identifier parId,
if (parStc & STC.scope_)
{
// These errors only apply to non-scope parameters
- // When the paraneter is `scope`, only `checkScopeVarAddr` on `er.byref` is needed
+ // When the parameter is `scope`, only `checkScopeVarAddr` on `er.byref` is needed
er.byfunc.setDim(0);
er.byvalue.setDim(0);
er.byexp.setDim(0);
@@ -467,6 +474,7 @@ bool checkParamArgumentEscape(Scope* sc, FuncDeclaration fdc, Identifier parId,
* Returns:
* `true` if assignment to `firstArg` would cause an error
*/
+public
bool checkParamArgumentReturn(Scope* sc, Expression firstArg, Expression arg, Parameter param, bool gag)
{
enum log = false;
@@ -502,6 +510,7 @@ bool checkParamArgumentReturn(Scope* sc, Expression firstArg, Expression arg, Pa
* Returns:
* `true` if construction would cause an escaping reference error
*/
+public
bool checkConstructorEscape(Scope* sc, CallExp ce, bool gag)
{
enum log = false;
@@ -543,6 +552,7 @@ bool checkConstructorEscape(Scope* sc, CallExp ce, bool gag)
}
/// How a `return` parameter escapes its pointer value
+public
enum ReturnParamDest
{
returnVal, /// through return statement: `return x`
@@ -564,6 +574,7 @@ enum ReturnParamDest
* tthis = type of `this` parameter, or `null` if none
* Returns: What a `return` parameter should transfer the lifetime of the argument to
*/
+public
ReturnParamDest returnParamDest(TypeFunction tf, Type tthis)
{
assert(tf);
@@ -596,6 +607,7 @@ ReturnParamDest returnParamDest(TypeFunction tf, Type tthis)
* Returns:
* `true` if pointers to the stack can escape via assignment
*/
+public
bool checkAssignEscape(Scope* sc, Expression e, bool gag, bool byRef)
{
enum log = false;
@@ -912,7 +924,7 @@ bool checkAssignEscape(Scope* sc, Expression e, bool gag, bool byRef)
!(va && va.storage_class & STC.temp))
{
if (!gag)
- deprecation(ee.loc, "slice of static array temporary returned by `%s` assigned to longer lived variable `%s`",
+ sc.eSink.deprecation(ee.loc, "slice of static array temporary returned by `%s` assigned to longer lived variable `%s`",
ee.toChars(), e1.toChars());
//result = true;
continue;
@@ -959,6 +971,7 @@ bool checkAssignEscape(Scope* sc, Expression e, bool gag, bool byRef)
* Returns:
* `true` if pointers to the stack can escape
*/
+public
bool checkThrowEscape(Scope* sc, Expression e, bool gag)
{
//printf("[%s] checkThrowEscape, e = %s\n", e.loc.toChars(), e.toChars());
@@ -1002,6 +1015,7 @@ bool checkThrowEscape(Scope* sc, Expression e, bool gag)
* Returns:
* `true` if pointers to the stack can escape
*/
+public
bool checkNewEscape(Scope* sc, Expression e, bool gag)
{
import dmd.globals: FeatureState;
@@ -1124,7 +1138,7 @@ bool checkNewEscape(Scope* sc, Expression e, bool gag)
{
if (log) printf("byexp %s\n", ee.toChars());
if (!gag)
- error(ee.loc, "storing reference to stack allocated value returned by `%s` into allocated memory causes it to escape",
+ sc.eSink.error(ee.loc, "storing reference to stack allocated value returned by `%s` into allocated memory causes it to escape",
ee.toChars());
result = true;
}
@@ -1144,6 +1158,7 @@ bool checkNewEscape(Scope* sc, Expression e, bool gag)
* Returns:
* `true` if pointers to the stack can escape
*/
+public
bool checkReturnEscape(Scope* sc, Expression e, bool gag)
{
//printf("[%s] checkReturnEscape, e: %s\n", e.loc.toChars(), e.toChars());
@@ -1161,6 +1176,7 @@ bool checkReturnEscape(Scope* sc, Expression e, bool gag)
* Returns:
* `true` if references to the stack can escape
*/
+public
bool checkReturnEscapeRef(Scope* sc, Expression e, bool gag)
{
version (none)
@@ -1266,7 +1282,7 @@ private bool checkReturnEscapeImpl(Scope* sc, Expression e, bool refs, bool gag)
else if (v.isTypesafeVariadicArray && p == sc.func)
{
if (!gag)
- error(e.loc, "returning `%s` escapes a reference to variadic parameter `%s`", e.toChars(), v.toChars());
+ sc.eSink.error(e.loc, "returning `%s` escapes a reference to variadic parameter `%s`", e.toChars(), v.toChars());
result = false;
}
else
@@ -1420,7 +1436,7 @@ private bool checkReturnEscapeImpl(Scope* sc, Expression e, bool refs, bool gag)
else
{
if (!gag)
- error(ee.loc, "escaping reference to stack allocated value returned by `%s`", ee.toChars());
+ sc.eSink.error(ee.loc, "escaping reference to stack allocated value returned by `%s`", ee.toChars());
result = true;
}
}
@@ -1434,6 +1450,7 @@ private bool checkReturnEscapeImpl(Scope* sc, Expression e, bool refs, bool gag)
* va = variable to infer scope for
* Returns: `true` if succesful or already `scope`
*/
+private
bool inferScope(VarDeclaration va)
{
if (!va)
@@ -1526,6 +1543,7 @@ private bool inferReturn(FuncDeclaration fd, VarDeclaration v, bool returnScope)
* live = if @live semantics apply, i.e. expressions `p`, `*p`, `**p`, etc., all return `p`.
* retRefTransition = if `e` is returned through a `return ref scope` function call
*/
+public
void escapeByValue(Expression e, EscapeByResults* er, bool live = false, bool retRefTransition = false)
{
//printf("[%s] escapeByValue, e: %s\n", e.loc.toChars(), e.toChars());
@@ -1924,6 +1942,7 @@ void escapeByValue(Expression e, EscapeByResults* er, bool live = false, bool re
* live = if @live semantics apply, i.e. expressions `p`, `*p`, `**p`, etc., all return `p`.
* retRefTransition = if `e` is returned through a `return ref scope` function call
*/
+private
void escapeByRef(Expression e, EscapeByResults* er, bool live = false, bool retRefTransition = false)
{
//printf("[%s] escapeByRef, e: %s, retRefTransition: %d\n", e.loc.toChars(), e.toChars(), retRefTransition);
@@ -2158,6 +2177,7 @@ void escapeByRef(Expression e, EscapeByResults* er, bool live = false, bool retR
/************************************
* Aggregate the data collected by the escapeBy??() functions.
*/
+public
struct EscapeByResults
{
VarDeclarations byref; // array into which variables being returned by ref are inserted
@@ -2295,6 +2315,7 @@ private void doNotInferScope(VarDeclaration v, RootObject o)
* f = final function type. `funcdecl.type` started as the 'premature type' before attribute
* inference, then its inferred attributes are copied over to final type `f`
*/
+public
void finishScopeParamInference(FuncDeclaration funcdecl, ref TypeFunction f)
{
@@ -2427,6 +2448,7 @@ private void eliminateMaybeScopes(VarDeclaration[] array)
* Returns:
* true if it's a pointer (or reference) to mutable data
*/
+private
bool isReferenceToMutable(Type t)
{
t = t.baseElemOf();
@@ -2486,6 +2508,7 @@ bool isReferenceToMutable(Type t)
* Returns:
* true if it's a pointer (or reference) to mutable data
*/
+private
bool isReferenceToMutable(Parameter p, Type t)
{
if (p.isReference())
@@ -2561,6 +2584,7 @@ private void addMaybe(VarDeclaration va, VarDeclaration v)
}
// `setUnsafePreview` partially evaluated for dip1000
+public
bool setUnsafeDIP1000(Scope* sc, bool gag, Loc loc, const(char)* msg,
RootObject arg0 = null, RootObject arg1 = null, RootObject arg2 = null)
{
diff --git a/gcc/d/dmd/expression.d b/gcc/d/dmd/expression.d
index 07cc8d4..7205231 100644
--- a/gcc/d/dmd/expression.d
+++ b/gcc/d/dmd/expression.d
@@ -40,6 +40,7 @@ import dmd.dsymbol;
import dmd.dsymbolsem;
import dmd.dtemplate;
import dmd.errors;
+import dmd.errorsink;
import dmd.escape;
import dmd.expressionsem;
import dmd.func;
@@ -237,17 +238,6 @@ bool isNeedThisScope(Scope* sc, Declaration d)
return true;
}
-/******************************
- * check e is exp.opDispatch!(tiargs) or not
- * It's used to switch to UFCS the semantic analysis path
- */
-bool isDotOpDispatch(Expression e)
-{
- if (auto dtie = e.isDotTemplateInstanceExp())
- return dtie.ti.name == Id.opDispatch;
- return false;
-}
-
/****************************************
* Expand tuples in-place.
*
@@ -378,41 +368,6 @@ TupleDeclaration isAliasThisTuple(Expression e)
}
}
-int expandAliasThisTuples(Expressions* exps, size_t starti = 0)
-{
- if (!exps || exps.length == 0)
- return -1;
-
- for (size_t u = starti; u < exps.length; u++)
- {
- Expression exp = (*exps)[u];
- if (TupleDeclaration td = exp.isAliasThisTuple)
- {
- exps.remove(u);
- size_t i;
- td.foreachVar((s)
- {
- auto d = s.isDeclaration();
- auto e = new DotVarExp(exp.loc, exp, d);
- assert(d.type);
- e.type = d.type;
- exps.insert(u + i, e);
- ++i;
- });
- version (none)
- {
- printf("expansion ->\n");
- foreach (e; exps)
- {
- printf("\texps[%d] e = %s %s\n", i, EXPtoString(e.op), e.toChars());
- }
- }
- return cast(int)u;
- }
- }
- return -1;
-}
-
/****************************************
* If `s` is a function template, i.e. the only member of a template
* and that member is a function, return that template.
@@ -601,7 +556,7 @@ extern (C++) struct UnionExp
private:
// Ensure that the union is suitably aligned.
- align(8) union __AnonStruct__u
+ align(8) union _AnonStruct_u
{
char[__traits(classInstanceSize, Expression)] exp;
char[__traits(classInstanceSize, IntegerExp)] integerexp;
@@ -622,23 +577,7 @@ private:
char[__traits(classInstanceSize, VectorExp)] vectorexp;
}
- __AnonStruct__u u;
-}
-
-/********************************
- * Test to see if two reals are the same.
- * Regard NaN's as equivalent.
- * Regard +0 and -0 as different.
- * Params:
- * x1 = first operand
- * x2 = second operand
- * Returns:
- * true if x1 is x2
- * else false
- */
-bool RealIdentical(real_t x1, real_t x2) @safe
-{
- return (CTFloat.isNaN(x1) && CTFloat.isNaN(x2)) || CTFloat.isIdentical(x1, x2);
+ _AnonStruct_u u;
}
/************************ TypeDotIdExp ************************************/
@@ -813,103 +752,10 @@ extern (C++) abstract class Expression : ASTNode
{
OutBuffer buf;
HdrGenState hgs;
- toCBuffer(this, &buf, &hgs);
+ toCBuffer(this, buf, hgs);
return buf.extractChars();
}
- static if (__VERSION__ < 2092)
- {
- final void error(const(char)* format, ...) const
- {
- if (type != Type.terror)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.error);
- va_end(ap);
- }
- }
-
- final void errorSupplemental(const(char)* format, ...)
- {
- if (type == Type.terror)
- return;
-
- va_list ap;
- va_start(ap, format);
- .verrorReportSupplemental(loc, format, ap, ErrorKind.error);
- va_end(ap);
- }
-
- final void warning(const(char)* format, ...) const
- {
- if (type != Type.terror)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.warning);
- va_end(ap);
- }
- }
-
- final void deprecation(const(char)* format, ...) const
- {
- if (type != Type.terror)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.deprecation);
- va_end(ap);
- }
- }
- }
- else
- {
- pragma(printf) final void error(const(char)* format, ...) const
- {
- if (type != Type.terror)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.error);
- va_end(ap);
- }
- }
-
- pragma(printf) final void errorSupplemental(const(char)* format, ...)
- {
- if (type == Type.terror)
- return;
-
- va_list ap;
- va_start(ap, format);
- .verrorReportSupplemental(loc, format, ap, ErrorKind.error);
- va_end(ap);
- }
-
- pragma(printf) final void warning(const(char)* format, ...) const
- {
- if (type != Type.terror)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.warning);
- va_end(ap);
- }
- }
-
- pragma(printf) final void deprecation(const(char)* format, ...) const
- {
- if (type != Type.terror)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.deprecation);
- va_end(ap);
- }
- }
- }
-
/**********************************
* Combine e1 and e2 by CommaExp if both are not NULL.
*/
@@ -991,7 +837,8 @@ extern (C++) abstract class Expression : ASTNode
dinteger_t toInteger()
{
//printf("Expression %s\n", EXPtoString(op).ptr);
- error("integer constant expression expected instead of `%s`", toChars());
+ if (!type.isTypeError())
+ error(loc, "integer constant expression expected instead of `%s`", toChars());
return 0;
}
@@ -1003,19 +850,19 @@ extern (C++) abstract class Expression : ASTNode
real_t toReal()
{
- error("floating point constant expression expected instead of `%s`", toChars());
+ error(loc, "floating point constant expression expected instead of `%s`", toChars());
return CTFloat.zero;
}
real_t toImaginary()
{
- error("floating point constant expression expected instead of `%s`", toChars());
+ error(loc, "floating point constant expression expected instead of `%s`", toChars());
return CTFloat.zero;
}
complex_t toComplex()
{
- error("floating point constant expression expected instead of `%s`", toChars());
+ error(loc, "floating point constant expression expected instead of `%s`", toChars());
return complex_t(CTFloat.zero);
}
@@ -1044,9 +891,9 @@ extern (C++) abstract class Expression : ASTNode
loc = e.loc;
if (e.op == EXP.type)
- error("`%s` is a `%s` definition and cannot be modified", e.type.toChars(), e.type.kind());
+ error(loc, "`%s` is a `%s` definition and cannot be modified", e.type.toChars(), e.type.kind());
else
- error("`%s` is not an lvalue and cannot be modified", e.toChars());
+ error(loc, "`%s` is not an lvalue and cannot be modified", e.toChars());
return ErrorExp.get();
}
@@ -1070,17 +917,17 @@ extern (C++) abstract class Expression : ASTNode
break;
if (!ff.type.isMutable)
{
- error("cannot modify `%s` in `%s` function", toChars(), MODtoChars(type.mod));
+ error(loc, "cannot modify `%s` in `%s` function", toChars(), MODtoChars(type.mod));
return ErrorExp.get();
}
}
}
- error("cannot modify `%s` expression `%s`", MODtoChars(type.mod), toChars());
+ error(loc, "cannot modify `%s` expression `%s`", MODtoChars(type.mod), toChars());
return ErrorExp.get();
}
else if (!type.isAssignable())
{
- error("cannot modify struct instance `%s` of type `%s` because it contains `const` or `immutable` members",
+ error(loc, "cannot modify struct instance `%s` of type `%s` because it contains `const` or `immutable` members",
toChars(), type.toChars());
return ErrorExp.get();
}
@@ -1135,7 +982,7 @@ extern (C++) abstract class Expression : ASTNode
{
if (type && type.toBasetype().ty == Tvoid)
{
- error("expression `%s` is `void` and has no value", toChars());
+ error(loc, "expression `%s` is `void` and has no value", toChars());
//print(); assert(0);
if (!global.gag)
type = Type.terror;
@@ -1152,7 +999,7 @@ extern (C++) abstract class Expression : ASTNode
return true;
if (!type.isscalar())
{
- error("`%s` is not a scalar, it is a `%s`", toChars(), type.toChars());
+ error(loc, "`%s` is not a scalar, it is a `%s`", toChars(), type.toChars());
return true;
}
return checkValue();
@@ -1166,7 +1013,7 @@ extern (C++) abstract class Expression : ASTNode
return true;
if (type.toBasetype().ty == Tbool)
{
- error("operation not allowed on `bool` `%s`", toChars());
+ error(loc, "operation not allowed on `bool` `%s`", toChars());
return true;
}
return false;
@@ -1180,13 +1027,13 @@ extern (C++) abstract class Expression : ASTNode
return true;
if (!type.isintegral())
{
- error("`%s` is not of integral type, it is a `%s`", toChars(), type.toChars());
+ error(loc, "`%s` is not of integral type, it is a `%s`", toChars(), type.toChars());
return true;
}
return checkValue();
}
- extern (D) final bool checkArithmetic()
+ extern (D) final bool checkArithmetic(EXP op)
{
if (op == EXP.error)
return true;
@@ -1194,7 +1041,11 @@ extern (C++) abstract class Expression : ASTNode
return true;
if (!type.isintegral() && !type.isfloating())
{
- error("`%s` is not of arithmetic type, it is a `%s`", toChars(), type.toChars());
+ // unary aggregate ops error here
+ const char* msg = type.isAggregate() ?
+ "operator `%s` is not defined for `%s` of type `%s`" :
+ "illegal operator `%s` for `%s` of type `%s`";
+ error(loc, msg, EXPtoString(op).ptr, toChars(), type.toChars());
return true;
}
return checkValue();
@@ -1235,7 +1086,7 @@ extern (C++) abstract class Expression : ASTNode
// If the call has a pure parent, then the called func must be pure.
if (!f.isPure() && checkImpure(sc, loc, null, f))
{
- error("`pure` %s `%s` cannot call impure %s `%s`",
+ error(loc, "`pure` %s `%s` cannot call impure %s `%s`",
sc.func.kind(), sc.func.toPrettyChars(), f.kind(),
f.toPrettyChars());
@@ -1374,7 +1225,7 @@ extern (C++) abstract class Expression : ASTNode
if (checkImpure(sc, loc, "`pure` %s `%s` cannot access mutable static data `%s`", v))
{
- error("`pure` %s `%s` cannot access mutable static data `%s`",
+ error(loc, "`pure` %s `%s` cannot access mutable static data `%s`",
sc.func.kind(), sc.func.toPrettyChars(), v.toChars());
err = true;
}
@@ -1419,7 +1270,7 @@ extern (C++) abstract class Expression : ASTNode
OutBuffer vbuf;
MODMatchToBuffer(&ffbuf, ff.type.mod, v.type.mod);
MODMatchToBuffer(&vbuf, v.type.mod, ff.type.mod);
- error("%s%s `%s` cannot access %sdata `%s`",
+ error(loc, "%s%s `%s` cannot access %sdata `%s`",
ffbuf.peekChars(), ff.kind(), ff.toPrettyChars(), vbuf.peekChars(), v.toChars());
err = true;
break;
@@ -1478,7 +1329,7 @@ extern (C++) abstract class Expression : ASTNode
{
if (sc.varDecl.storage_class & STC.safe)
{
- error("`@safe` variable `%s` cannot be initialized by calling `@system` function `%s`",
+ error(loc, "`@safe` variable `%s` cannot be initialized by calling `@system` function `%s`",
sc.varDecl.toChars(), f.toChars());
return true;
}
@@ -1499,7 +1350,7 @@ extern (C++) abstract class Expression : ASTNode
loc = sc.func.loc;
const prettyChars = f.toPrettyChars();
- error("`@safe` %s `%s` cannot call `@system` %s `%s`",
+ error(loc, "`@safe` %s `%s` cannot call `@system` %s `%s`",
sc.func.kind(), sc.func.toPrettyChars(), f.kind(),
prettyChars);
if (!f.isDtorDeclaration)
@@ -1563,7 +1414,7 @@ extern (C++) abstract class Expression : ASTNode
|| f.ident == Id._d_arrayappendT || f.ident == Id._d_arrayappendcTX
|| f.ident == Id._d_arraycatnTX || f.ident == Id._d_newclassT))
{
- error("`@nogc` %s `%s` cannot call non-@nogc %s `%s`",
+ error(loc, "`@nogc` %s `%s` cannot call non-@nogc %s `%s`",
sc.func.kind(), sc.func.toPrettyChars(), f.kind(), f.toPrettyChars());
if (!f.isDtorDeclaration)
@@ -1623,7 +1474,7 @@ extern (C++) abstract class Expression : ASTNode
// sc.intypeof, sc.getStructClassScope(), func, fdthis);
auto t = ve.var.isThis();
assert(t);
- error("accessing non-static variable `%s` requires an instance of `%s`", ve.var.toChars(), t.toChars());
+ error(loc, "accessing non-static variable `%s` requires an instance of `%s`", ve.var.toChars(), t.toChars());
return true;
}
}
@@ -1656,8 +1507,8 @@ extern (C++) abstract class Expression : ASTNode
break;
}
- error("read-modify-write operations are not allowed for `shared` variables");
- errorSupplemental("Use `core.atomic.atomicOp!\"%s\"(%s, %s)` instead",
+ error(loc, "read-modify-write operations are not allowed for `shared` variables");
+ errorSupplemental(loc, "Use `core.atomic.atomicOp!\"%s\"(%s, %s)` instead",
EXPtoString(rmwOp).ptr, toChars(), ex ? ex.toChars() : "1");
return true;
}
@@ -1857,7 +1708,10 @@ extern (C++) abstract class Expression : ASTNode
inout(IdentityExp) isIdentityExp() { return (op == EXP.identity || op == EXP.notIdentity) ? cast(typeof(return))this : null; }
inout(CondExp) isCondExp() { return op == EXP.question ? cast(typeof(return))this : null; }
inout(GenericExp) isGenericExp() { return op == EXP._Generic ? cast(typeof(return))this : null; }
- inout(DefaultInitExp) isDefaultInitExp() { return isDefaultInitOp(op) ? cast(typeof(return))this: null; }
+ inout(DefaultInitExp) isDefaultInitExp() { return
+ (op == EXP.prettyFunction || op == EXP.functionString ||
+ op == EXP.line || op == EXP.moduleString ||
+ op == EXP.file || op == EXP.fileFullPath ) ? cast(typeof(return))this : null; }
inout(FileInitExp) isFileInitExp() { return (op == EXP.file || op == EXP.fileFullPath) ? cast(typeof(return))this : null; }
inout(LineInitExp) isLineInitExp() { return op == EXP.line ? cast(typeof(return))this : null; }
inout(ModuleInitExp) isModuleInitExp() { return op == EXP.moduleString ? cast(typeof(return))this : null; }
@@ -1905,7 +1759,7 @@ extern (C++) final class IntegerExp : Expression
{
//printf("%s, loc = %d\n", toChars(), loc.linnum);
if (type.ty != Terror)
- error("integral constant must be scalar type, not `%s`", type.toChars());
+ error(loc, "integral constant must be scalar type, not `%s`", type.toChars());
type = Type.terror;
}
this.type = type;
@@ -1983,7 +1837,7 @@ extern (C++) final class IntegerExp : Expression
e = this;
else if (!loc.isValid())
loc = e.loc;
- e.error("cannot modify constant `%s`", e.toChars());
+ error(e.loc, "cannot modify constant `%s`", e.toChars());
return ErrorExp.get();
}
@@ -2167,11 +2021,6 @@ extern (C++) final class VoidInitExp : Expression
this.type = var.type;
}
- override const(char)* toChars() const
- {
- return "void";
- }
-
override void accept(Visitor v)
{
v.visit(this);
@@ -2205,6 +2054,21 @@ extern (C++) final class RealExp : Expression
emplaceExp!(RealExp)(pue, loc, value, type);
}
+ /********************************
+ * Test to see if two reals are the same.
+ * Regard NaN's as equivalent.
+ * Regard +0 and -0 as different.
+ * Params:
+ * x1 = first operand
+ * x2 = second operand
+ * Returns:
+ * true if x1 is x2
+ * else false
+ */
+ private static bool RealIdentical(real_t x1, real_t x2) @safe
+ {
+ return (CTFloat.isNaN(x1) && CTFloat.isNaN(x2)) || CTFloat.isIdentical(x1, x2);
+ }
override bool equals(const RootObject o) const
{
if (this == o)
@@ -2294,7 +2158,9 @@ extern (C++) final class ComplexExp : Expression
return true;
if (auto ne = (cast(Expression)o).isComplexExp())
{
- if (type.toHeadMutable().equals(ne.type.toHeadMutable()) && RealIdentical(creall(value), creall(ne.value)) && RealIdentical(cimagl(value), cimagl(ne.value)))
+ if (type.toHeadMutable().equals(ne.type.toHeadMutable()) &&
+ RealExp.RealIdentical(creall(value), creall(ne.value)) &&
+ RealExp.RealIdentical(cimagl(value), cimagl(ne.value)))
{
return true;
}
@@ -2589,6 +2455,9 @@ extern (C++) final class StringExp : Expression
*/
bool committed;
+ /// If the string is parsed from a hex string literal
+ bool hexString = false;
+
enum char NoPostfix = 0;
extern (D) this(const ref Loc loc, const(void)[] string) scope
@@ -2680,7 +2549,7 @@ extern (C++) final class StringExp : Expression
{
if (const s = utf_decodeChar(string[0 .. len], u, c))
{
- error("%.*s", cast(int)s.length, s.ptr);
+ error(loc, "%.*s", cast(int)s.length, s.ptr);
return 0;
}
result += utf_codeLength(encSize, c);
@@ -2692,7 +2561,7 @@ extern (C++) final class StringExp : Expression
{
if (const s = utf_decodeWchar(wstring[0 .. len], u, c))
{
- error("%.*s", cast(int)s.length, s.ptr);
+ error(loc, "%.*s", cast(int)s.length, s.ptr);
return 0;
}
result += utf_codeLength(encSize, c);
@@ -2897,7 +2766,7 @@ extern (C++) final class StringExp : Expression
override Expression modifiableLvalue(Scope* sc, Expression e)
{
- error("cannot modify string literal `%s`", toChars());
+ error(loc, "cannot modify string literal `%s`", toChars());
return ErrorExp.get();
}
@@ -3030,7 +2899,7 @@ extern (C++) final class TupleExp : Expression
}
else
{
- error("`%s` is not an expression", o.toChars());
+ error(loc, "`%s` is not an expression", o.toChars());
}
}
}
@@ -3250,6 +3119,8 @@ extern (C++) final class AssocArrayLiteralExp : Expression
Expressions* keys;
Expressions* values;
+ /// Lower to core.internal.newaa for static initializaton
+ Expression lowering;
extern (D) this(const ref Loc loc, Expressions* keys, Expressions* values) @safe
{
@@ -3564,13 +3435,13 @@ extern (C++) final class TypeExp : Expression
override bool checkType()
{
- error("type `%s` is not an expression", toChars());
+ error(loc, "type `%s` is not an expression", toChars());
return true;
}
override bool checkValue()
{
- error("type `%s` has no value", toChars());
+ error(loc, "type `%s` has no value", toChars());
return true;
}
@@ -3610,7 +3481,7 @@ extern (C++) final class ScopeExp : Expression
{
if (sds.isPackage())
{
- error("%s `%s` has no type", sds.kind(), sds.toChars());
+ error(loc, "%s `%s` has no type", sds.kind(), sds.toChars());
return true;
}
if (auto ti = sds.isTemplateInstance())
@@ -3620,7 +3491,7 @@ extern (C++) final class ScopeExp : Expression
ti.semantictiargsdone &&
ti.semanticRun == PASS.initial)
{
- error("partial %s `%s` has no type", sds.kind(), toChars());
+ error(loc, "partial %s `%s` has no type", sds.kind(), toChars());
return true;
}
}
@@ -3629,7 +3500,7 @@ extern (C++) final class ScopeExp : Expression
override bool checkValue()
{
- error("%s `%s` has no value", sds.kind(), sds.toChars());
+ error(loc, "%s `%s` has no value", sds.kind(), sds.toChars());
return true;
}
@@ -3671,13 +3542,13 @@ extern (C++) final class TemplateExp : Expression
override bool checkType()
{
- error("%s `%s` has no type", td.kind(), toChars());
+ error(loc, "%s `%s` has no type", td.kind(), toChars());
return true;
}
override bool checkValue()
{
- error("%s `%s` has no value", td.kind(), toChars());
+ error(loc, "%s `%s` has no value", td.kind(), toChars());
return true;
}
@@ -3870,22 +3741,22 @@ extern (C++) final class VarExp : SymbolExp
{
if (var.storage_class & STC.manifest)
{
- error("manifest constant `%s` cannot be modified", var.toChars());
+ error(loc, "manifest constant `%s` cannot be modified", var.toChars());
return ErrorExp.get();
}
if (var.storage_class & STC.lazy_ && !delegateWasExtracted)
{
- error("lazy variable `%s` cannot be modified", var.toChars());
+ error(loc, "lazy variable `%s` cannot be modified", var.toChars());
return ErrorExp.get();
}
if (var.ident == Id.ctfe)
{
- error("cannot modify compiler-generated variable `__ctfe`");
+ error(loc, "cannot modify compiler-generated variable `__ctfe`");
return ErrorExp.get();
}
if (var.ident == Id.dollar) // https://issues.dlang.org/show_bug.cgi?id=13574
{
- error("cannot modify operator `$`");
+ error(loc, "cannot modify operator `$`");
return ErrorExp.get();
}
return this;
@@ -3896,7 +3767,7 @@ extern (C++) final class VarExp : SymbolExp
//printf("VarExp::modifiableLvalue('%s')\n", var.toChars());
if (var.storage_class & STC.manifest)
{
- error("cannot modify manifest constant `%s`", toChars());
+ error(loc, "cannot modify manifest constant `%s`", toChars());
return ErrorExp.get();
}
// See if this expression is a modifiable lvalue (i.e. not const)
@@ -4036,13 +3907,11 @@ extern (C++) final class FuncExp : Expression
return new FuncExp(loc, fd);
}
- extern (D) MATCH matchType(Type to, Scope* sc, FuncExp* presult, int flag = 0)
+ extern (D) MATCH matchType(Type to, Scope* sc, FuncExp* presult, ErrorSink eSink)
{
-
- static MATCH cannotInfer(Expression e, Type to, int flag)
+ MATCH cannotInfer()
{
- if (!flag)
- e.error("cannot infer parameter types from `%s`", to.toChars());
+ eSink.error(loc, "cannot infer parameter types from `%s`", to.toChars());
return MATCH.nomatch;
}
@@ -4055,8 +3924,7 @@ extern (C++) final class FuncExp : Expression
{
if (tok == TOK.function_)
{
- if (!flag)
- error("cannot match function literal to delegate type `%s`", to.toChars());
+ eSink.error(loc, "cannot match function literal to delegate type `%s`", to.toChars());
return MATCH.nomatch;
}
tof = cast(TypeFunction)to.nextOf();
@@ -4065,8 +3933,7 @@ extern (C++) final class FuncExp : Expression
{
if (tok == TOK.delegate_)
{
- if (!flag)
- error("cannot match delegate literal to function pointer type `%s`", to.toChars());
+ eSink.error(loc, "cannot match delegate literal to function pointer type `%s`", to.toChars());
return MATCH.nomatch;
}
}
@@ -4075,7 +3942,7 @@ extern (C++) final class FuncExp : Expression
{
if (!tof)
{
- return cannotInfer(this, to, flag);
+ return cannotInfer();
}
// Parameter types inference from 'tof'
@@ -4086,7 +3953,7 @@ extern (C++) final class FuncExp : Expression
const dim = tf.parameterList.length;
if (tof.parameterList.length != dim || tof.parameterList.varargs != tf.parameterList.varargs)
- return cannotInfer(this, to, flag);
+ return cannotInfer();
auto tiargs = new Objects();
tiargs.reserve(td.parameters.length);
@@ -4106,7 +3973,7 @@ extern (C++) final class FuncExp : Expression
Parameter pto = tof.parameterList[u];
Type t = pto.type;
if (t.ty == Terror)
- return cannotInfer(this, to, flag);
+ return cannotInfer();
tf.parameterList[u].storageClass = tof.parameterList[u].storageClass;
tiargs.push(t);
}
@@ -4124,9 +3991,9 @@ extern (C++) final class FuncExp : Expression
if (ex.op == EXP.error)
return MATCH.nomatch;
if (auto ef = ex.isFuncExp())
- return ef.matchType(to, sc, presult, flag);
+ return ef.matchType(to, sc, presult, eSink);
else
- return cannotInfer(this, to, flag);
+ return cannotInfer();
}
if (!tof || !tof.next)
@@ -4198,10 +4065,10 @@ extern (C++) final class FuncExp : Expression
(*presult).fd.modifyReturns(sc, tof.next);
}
}
- else if (!flag)
+ else if (!cast(ErrorSinkNull)eSink)
{
auto ts = toAutoQualChars(tx, to);
- error("cannot implicitly convert expression `%s` of type `%s` to `%s`",
+ eSink.error(loc, "cannot implicitly convert expression `%s` of type `%s` to `%s`",
toChars(), ts[0], ts[1]);
}
return m;
@@ -4216,7 +4083,7 @@ extern (C++) final class FuncExp : Expression
{
if (td)
{
- error("template lambda has no type");
+ error(loc, "template lambda has no type");
return true;
}
return false;
@@ -4226,7 +4093,7 @@ extern (C++) final class FuncExp : Expression
{
if (td)
{
- error("template lambda has no value");
+ error(loc, "template lambda has no value");
return true;
}
return false;
@@ -4422,11 +4289,11 @@ extern (C++) abstract class UnaExp : Expression
if (e1.op == EXP.type)
{
- error("incompatible type for `%s(%s)`: cannot use `%s` with types", EXPtoString(op).ptr, e1.toChars(), EXPtoString(op).ptr);
+ error(loc, "incompatible type for `%s(%s)`: cannot use `%s` with types", EXPtoString(op).ptr, e1.toChars(), EXPtoString(op).ptr);
}
else
{
- error("incompatible type for `%s(%s)`: `%s`", EXPtoString(op).ptr, e1.toChars(), e1.type.toChars());
+ error(loc, "incompatible type for `%s(%s)`: `%s`", EXPtoString(op).ptr, e1.toChars(), e1.type.toChars());
}
return ErrorExp.get();
}
@@ -4501,18 +4368,18 @@ extern (C++) abstract class BinExp : Expression
const(char)* thisOp = (op == EXP.question) ? ":" : EXPtoString(op).ptr;
if (e1.op == EXP.type || e2.op == EXP.type)
{
- error("incompatible types for `(%s) %s (%s)`: cannot use `%s` with types",
+ error(loc, "incompatible types for `(%s) %s (%s)`: cannot use `%s` with types",
e1.toChars(), thisOp, e2.toChars(), EXPtoString(op).ptr);
}
else if (e1.type.equals(e2.type))
{
- error("incompatible types for `(%s) %s (%s)`: both operands are of type `%s`",
+ error(loc, "incompatible types for `(%s) %s (%s)`: both operands are of type `%s`",
e1.toChars(), thisOp, e2.toChars(), e1.type.toChars());
}
else
{
auto ts = toAutoQualChars(e1.type, e2.type);
- error("incompatible types for `(%s) %s (%s)`: `%s` and `%s`",
+ error(loc, "incompatible types for `(%s) %s (%s)`: `%s` and `%s`",
e1.toChars(), thisOp, e2.toChars(), ts[0], ts[1]);
}
return ErrorExp.get();
@@ -4533,7 +4400,7 @@ extern (C++) abstract class BinExp : Expression
{
if ((type.isintegral() && t2.isfloating()))
{
- warning("`%s %s %s` is performing truncating conversion", type.toChars(), EXPtoString(op).ptr, t2.toChars());
+ warning(loc, "`%s %s %s` is performing truncating conversion", type.toChars(), EXPtoString(op).ptr, t2.toChars());
}
}
@@ -4545,17 +4412,17 @@ extern (C++) abstract class BinExp : Expression
const(char)* opstr = EXPtoString(op).ptr;
if (t1.isreal() && t2.iscomplex())
{
- error("`%s %s %s` is undefined. Did you mean `%s %s %s.re`?", t1.toChars(), opstr, t2.toChars(), t1.toChars(), opstr, t2.toChars());
+ error(loc, "`%s %s %s` is undefined. Did you mean `%s %s %s.re`?", t1.toChars(), opstr, t2.toChars(), t1.toChars(), opstr, t2.toChars());
return ErrorExp.get();
}
else if (t1.isimaginary() && t2.iscomplex())
{
- error("`%s %s %s` is undefined. Did you mean `%s %s %s.im`?", t1.toChars(), opstr, t2.toChars(), t1.toChars(), opstr, t2.toChars());
+ error(loc, "`%s %s %s` is undefined. Did you mean `%s %s %s.im`?", t1.toChars(), opstr, t2.toChars(), t1.toChars(), opstr, t2.toChars());
return ErrorExp.get();
}
else if ((t1.isreal() || t1.isimaginary()) && t2.isimaginary())
{
- error("`%s %s %s` is an undefined operation", t1.toChars(), opstr, t2.toChars());
+ error(loc, "`%s %s %s` is an undefined operation", t1.toChars(), opstr, t2.toChars());
return ErrorExp.get();
}
}
@@ -4567,7 +4434,7 @@ extern (C++) abstract class BinExp : Expression
// Thus, r+=i, r+=c, i+=r, i+=c are all forbidden operations.
if ((t1.isreal() && (t2.isimaginary() || t2.iscomplex())) || (t1.isimaginary() && (t2.isreal() || t2.iscomplex())))
{
- error("`%s %s %s` is undefined (result is complex)", t1.toChars(), EXPtoString(op).ptr, t2.toChars());
+ error(loc, "`%s %s %s` is undefined (result is complex)", t1.toChars(), EXPtoString(op).ptr, t2.toChars());
return ErrorExp.get();
}
if (type.isreal() || type.isimaginary())
@@ -4658,7 +4525,7 @@ extern (C++) abstract class BinExp : Expression
{
if (t2.iscomplex())
{
- error("cannot perform modulo complex arithmetic");
+ error(loc, "cannot perform modulo complex arithmetic");
return ErrorExp.get();
}
}
@@ -4674,8 +4541,8 @@ extern (C++) abstract class BinExp : Expression
extern (D) final bool checkArithmeticBin()
{
- bool r1 = e1.checkArithmetic();
- bool r2 = e2.checkArithmetic();
+ bool r1 = e1.checkArithmetic(this.op);
+ bool r2 = e2.checkArithmetic(this.op);
return (r1 || r2);
}
@@ -4947,13 +4814,13 @@ extern (C++) final class DotTemplateExp : UnaExp
override bool checkType()
{
- error("%s `%s` has no type", td.kind(), toChars());
+ error(loc, "%s `%s` has no type", td.kind(), toChars());
return true;
}
override bool checkValue()
{
- error("%s `%s` has no value", td.kind(), toChars());
+ error(loc, "%s `%s` has no value", td.kind(), toChars());
return true;
}
@@ -5122,7 +4989,7 @@ extern (C++) final class DotTemplateInstanceExp : UnaExp
ti.semantictiargsdone &&
ti.semanticRun == PASS.initial)
{
- error("partial %s `%s` has no type", ti.kind(), toChars());
+ error(loc, "partial %s `%s` has no type", ti.kind(), toChars());
return true;
}
return false;
@@ -5134,9 +5001,9 @@ extern (C++) final class DotTemplateInstanceExp : UnaExp
ti.semantictiargsdone &&
ti.semanticRun == PASS.initial)
- error("partial %s `%s` has no value", ti.kind(), toChars());
+ error(loc, "partial %s `%s` has no value", ti.kind(), toChars());
else
- error("%s `%s` has no value", ti.kind(), ti.toChars());
+ error(loc, "%s `%s` has no value", ti.kind(), ti.toChars());
return true;
}
@@ -5481,9 +5348,9 @@ extern (C++) final class PtrExp : UnaExp
if (var && var.type.isFunction_Delegate_PtrToFunction())
{
if (var.type.isTypeFunction())
- error("function `%s` is not an lvalue and cannot be modified", var.toChars());
+ error(loc, "function `%s` is not an lvalue and cannot be modified", var.toChars());
else
- error("function pointed to by `%s` is not an lvalue and cannot be modified", var.toChars());
+ error(loc, "function pointed to by `%s` is not an lvalue and cannot be modified", var.toChars());
return ErrorExp.get();
}
return Expression.modifiableLvalue(sc, e);
@@ -5771,7 +5638,7 @@ extern (C++) final class SliceExp : UnaExp
override Expression modifiableLvalue(Scope* sc, Expression e)
{
- error("slice expression `%s` is not a modifiable lvalue", toChars());
+ error(loc, "slice expression `%s` is not a modifiable lvalue", toChars());
return this;
}
@@ -5845,7 +5712,7 @@ extern (C++) final class ArrayExp : UnaExp
override Expression toLvalue(Scope* sc, Expression e)
{
if (type && type.toBasetype().ty == Tvoid)
- error("`void`s have no value");
+ error(loc, "`void`s have no value");
return this;
}
@@ -6113,7 +5980,7 @@ extern (C++) final class IndexExp : BinExp
Type t2b = e2.type.toBasetype();
if (t2b.ty == Tarray && t2b.nextOf().isMutable())
{
- error("associative arrays can only be assigned values with immutable keys, not `%s`", e2.type.toChars());
+ error(loc, "associative arrays can only be assigned values with immutable keys, not `%s`", e2.type.toChars());
return ErrorExp.get();
}
modifiable = true;
@@ -6972,7 +6839,7 @@ extern (C++) final class CondExp : BinExp
{
if (!e1.isLvalue() && !e2.isLvalue())
{
- error("conditional expression `%s` is not a modifiable lvalue", toChars());
+ error(loc, "conditional expression `%s` is not a modifiable lvalue", toChars());
return ErrorExp.get();
}
e1 = e1.modifiableLvalue(sc, e1);
@@ -7058,14 +6925,6 @@ extern (C++) final class CondExp : BinExp
}
}
-/// Returns: if this token is the `op` for a derived `DefaultInitExp` class.
-bool isDefaultInitOp(EXP op) pure nothrow @safe @nogc
-{
- return op == EXP.prettyFunction || op == EXP.functionString ||
- op == EXP.line || op == EXP.moduleString ||
- op == EXP.file || op == EXP.fileFullPath ;
-}
-
/***********************************************************
* A special keyword when used as a function's default argument
*
@@ -7082,6 +6941,12 @@ bool isDefaultInitOp(EXP op) pure nothrow @safe @nogc
*/
extern (C++) class DefaultInitExp : Expression
{
+ /*************************
+ * Params:
+ * loc = location
+ * op = EXP.prettyFunction, EXP.functionString, EXP.moduleString,
+ * EXP.line, EXP.file, EXP.fileFullPath
+ */
extern (D) this(const ref Loc loc, EXP op) @safe
{
super(loc, op);
@@ -7217,7 +7082,7 @@ extern (C++) final class PrettyFuncInitExp : DefaultInitExp
{
const funcStr = fd.Dsymbol.toPrettyChars();
OutBuffer buf;
- functionToBufferWithIdent(fd.type.isTypeFunction(), &buf, funcStr, fd.isStatic);
+ functionToBufferWithIdent(fd.type.isTypeFunction(), buf, funcStr, fd.isStatic);
s = buf.extractChars();
}
else
diff --git a/gcc/d/dmd/expression.h b/gcc/d/dmd/expression.h
index 1f04c6c..5c656ee 100644
--- a/gcc/d/dmd/expression.h
+++ b/gcc/d/dmd/expression.h
@@ -51,7 +51,7 @@ bool isTrivialExp(Expression *e);
bool hasSideEffect(Expression *e, bool assumeImpureCalls = false);
enum BE : int32_t;
-BE canThrow(Expression *e, FuncDeclaration *func, bool mustNotThrow);
+BE canThrow(Expression *e, FuncDeclaration *func, ErrorSink *eSink);
typedef unsigned char OwnedBy;
enum
@@ -93,9 +93,6 @@ public:
DYNCAST dyncast() const override final { return DYNCAST_EXPRESSION; }
const char *toChars() const override;
- void error(const char *format, ...) const;
- void warning(const char *format, ...) const;
- void deprecation(const char *format, ...) const;
virtual dinteger_t toInteger();
virtual uinteger_t toUInteger();
@@ -380,6 +377,7 @@ public:
size_t len; // number of chars, wchars, or dchars
unsigned char sz; // 1: char, 2: wchar, 4: dchar
bool committed; // if type is committed
+ bool hexString; // if string is parsed from a hex string literal
static StringExp *create(const Loc &loc, const char *s);
static StringExp *create(const Loc &loc, const void *s, d_size_t len);
@@ -446,6 +444,7 @@ public:
OwnedBy ownedByCtfe;
Expressions *keys;
Expressions *values;
+ Expression* lowering;
bool equals(const RootObject * const o) const override;
AssocArrayLiteralExp *syntaxCopy() override;
diff --git a/gcc/d/dmd/expressionsem.d b/gcc/d/dmd/expressionsem.d
index 69999cb..0bdcda9 100644
--- a/gcc/d/dmd/expressionsem.d
+++ b/gcc/d/dmd/expressionsem.d
@@ -171,7 +171,7 @@ StringExp semanticString(Scope *sc, Expression exp, const char* s)
auto se = e.toStringExp();
if (!se)
{
- exp.error("`string` expected for %s, not `(%s)` of type `%s`",
+ error(exp.loc, "`string` expected for %s, not `(%s)` of type `%s`",
s, exp.toChars(), exp.type.toChars());
return null;
}
@@ -224,7 +224,7 @@ Expression resolveOpDollar(Scope* sc, ArrayExp ae, Expression* pe0)
Lfallback:
if (ae.arguments.length == 1)
return null;
- ae.error("multi-dimensional slicing requires template `opSlice`");
+ error(ae.loc, "multi-dimensional slicing requires template `opSlice`");
return ErrorExp.get();
}
//printf("[%d] e = %s\n", i, e.toChars());
@@ -274,7 +274,7 @@ Expression resolveOpDollar(Scope* sc, ArrayExp ae, Expression* pe0)
if (!e.type)
{
- ae.error("`%s` has no value", e.toChars());
+ error(ae.loc, "`%s` has no value", e.toChars());
e = ErrorExp.get();
}
if (e.op == EXP.error)
@@ -311,7 +311,7 @@ Expression resolveOpDollar(Scope* sc, ArrayExp ae, IntervalExp ie, Expression* p
e = resolveProperties(sc, e);
if (!e.type)
{
- ae.error("`%s` has no value", e.toChars());
+ error(ae.loc, "`%s` has no value", e.toChars());
errors = true;
}
return e;
@@ -483,6 +483,18 @@ private Expression searchUFCS(Scope* sc, UnaExp ue, Identifier ident)
}
/******************************
+ * check e is exp.opDispatch!(tiargs) or not
+ * It's used to switch to UFCS the semantic analysis path
+ */
+private bool isDotOpDispatch(Expression e)
+{
+ if (auto dtie = e.isDotTemplateInstanceExp())
+ return dtie.ti.name == Id.opDispatch;
+ return false;
+}
+
+
+/******************************
* Pull out callable entity with UFCS.
*/
private Expression resolveUFCS(Scope* sc, CallExp ce)
@@ -519,12 +531,12 @@ private Expression resolveUFCS(Scope* sc, CallExp ce)
*/
if (!ce.arguments || ce.arguments.length != 1)
{
- ce.error("expected key as argument to `aa.remove()`");
+ error(ce.loc, "expected key as argument to `aa.remove()`");
return ErrorExp.get();
}
if (!eleft.type.isMutable())
{
- ce.error("cannot remove key from `%s` associative array `%s`", MODtoChars(t.mod), eleft.toChars());
+ error(ce.loc, "cannot remove key from `%s` associative array `%s`", MODtoChars(t.mod), eleft.toChars());
return ErrorExp.get();
}
Expression key = (*ce.arguments)[0];
@@ -638,6 +650,41 @@ private Expression resolveUFCS(Scope* sc, CallExp ce)
return null;
}
+int expandAliasThisTuples(Expressions* exps, size_t starti = 0)
+{
+ if (!exps || exps.length == 0)
+ return -1;
+
+ for (size_t u = starti; u < exps.length; u++)
+ {
+ Expression exp = (*exps)[u];
+ if (TupleDeclaration td = exp.isAliasThisTuple)
+ {
+ exps.remove(u);
+ size_t i;
+ td.foreachVar((s)
+ {
+ auto d = s.isDeclaration();
+ auto e = new DotVarExp(exp.loc, exp, d);
+ assert(d.type);
+ e.type = d.type;
+ exps.insert(u + i, e);
+ ++i;
+ });
+ version (none)
+ {
+ printf("expansion ->\n");
+ foreach (e; exps)
+ {
+ printf("\texps[%d] e = %s %s\n", i, EXPtoString(e.op), e.toChars());
+ }
+ }
+ return cast(int)u;
+ }
+ }
+ return -1;
+}
+
/******************************
* Pull out property with UFCS.
*/
@@ -1137,7 +1184,7 @@ L1:
*/
if (flag)
return null;
- e1.error("`this` for `%s` needs to be type `%s` not type `%s`", var.toChars(), ad.toChars(), t.toChars());
+ error(e1.loc, "`this` for `%s` needs to be type `%s` not type `%s`", var.toChars(), ad.toChars(), t.toChars());
return ErrorExp.get();
}
}
@@ -1197,6 +1244,11 @@ private bool haveSameThis(FuncDeclaration outerFunc, FuncDeclaration calledFunc)
if (thisAd == requiredAd)
return true;
+ // if outerfunc is the member of a nested aggregate, then let
+ // getRightThis take care of this.
+ if (thisAd.isNested())
+ return true;
+
// outerfunc is the member of a base class that contains calledFunc,
// then we consider that they have the same this.
auto cd = requiredAd.isClassDeclaration();
@@ -1206,11 +1258,6 @@ private bool haveSameThis(FuncDeclaration outerFunc, FuncDeclaration calledFunc)
if (cd.isBaseOf2(thisAd.isClassDeclaration()))
return true;
- // if outerfunc is the member of a nested aggregate, then let
- // getRightThis take care of this.
- if (thisAd.isNested())
- return true;
-
return false;
}
@@ -1510,7 +1557,7 @@ private Type arrayExpressionToCommonType(Scope* sc, ref Expressions exps)
e = resolveProperties(sc, e);
if (!e.type)
{
- e.error("`%s` has no value", e.toChars());
+ error(e.loc, "`%s` has no value", e.toChars());
t0 = Type.terror;
continue;
}
@@ -1706,7 +1753,7 @@ private bool preFunctionParameters(Scope* sc, ArgumentList argumentList, const b
{
if (reportErrors)
{
- arg.error("cannot pass type `%s` as a function argument", arg.toChars());
+ error(arg.loc, "cannot pass type `%s` as a function argument", arg.toChars());
arg = ErrorExp.get();
}
err = true;
@@ -1716,7 +1763,7 @@ private bool preFunctionParameters(Scope* sc, ArgumentList argumentList, const b
{
if (reportErrors)
{
- arg.error("cannot pass function `%s` as a function argument", arg.toChars());
+ error(arg.loc, "cannot pass function `%s` as a function argument", arg.toChars());
arg = ErrorExp.get();
}
err = true;
@@ -1745,7 +1792,7 @@ private bool checkDefCtor(Loc loc, Type t)
StructDeclaration sd = ts.sym;
if (sd.noDefaultCtor)
{
- sd.error(loc, "default construction is disabled");
+ .error(loc, "%s `%s` default construction is disabled", sd.kind, sd.toPrettyChars);
return true;
}
}
@@ -1887,7 +1934,7 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
}
else
{
- if (isDefaultInitOp(arg.op))
+ if (arg.isDefaultInitExp())
{
arg = arg.resolveLoc(loc, sc);
(*arguments)[i] = arg;
@@ -2138,7 +2185,7 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
Type t = arg.type;
if (!t.isMutable() || !t.isAssignable()) // check blit assignable
{
- arg.error("cannot modify struct `%s` with immutable members", arg.toChars());
+ error(arg.loc, "cannot modify struct `%s` with immutable members", arg.toChars());
err = true;
}
else
@@ -2270,12 +2317,12 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
const(char)* p = tf.linkage == LINK.c ? "extern(C)" : "extern(C++)";
if (arg.type.ty == Tarray)
{
- arg.error("cannot pass dynamic arrays to `%s` vararg functions", p);
+ error(arg.loc, "cannot pass dynamic arrays to `%s` vararg functions", p);
err = true;
}
if (arg.type.ty == Tsarray)
{
- arg.error("cannot pass static arrays to `%s` vararg functions", p);
+ error(arg.loc, "cannot pass static arrays to `%s` vararg functions", p);
err = true;
}
}
@@ -2284,12 +2331,12 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
// Do not allow types that need destructors or copy constructors.
if (arg.type.needsDestruction())
{
- arg.error("cannot pass types that need destruction as variadic arguments");
+ error(arg.loc, "cannot pass types that need destruction as variadic arguments");
err = true;
}
if (arg.type.needsCopyOrPostblit())
{
- arg.error("cannot pass types with postblits or copy constructors as variadic arguments");
+ error(arg.loc, "cannot pass types with postblits or copy constructors as variadic arguments");
err = true;
}
@@ -2313,12 +2360,13 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
{
if (se.hasOverloads && !se.var.isFuncDeclaration().isUnique())
{
- arg.error("function `%s` is overloaded", arg.toChars());
+ error(arg.loc, "function `%s` is overloaded", arg.toChars());
err = true;
}
}
err |= arg.checkValue();
err |= arg.checkSharedAccess(sc);
+ err |= checkParamArgumentEscape(sc, fd, Id.dotdotdot, null, cast(STC) tf.parameterList.stc, arg, false, false);
arg = arg.optimize(WANTvalue);
}
(*arguments)[i] = arg;
@@ -2331,14 +2379,14 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
{
if (auto se = (*arguments)[nparams - 1 - isVa_list].isStringExp())
{
- checkPrintfFormat(se.loc, se.peekString(), (*arguments)[nparams .. nargs], isVa_list);
+ checkPrintfFormat(se.loc, se.peekString(), (*arguments)[nparams .. nargs], isVa_list, sc.eSink);
}
}
else if (fd && fd.scanf)
{
if (auto se = (*arguments)[nparams - 1 - isVa_list].isStringExp())
{
- checkScanfFormat(se.loc, se.peekString(), (*arguments)[nparams .. nargs], isVa_list);
+ checkScanfFormat(se.loc, se.peekString(), (*arguments)[nparams .. nargs], isVa_list, sc.eSink);
}
}
else
@@ -2369,7 +2417,7 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
for (ptrdiff_t i = 0; i != nargs; i++)
{
Expression arg = (*arguments)[i];
- if (canThrow(arg, sc.func, false))
+ if (canThrow(arg, sc.func, null))
lastthrow = i;
if (arg.type.needsDestruction())
{
@@ -2528,7 +2576,8 @@ private bool functionParameters(const ref Loc loc, Scope* sc,
auto args = new Parameters(arguments.length - nparams);
for (size_t i = 0; i < arguments.length - nparams; i++)
{
- auto arg = new Parameter(STC.in_, (*arguments)[nparams + i].type, null, null, null);
+ Expression earg = (*arguments)[nparams + i];
+ auto arg = new Parameter(earg.loc, STC.in_, earg.type, null, null, null);
(*args)[i] = arg;
}
auto tup = new TypeTuple(args);
@@ -2750,7 +2799,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
Dsymbol s2;
if (scx.scopesym && scx.scopesym.symtab && (s2 = scx.scopesym.symtab.lookup(s.ident)) !is null && s != s2)
{
- exp.error("with symbol `%s` is shadowing local symbol `%s`", s.toPrettyChars(), s2.toPrettyChars());
+ error(exp.loc, "with symbol `%s` is shadowing local symbol `%s`", s.toPrettyChars(), s2.toPrettyChars());
return setError();
}
}
@@ -2850,7 +2899,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (sc.flags & SCOPE.ctfe)
{
- exp.error("variable `__ctfe` cannot be read at compile time");
+ error(exp.loc, "variable `__ctfe` cannot be read at compile time");
return setError();
}
@@ -2912,15 +2961,15 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
/* Look for what user might have meant
*/
if (const n = importHint(exp.ident.toString()))
- exp.error("`%s` is not defined, perhaps `import %.*s;` is needed?", exp.ident.toChars(), cast(int)n.length, n.ptr);
+ error(exp.loc, "`%s` is not defined, perhaps `import %.*s;` is needed?", exp.ident.toChars(), cast(int)n.length, n.ptr);
else if (auto s2 = sc.search_correct(exp.ident))
- exp.error("undefined identifier `%s`, did you mean %s `%s`?", exp.ident.toChars(), s2.kind(), s2.toChars());
+ error(exp.loc, "undefined identifier `%s`, did you mean %s `%s`?", exp.ident.toChars(), s2.kind(), s2.toChars());
else if (const p = Scope.search_correct_C(exp.ident))
- exp.error("undefined identifier `%s`, did you mean `%s`?", exp.ident.toChars(), p);
+ error(exp.loc, "undefined identifier `%s`, did you mean `%s`?", exp.ident.toChars(), p);
else if (exp.ident == Id.dollar)
- exp.error("undefined identifier `$`");
+ error(exp.loc, "undefined identifier `$`");
else
- exp.error("undefined identifier `%s`", exp.ident.toChars());
+ error(exp.loc, "undefined identifier `%s`", exp.ident.toChars());
result = ErrorExp.get();
}
@@ -2955,7 +3004,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (!s)
{
- e.error("`%s` is not in a class or struct scope", e.toChars());
+ error(e.loc, "`%s` is not in a class or struct scope", e.toChars());
return setError();
}
ClassDeclaration cd = s.isClassDeclaration();
@@ -2976,7 +3025,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (!fd)
{
- e.error("`this` is only defined in non-static member functions, not `%s`", sc.parent.toChars());
+ error(e.loc, "`this` is only defined in non-static member functions, not `%s`", sc.parent.toChars());
return setError();
}
@@ -3021,7 +3070,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (!s)
{
- e.error("`%s` is not in a class scope", e.toChars());
+ error(e.loc, "`%s` is not in a class scope", e.toChars());
return setError();
}
cd = s.isClassDeclaration();
@@ -3030,7 +3079,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
cd = cd.baseClass;
if (!cd)
{
- e.error("class `%s` has no `super`", s.toChars());
+ error(e.loc, "class `%s` has no `super`", s.toChars());
return setError();
}
e.type = cd.type;
@@ -3055,7 +3104,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
goto Lerr;
if (!cd.baseClass)
{
- e.error("no base class for `%s`", cd.toChars());
+ error(e.loc, "no base class for `%s`", cd.toChars());
e.type = cd.type.addMod(e.var.type.mod);
}
else
@@ -3071,7 +3120,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
return;
Lerr:
- e.error("`super` is only allowed in non-static class member functions");
+ error(e.loc, "`super` is only allowed in non-static class member functions");
result = ErrorExp.get();
}
@@ -3115,7 +3164,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (const p = utf_decodeChar(e.peekString(), u, c))
{
- e.error("%.*s", cast(int)p.length, p.ptr);
+ error(e.loc, "%.*s", cast(int)p.length, p.ptr);
return setError();
}
else
@@ -3138,7 +3187,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (const p = utf_decodeChar(e.peekString(), u, c))
{
- e.error("%.*s", cast(int)p.length, p.ptr);
+ error(e.loc, "%.*s", cast(int)p.length, p.ptr);
return setError();
}
else
@@ -3199,7 +3248,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
e = e.expressionSemantic(sc);
if (!e.type)
{
- exp.error("`%s` has no value", e.toChars());
+ error(exp.loc, "`%s` has no value", e.toChars());
err = true;
}
else if (e.op == EXP.error)
@@ -3255,7 +3304,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
*/
if (e.elements.length > 0 && t0.ty == Tvoid)
{
- e.error("`%s` of type `%s` has no value", e.toChars(), e.type.toChars());
+ error(e.loc, "`%s` of type `%s` has no value", e.toChars(), e.type.toChars());
return setError();
}
@@ -3287,7 +3336,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
expandTuples(e.values);
if (e.keys.length != e.values.length)
{
- e.error("number of keys is %llu, must match number of values %llu",
+ error(e.loc, "number of keys is %llu, must match number of values %llu",
cast(ulong) e.keys.length, cast(ulong) e.values.length);
return setError();
}
@@ -3498,7 +3547,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (!v.type)
{
- exp.error("forward reference of %s `%s`", v.kind(), v.toChars());
+ error(exp.loc, "forward reference of %s `%s`", v.kind(), v.toChars());
return setError();
}
if ((v.storage_class & STC.manifest) && v._init)
@@ -3519,7 +3568,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
*/
if (ti.inuse)
{
- exp.error("recursive expansion of %s `%s`", ti.kind(), ti.toPrettyChars());
+ error(exp.loc, "recursive expansion of %s `%s`", ti.kind(), ti.toPrettyChars());
return setError();
}
v.checkDeprecated(exp.loc, sc);
@@ -3656,7 +3705,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
cdthis = exp.thisexp.type.isClassHandle();
if (!cdthis)
{
- exp.error("`this` for nested class must be a class type, not `%s`", exp.thisexp.type.toChars());
+ error(exp.loc, "`this` for nested class must be a class type, not `%s`", exp.thisexp.type.toChars());
return setError();
}
@@ -3704,7 +3753,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (exp.thisexp && tb.ty != Tclass)
{
- exp.error("`.new` is only for allocating nested classes, not `%s`", tb.toChars());
+ error(exp.loc, "`.new` is only for allocating nested classes, not `%s`", tb.toChars());
return setError();
}
@@ -3723,19 +3772,19 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
cd.ctor = cd.searchCtor();
if (cd.noDefaultCtor && !nargs && !cd.defaultCtor)
{
- exp.error("default construction is disabled for type `%s`", cd.type.toChars());
+ error(exp.loc, "default construction is disabled for type `%s`", cd.type.toChars());
return setError();
}
if (cd.isInterfaceDeclaration())
{
- exp.error("cannot create instance of interface `%s`", cd.toChars());
+ error(exp.loc, "cannot create instance of interface `%s`", cd.toChars());
return setError();
}
if (cd.isAbstract())
{
- exp.error("cannot create instance of abstract class `%s`", cd.toChars());
+ error(exp.loc, "cannot create instance of abstract class `%s`", cd.toChars());
errorSupplemental(cd.loc, "class `%s` is declared here", cd.toChars());
for (size_t i = 0; i < cd.vtbl.length; i++)
{
@@ -3765,9 +3814,9 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
void noReferenceToOuterClass()
{
if (cd.isAnonymous)
- exp.error("cannot construct anonymous nested class because no implicit `this` reference to outer class is available");
+ error(exp.loc, "cannot construct anonymous nested class because no implicit `this` reference to outer class is available");
else
- exp.error("cannot construct nested class `%s` because no implicit `this` reference to outer class `%s` is available",
+ error(exp.loc, "cannot construct nested class `%s` because no implicit `this` reference to outer class `%s` is available",
cd.toChars(), cdn.toChars());
return setError();
}
@@ -3798,20 +3847,20 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (cdthis != cdn && !cdn.isBaseOf(cdthis, null))
{
//printf("cdthis = %s\n", cdthis.toChars());
- exp.error("`this` for nested class must be of type `%s`, not `%s`",
+ error(exp.loc, "`this` for nested class must be of type `%s`, not `%s`",
cdn.toChars(), exp.thisexp.type.toChars());
return setError();
}
if (!MODimplicitConv(exp.thisexp.type.mod, exp.newtype.mod))
{
- exp.error("nested type `%s` should have the same or weaker constancy as enclosing type `%s`",
+ error(exp.loc, "nested type `%s` should have the same or weaker constancy as enclosing type `%s`",
exp.newtype.toChars(), exp.thisexp.type.toChars());
return setError();
}
}
else if (exp.thisexp)
{
- exp.error("`.new` is only for allocating nested classes");
+ error(exp.loc, "`.new` is only for allocating nested classes");
return setError();
}
else if (auto fdn = s.isFuncDeclaration())
@@ -3819,7 +3868,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// make sure the parent context fdn of cd is reachable from sc
if (!ensureStaticLinkTo(sc.parent, fdn))
{
- exp.error("outer function context of `%s` is needed to `new` nested class `%s`",
+ error(exp.loc, "outer function context of `%s` is needed to `new` nested class `%s`",
fdn.toPrettyChars(), cd.toPrettyChars());
return setError();
}
@@ -3829,7 +3878,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else if (exp.thisexp)
{
- exp.error("`.new` is only for allocating nested classes");
+ error(exp.loc, "`.new` is only for allocating nested classes");
return setError();
}
@@ -3842,7 +3891,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
te = getRightThis(exp.loc, sc, ad2, te, cd);
if (te.op == EXP.error)
{
- exp.error("need `this` of type `%s` needed to `new` nested class `%s`", ad2.toChars(), cd.toChars());
+ error(exp.loc, "need `this` of type `%s` needed to `new` nested class `%s`", ad2.toChars(), cd.toChars());
return setError();
}
}
@@ -3850,7 +3899,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (cd.disableNew && !exp.onstack)
{
- exp.error("cannot allocate `class %s` with `new` because it is annotated with `@disable new()`",
+ error(exp.loc, "cannot allocate `class %s` with `new` because it is annotated with `@disable new()`",
originalNewtype.toChars());
return setError();
}
@@ -3877,7 +3926,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (nargs)
{
- exp.error("no constructor for `%s`", cd.toChars());
+ error(exp.loc, "no constructor for `%s`", cd.toChars());
return setError();
}
@@ -3889,10 +3938,12 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
foreach (v; c.fields)
{
if (v.inuse || v._scope is null || v._init is null ||
- v._init.isVoidInitializer())
+ v._init.isVoidInitializer() || v.semanticRun >= PASS.semantic2done)
continue;
v.inuse++;
v._init = v._init.initializerSemantic(v._scope, v.type, INITinterpret);
+ import dmd.semantic2 : lowerStaticAAs;
+ lowerStaticAAs(v, sc);
v.inuse--;
}
}
@@ -3968,14 +4019,14 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
sd.ctor = sd.searchCtor();
if (sd.noDefaultCtor && !nargs)
{
- exp.error("default construction is disabled for type `%s`", sd.type.toChars());
+ error(exp.loc, "default construction is disabled for type `%s`", sd.type.toChars());
return setError();
}
// checkDeprecated() is already done in newtype.typeSemantic().
if (sd.disableNew)
{
- exp.error("cannot allocate `struct %s` with `new` because it is annotated with `@disable new()`",
+ error(exp.loc, "cannot allocate `struct %s` with `new` because it is annotated with `@disable new()`",
originalNewtype.toChars());
return setError();
}
@@ -4049,7 +4100,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
// https://issues.dlang.org/show_bug.cgi?id=20422
// Without this check the compiler would give a misleading error
- exp.error("missing length argument for array");
+ error(exp.loc, "missing length argument for array");
return setError();
}
@@ -4058,21 +4109,21 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
AggregateDeclaration ad = s ? s.isAggregateDeclaration() : null;
if (ad && ad.noDefaultCtor)
{
- exp.error("default construction is disabled for type `%s`", tb.nextOf().toChars());
+ error(exp.loc, "default construction is disabled for type `%s`", tb.nextOf().toChars());
return setError();
}
for (size_t i = 0; i < nargs; i++)
{
if (tb.ty != Tarray)
{
- exp.error("too many arguments for array");
+ error(exp.loc, "too many arguments for array");
return setError();
}
Expression arg = (*exp.arguments)[i];
if (exp.names && (*exp.names)[i])
{
- exp.error("no named argument `%s` allowed for array dimension", (*exp.names)[i].toChars());
+ error(exp.loc, "no named argument `%s` allowed for array dimension", (*exp.names)[i].toChars());
return setError();
}
@@ -4084,7 +4135,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (arg.op == EXP.int64 && (target.isLP64 ?
cast(sinteger_t)arg.toInteger() : cast(int)arg.toInteger()) < 0)
{
- exp.error("negative array dimension `%s`", (*exp.arguments)[i].toChars());
+ error(exp.loc, "negative array dimension `%s`", (*exp.arguments)[i].toChars());
return setError();
}
(*exp.arguments)[i] = arg;
@@ -4100,7 +4151,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (exp.names && (*exp.names)[0])
{
- exp.error("no named argument `%s` allowed for scalar", (*exp.names)[0].toChars());
+ error(exp.loc, "no named argument `%s` allowed for scalar", (*exp.names)[0].toChars());
return setError();
}
Expression e = (*exp.arguments)[0];
@@ -4109,7 +4160,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- exp.error("more than one argument for construction of `%s`", exp.type.toChars());
+ error(exp.loc, "more than one argument for construction of `%s`", exp.type.toChars());
return setError();
}
@@ -4121,13 +4172,13 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// e.g. `new Alias(args)`
if (nargs)
{
- exp.error("`new` cannot take arguments for an associative array");
+ error(exp.loc, "`new` cannot take arguments for an associative array");
return setError();
}
}
else
{
- exp.error("cannot create a `%s` with `new`", exp.type.toChars());
+ error(exp.loc, "cannot create a `%s` with `new`", exp.type.toChars());
return setError();
}
@@ -4312,7 +4363,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (exp.fd.treq) // defer type determination
{
FuncExp fe;
- if (exp.matchType(exp.fd.treq, sc, &fe) > MATCH.nomatch)
+ if (exp.matchType(exp.fd.treq, sc, &fe, sc.eSink) > MATCH.nomatch)
e = fe;
else
e = ErrorExp.get();
@@ -4421,10 +4472,10 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
OutBuffer buf;
foreach (idx, ref arg; *arguments)
buf.printf("%s%s", (idx ? ", ".ptr : "".ptr), arg.type.toChars());
- exp.error("function literal `%s%s` is not callable using argument types `(%s)`",
+ error(exp.loc, "function literal `%s%s` is not callable using argument types `(%s)`",
exp.fd.toChars(), parametersTypeToChars(tfl.parameterList),
buf.peekChars());
- exp.errorSupplemental("too %s arguments, expected %d, got %d",
+ errorSupplemental(exp.loc, "too %s arguments, expected %d, got %d",
arguments.length < dim ? "few".ptr : "many".ptr,
cast(int)dim, cast(int)arguments.length);
return ErrorExp.get();
@@ -4634,7 +4685,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
__gshared int nest;
if (++nest > global.recursionLimit)
{
- exp.error("recursive evaluation of `%s`", exp.toChars());
+ error(exp.loc, "recursive evaluation of `%s`", exp.toChars());
--nest;
return setError();
}
@@ -4713,12 +4764,12 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- arg.error("identifier or `(` expected");
+ error(arg.loc, "identifier or `(` expected");
result = ErrorExp.get();
}
return;
}
- exp.error("identifier or `(` expected before `)`");
+ error(exp.loc, "identifier or `(` expected before `)`");
result = ErrorExp.get();
return;
}
@@ -4823,7 +4874,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
exp.e1 = resolveAliasThis(sc, exp.e1);
goto Lagain;
}
- exp.error("%s `%s` does not overload ()", sd.kind(), sd.toChars());
+ error(exp.loc, "%s `%s` does not overload ()", sd.kind(), sd.toChars());
return setError();
}
@@ -4884,7 +4935,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- exp.error("more than one argument for construction of `%s`", t1.toChars());
+ error(exp.loc, "more than one argument for construction of `%s`", t1.toChars());
return setError();
}
e = e.expressionSemantic(sc);
@@ -5008,7 +5059,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
*/
if (sc.func && sc.func.isInvariantDeclaration() && ue.e1.op == EXP.this_ && exp.f.addPostInvariant())
{
- exp.error("cannot call `public`/`export` function `%s` from invariant", exp.f.toChars());
+ error(exp.loc, "cannot call `public`/`export` function `%s` from invariant", exp.f.toChars());
return setError();
}
@@ -5024,7 +5075,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else if (!checkSymbolAccess(sc, exp.f))
{
- exp.error("%s `%s` of type `%s` is not accessible from module `%s`",
+ error(exp.loc, "%s `%s` of type `%s` is not accessible from module `%s`",
exp.f.kind(), exp.f.toPrettyChars(), exp.f.type.toChars(), sc._module.toChars);
return setError();
}
@@ -5102,12 +5153,12 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// Base class constructor call
if (!cd || !cd.baseClass || !sc.func.isCtorDeclaration())
{
- exp.error("super class constructor call must be in a constructor");
+ error(exp.loc, "super class constructor call must be in a constructor");
return setError();
}
if (!cd.baseClass.ctor)
{
- exp.error("no super class constructor for `%s`", cd.baseClass.toChars());
+ error(exp.loc, "no super class constructor for `%s`", cd.baseClass.toChars());
return setError();
}
}
@@ -5117,7 +5168,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// constructor
if (!ad || !sc.func.isCtorDeclaration())
{
- exp.error("constructor call must be in a constructor");
+ error(exp.loc, "constructor call must be in a constructor");
return setError();
}
@@ -5134,11 +5185,11 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!sc.intypeof && !(sc.ctorflow.callSuper & CSX.halt))
{
if (sc.inLoop || sc.ctorflow.callSuper & CSX.label)
- exp.error("constructor calls not allowed in loops or after labels");
+ error(exp.loc, "constructor calls not allowed in loops or after labels");
if (sc.ctorflow.callSuper & (CSX.super_ctor | CSX.this_ctor))
- exp.error("multiple constructor calls");
+ error(exp.loc, "multiple constructor calls");
if ((sc.ctorflow.callSuper & CSX.return_) && !(sc.ctorflow.callSuper & CSX.any_ctor))
- exp.error("an earlier `return` statement skips constructor");
+ error(exp.loc, "an earlier `return` statement skips constructor");
sc.ctorflow.callSuper |= CSX.any_ctor | (isSuper ? CSX.super_ctor : CSX.this_ctor);
}
@@ -5166,7 +5217,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// call graph
if (exp.f == sc.func)
{
- exp.error("cyclic constructor call");
+ error(exp.loc, "cyclic constructor call");
return setError();
}
}
@@ -5183,7 +5234,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else if (!t1)
{
- exp.error("function expected before `()`, not `%s`", exp.e1.toChars());
+ error(exp.loc, "function expected before `()`, not `%s`", exp.e1.toChars());
return setError();
}
else if (t1.ty == Terror)
@@ -5266,7 +5317,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- exp.error("function expected before `()`, not `%s` of type `%s`", exp.e1.toChars(), exp.e1.type.toChars());
+ error(exp.loc, "function expected before `()`, not `%s` of type `%s`", exp.e1.toChars(), exp.e1.type.toChars());
return setError();
}
@@ -5275,10 +5326,10 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
OutBuffer buf;
buf.writeByte('(');
- argExpTypesToCBuffer(&buf, exp.arguments);
+ argExpTypesToCBuffer(buf, exp.arguments);
buf.writeByte(')');
if (tthis)
- tthis.modToBuffer(&buf);
+ tthis.modToBuffer(buf);
//printf("tf = %s, args = %s\n", tf.deco, (*arguments)[0].type.deco);
.error(exp.loc, "%s `%s%s` is not callable using argument types `%s`",
@@ -5301,20 +5352,20 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
bool err = false;
if (!tf.purity && sc.func.setImpure(exp.loc, "`pure` %s `%s` cannot call impure `%s`", exp.e1))
{
- exp.error("`pure` %s `%s` cannot call impure %s `%s`",
+ error(exp.loc, "`pure` %s `%s` cannot call impure %s `%s`",
sc.func.kind(), sc.func.toPrettyChars(), p, exp.e1.toChars());
err = true;
}
if (!tf.isnogc && sc.func.setGC(exp.loc, "`@nogc` %s `%s` cannot call non-@nogc `%s`", exp.e1))
{
- exp.error("`@nogc` %s `%s` cannot call non-@nogc %s `%s`",
+ error(exp.loc, "`@nogc` %s `%s` cannot call non-@nogc %s `%s`",
sc.func.kind(), sc.func.toPrettyChars(), p, exp.e1.toChars());
err = true;
}
if (tf.trust <= TRUST.system && sc.setUnsafe(true, exp.loc,
"`@safe` function `%s` cannot call `@system` `%s`", sc.func, exp.e1))
{
- exp.error("`@safe` %s `%s` cannot call `@system` %s `%s`",
+ error(exp.loc, "`@safe` %s `%s` cannot call `@system` %s `%s`",
sc.func.kind(), sc.func.toPrettyChars(), p, exp.e1.toChars());
err = true;
}
@@ -5348,7 +5399,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
OutBuffer buf;
buf.writeByte('(');
- argExpTypesToCBuffer(&buf, exp.arguments);
+ argExpTypesToCBuffer(buf, exp.arguments);
buf.writeByte(')');
//printf("tf = %s, args = %s\n", tf.deco, (*arguments)[0].type.deco);
@@ -5432,7 +5483,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
exp.e1 = e1org; // https://issues.dlang.org/show_bug.cgi?id=10922
// avoid recursive expression printing
- exp.error("forward reference to inferred return type of function call `%s`", exp.toChars());
+ error(exp.loc, "forward reference to inferred return type of function call `%s`", exp.toChars());
return setError();
}
@@ -5484,6 +5535,14 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
}
+ // `super.fun()` with fun being abstract and unimplemented
+ auto supDotFun = exp.e1.isDotVarExp();
+ if (supDotFun && supDotFun.e1.isSuperExp() && exp.f && exp.f.isAbstract() && !exp.f.fbody)
+ {
+ error(exp.loc, "call to unimplemented abstract function `%s`", exp.f.toFullSignature());
+ errorSupplemental(exp.loc, "declared here: %s", exp.f.loc.toChars());
+ }
+
// declare dual-context container
if (exp.f && exp.f.hasDualContext() && !sc.intypeof && sc.func)
{
@@ -5495,7 +5554,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
te = getRightThis(exp.loc, sc, ad2, te, exp.f);
if (te.op == EXP.error)
{
- exp.error("need `this` of type `%s` to call function `%s`", ad2.toChars(), exp.f.toChars());
+ error(exp.loc, "need `this` of type `%s` to call function `%s`", ad2.toChars(), exp.f.toChars());
return setError();
}
}
@@ -5575,7 +5634,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!sc.insert(s))
{
auto conflict = sc.search(Loc.initial, s.ident, null);
- e.error("declaration `%s` is already defined", s.toPrettyChars());
+ error(e.loc, "declaration `%s` is already defined", s.toPrettyChars());
errorSupplemental(conflict.loc, "`%s` `%s` is defined here",
conflict.kind(), conflict.toChars());
return setError();
@@ -5609,7 +5668,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// 65535 should be enough for anyone
if (!s.localNum)
{
- e.error("more than 65535 symbols with name `%s` generated", s.ident.toChars());
+ error(e.loc, "more than 65535 symbols with name `%s` generated", s.ident.toChars());
return setError();
}
@@ -5642,11 +5701,11 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (sc.func.fes)
{
- e.deprecation("%s `%s` is shadowing %s `%s`. Rename the `foreach` variable.", s.kind(), s.ident.toChars(), s2.kind(), s2.toPrettyChars());
+ deprecation(e.loc, "%s `%s` is shadowing %s `%s`. Rename the `foreach` variable.", s.kind(), s.ident.toChars(), s2.kind(), s2.toPrettyChars());
}
else
{
- e.error("%s `%s` is shadowing %s `%s`", s.kind(), s.ident.toChars(), s2.kind(), s2.toPrettyChars());
+ error(e.loc, "%s `%s` is shadowing %s `%s`", s.kind(), s.ident.toChars(), s2.kind(), s2.toPrettyChars());
return setError();
}
}
@@ -5711,7 +5770,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!ta)
{
//printf("ta %p ea %p sa %p\n", ta, ea, sa);
- exp.error("no type for `typeid(%s)`", ea ? ea.toChars() : (sa ? sa.toChars() : ""));
+ error(exp.loc, "no type for `typeid(%s)`", ea ? ea.toChars() : (sa ? sa.toChars() : ""));
return setError();
}
@@ -5817,7 +5876,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!tup && !sc.insert(s))
{
auto conflict = sc.search(Loc.initial, s.ident, null);
- e.error("declaration `%s` is already defined", s.toPrettyChars());
+ error(e.loc, "declaration `%s` is already defined", s.toPrettyChars());
errorSupplemental(conflict.loc, "`%s` `%s` is defined here",
conflict.kind(), conflict.toChars());
}
@@ -5838,7 +5897,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (e.id && !(sc.flags & SCOPE.condition))
{
- e.error("can only declare type aliases within `static if` conditionals or `static assert`s");
+ error(e.loc, "can only declare type aliases within `static if` conditionals or `static assert`s");
return setError();
}
@@ -5947,7 +6006,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
for (size_t i = 0; i < cd.baseclasses.length; i++)
{
BaseClass* b = (*cd.baseclasses)[i];
- args.push(new Parameter(STC.in_, b.type, null, null, null));
+ args.push(new Parameter(Loc.initial, STC.in_, b.type, null, null, null));
}
tded = new TypeTuple(args);
}
@@ -5993,7 +6052,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
*/
if (e.tok2 == TOK.parameters && arg.defaultArg && arg.defaultArg.op == EXP.error)
return setError();
- args.push(new Parameter(arg.storageClass, arg.type, (e.tok2 == TOK.parameters) ? arg.ident : null, (e.tok2 == TOK.parameters) ? arg.defaultArg : null, arg.userAttribDecl));
+ args.push(new Parameter(arg.loc, arg.storageClass, arg.type, (e.tok2 == TOK.parameters) ? arg.ident : null, (e.tok2 == TOK.parameters) ? arg.defaultArg : null, arg.userAttribDecl));
}
tded = new TypeTuple(args);
break;
@@ -6124,7 +6183,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!sc.insert(s))
{
auto conflict = sc.search(Loc.initial, s.ident, null);
- e.error("declaration `%s` is already defined", s.toPrettyChars());
+ error(e.loc, "declaration `%s` is already defined", s.toPrettyChars());
errorSupplemental(conflict.loc, "`%s` `%s` is defined here",
conflict.kind(), conflict.toChars());
}
@@ -6273,7 +6332,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
const bool doUnittests = global.params.useUnitTests || global.params.ddoc.doOutput || global.params.dihdr.doOutput;
auto loc = adjustLocForMixin(str, exp.loc, global.params.mixinOut);
scope p = new Parser!ASTCodegen(loc, sc._module, str, false, global.errorSink, &global.compileEnv, doUnittests);
- p.transitionIn = global.params.vin;
+ p.transitionIn = global.params.v.vin;
p.nextToken();
//printf("p.loc.linnum = %d\n", p.loc.linnum);
@@ -6283,9 +6342,9 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (p.token.value != TOK.endOfFile)
{
- e.error("unexpected token `%s` after %s expression",
+ error(e.loc, "unexpected token `%s` after %s expression",
p.token.toChars(), EXPtoString(e.op).ptr);
- e.errorSupplemental("while parsing string mixin expression `%s`",
+ errorSupplemental(e.loc, "while parsing string mixin expression `%s`",
str.ptr);
return null;
}
@@ -6323,7 +6382,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
auto namez = se.toStringz();
if (!global.filePath)
{
- e.error("need `-J` switch to import text file `%s`", namez.ptr);
+ error(e.loc, "need `-J` switch to import text file `%s`", namez.ptr);
return setError();
}
@@ -6334,41 +6393,41 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (FileName.absolute(namez))
{
- e.error("absolute path is not allowed in import expression: `%s`", se.toChars());
+ error(e.loc, "absolute path is not allowed in import expression: `%s`", se.toChars());
return setError();
}
auto idxReserved = FileName.findReservedChar(namez);
if (idxReserved != size_t.max)
{
- e.error("`%s` is not a valid filename on this platform", se.toChars());
- e.errorSupplemental("Character `'%c'` is reserved and cannot be used", namez[idxReserved]);
+ error(e.loc, "`%s` is not a valid filename on this platform", se.toChars());
+ errorSupplemental(e.loc, "Character `'%c'` is reserved and cannot be used", namez[idxReserved]);
return setError();
}
if (FileName.refersToParentDir(namez))
{
- e.error("path refers to parent (`..`) directory: `%s`", se.toChars());
+ error(e.loc, "path refers to parent (`..`) directory: `%s`", se.toChars());
return setError();
}
auto resolvedNamez = FileName.searchPath(global.filePath, namez, false);
if (!resolvedNamez)
{
- e.error("file `%s` cannot be found or not in a path specified with `-J`", se.toChars());
- e.errorSupplemental("Path(s) searched (as provided by `-J`):");
+ error(e.loc, "file `%s` cannot be found or not in a path specified with `-J`", se.toChars());
+ errorSupplemental(e.loc, "Path(s) searched (as provided by `-J`):");
foreach (idx, path; *global.filePath)
{
const attr = FileName.exists(path);
const(char)* err = attr == 2 ? "" :
(attr == 1 ? " (not a directory)" : " (path not found)");
- e.errorSupplemental("[%llu]: `%s`%s", cast(ulong)idx, path, err);
+ errorSupplemental(e.loc, "[%llu]: `%s`%s", cast(ulong)idx, path, err);
}
return setError();
}
sc._module.contentImportedFiles.push(resolvedNamez.ptr);
- if (global.params.verbose)
+ if (global.params.v.verbose)
{
const slice = se.peekString();
message("file %.*s\t(%s)", cast(int)slice.length, slice.ptr, resolvedNamez.ptr);
@@ -6405,7 +6464,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- e.error("cannot read file `%s`", resolvedNamez.ptr);
+ error(e.loc, "cannot read file `%s`", resolvedNamez.ptr);
return setError();
}
}
@@ -6423,7 +6482,9 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
const generateMsg = !exp.msg &&
sc.needsCodegen() && // let ctfe interpreter handle the error message
global.params.checkAction == CHECKACTION.context &&
- global.params.useAssert == CHECKENABLE.on;
+ global.params.useAssert == CHECKENABLE.on &&
+ !((exp.e1.isIntegerExp() && (exp.e1.toInteger() == 0)) ||
+ exp.e1.isNullExp());
Expression temporariesPrefix;
if (generateMsg)
@@ -6772,7 +6833,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
dmd.typesem.resolve(exp.e1.type, exp.e1.loc, sc, e, t, s, true);
if (e)
{
- exp.e1.error("argument to `_Alignof` must be a type");
+ error(exp.e1.loc, "argument to `_Alignof` must be a type");
return setError();
}
else if (t)
@@ -6785,7 +6846,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else if (s)
{
- exp.e1.error("argument to `_Alignof` must be a type");
+ error(exp.e1.loc, "argument to `_Alignof` must be a type");
return setError();
}
else
@@ -6904,7 +6965,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
e = new TypeExp(exp.loc, cast(Type)o);
break;
default:
- exp.error("`%s` is not an expression", o.toChars());
+ error(exp.loc, "`%s` is not an expression", o.toChars());
return setError();
}
if (var)
@@ -7067,7 +7128,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
OutBuffer thisBuf, funcBuf;
MODMatchToBuffer(&thisBuf, e.e1.type.mod, tf.mod);
MODMatchToBuffer(&funcBuf, tf.mod, e.e1.type.mod);
- e.error("%smethod `%s` is not callable using a %s`%s`",
+ error(e.loc, "%smethod `%s` is not callable using a %s`%s`",
funcBuf.peekChars(), f.toPrettyChars(), thisBuf.peekChars(), e.e1.toChars());
return setError();
}
@@ -7091,7 +7152,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
te = getRightThis(e.loc, sc, ad2, te, f);
if (te.op == EXP.error)
{
- e.error("need `this` of type `%s` to make delegate from function `%s`", ad2.toChars(), f.toChars());
+ error(e.loc, "need `this` of type `%s` to make delegate from function `%s`", ad2.toChars(), f.toChars());
return setError();
}
}
@@ -7263,7 +7324,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!exp.e1.type)
{
- exp.error("cannot take address of `%s`", exp.e1.toChars());
+ error(exp.loc, "cannot take address of `%s`", exp.e1.toChars());
return setError();
}
if (!checkAddressable(exp, sc))
@@ -7278,16 +7339,16 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
else if (!exp.e1.type.deco)
{
// try to resolve the type
- exp.e1.type = exp.e1.type.typeSemantic(exp.e1.loc, null);
+ exp.e1.type = exp.e1.type.typeSemantic(exp.e1.loc, sc);
if (!exp.e1.type.deco) // still couldn't resolve it
{
if (auto ve = exp.e1.isVarExp())
{
Declaration d = ve.var;
- exp.error("forward reference to %s `%s`", d.kind(), d.toChars());
+ error(exp.loc, "forward reference to %s `%s`", d.kind(), d.toChars());
}
else
- exp.error("forward reference to type `%s` of expression `%s`", exp.e1.type.toChars(), exp.e1.toChars());
+ error(exp.loc, "forward reference to type `%s` of expression `%s`", exp.e1.type.toChars(), exp.e1.toChars());
return setError();
}
}
@@ -7457,7 +7518,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
case Tarray:
if (isNonAssignmentArrayOp(exp.e1))
goto default;
- exp.error("using `*` on an array is no longer supported; use `*(%s).ptr` instead", exp.e1.toChars());
+ error(exp.loc, "using `*` on an array is no longer supported; use `*(%s).ptr` instead", exp.e1.toChars());
exp.type = (cast(TypeArray)tb).next;
exp.e1 = exp.e1.castTo(sc, exp.type.pointerTo());
break;
@@ -7470,7 +7531,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
break;
default:
- exp.error("can only `*` a pointer, not a `%s`", exp.e1.type.toChars());
+ error(exp.loc, "can only `*` a pointer, not a `%s`", exp.e1.type.toChars());
goto case Terror;
}
@@ -7527,7 +7588,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (exp.e1.checkNoBool())
return setError();
- if (exp.e1.checkArithmetic() ||
+ if (exp.e1.checkArithmetic(exp.op) ||
exp.e1.checkSharedAccess(sc))
return setError();
@@ -7557,7 +7618,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (exp.e1.checkNoBool())
return setError();
- if (exp.e1.checkArithmetic())
+ if (exp.e1.checkArithmetic(exp.op))
return setError();
if (exp.e1.checkSharedAccess(sc))
return setError();
@@ -7684,7 +7745,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
* `isRAII` has been set to true for the deletion of a `scope class`. */
if (tb.ty != Tclass)
{
- exp.error("cannot delete type `%s`", exp.e1.type.toChars());
+ error(exp.loc, "cannot delete type `%s`", exp.e1.type.toChars());
return setError();
}
@@ -7693,7 +7754,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
/* Because COM classes are deleted by IUnknown.Release()
*/
- exp.error("cannot `delete` instance of COM interface `%s`", cd.toChars());
+ error(exp.loc, "cannot `delete` instance of COM interface `%s`", cd.toChars());
return setError();
}
@@ -7797,7 +7858,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (!exp.e1.type)
{
- exp.error("cannot cast `%s`", exp.e1.toChars());
+ error(exp.loc, "cannot cast `%s`", exp.e1.toChars());
return setError();
}
@@ -7835,7 +7896,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (exp.to.ty == Ttuple)
{
- exp.error("cannot cast `%s` of type `%s` to type sequence `%s`", exp.e1.toChars(), exp.e1.type.toChars(), exp.to.toChars());
+ error(exp.loc, "cannot cast `%s` of type `%s` to type sequence `%s`", exp.e1.toChars(), exp.e1.type.toChars(), exp.to.toChars());
return setError();
}
@@ -8018,7 +8079,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (elem.isConst() == 1)
return false;
- exp.error("constant expression expected, not `%s`", elem.toChars());
+ error(exp.loc, "constant expression expected, not `%s`", elem.toChars());
return true;
}
@@ -8083,7 +8144,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (exp.lwr || exp.upr)
{
- exp.error("cannot slice type `%s`", exp.e1.toChars());
+ error(exp.loc, "cannot slice type `%s`", exp.e1.toChars());
return setError();
}
Expression e = new TypeExp(exp.loc, exp.e1.type.arrayOf());
@@ -8135,12 +8196,12 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (t1b.isPtrToFunction())
{
- exp.error("cannot slice function pointer `%s`", exp.e1.toChars());
+ error(exp.loc, "cannot slice function pointer `%s`", exp.e1.toChars());
return setError();
}
if (!exp.lwr || !exp.upr)
{
- exp.error("upper and lower bounds are needed to slice a pointer");
+ error(exp.loc, "upper and lower bounds are needed to slice a pointer");
if (auto ad = isAggregate(tp.next.toBasetype()))
{
auto s = search_function(ad, Id.index);
@@ -8150,7 +8211,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
auto fd = s.isFuncDeclaration();
if ((fd && !fd.getParameterList().length) || s.isTemplateDeclaration())
{
- exp.errorSupplemental(
+ errorSupplemental(exp.loc,
"pointer `%s` points to an aggregate that defines an `%s`, perhaps you meant `(*%s)[]`",
exp.e1.toChars(),
s.ident.toChars(),
@@ -8181,7 +8242,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (!exp.lwr || !exp.upr)
{
- exp.error("need upper and lower bound to slice a sequence");
+ error(exp.loc, "need upper and lower bound to slice a sequence");
return setError();
}
}
@@ -8195,7 +8256,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- exp.error("`%s` cannot be sliced with `[]`", t1b.ty == Tvoid ? exp.e1.toChars() : t1b.toChars());
+ error(exp.loc, "`%s` cannot be sliced with `[]`", t1b.ty == Tvoid ? exp.e1.toChars() : t1b.toChars());
return setError();
}
@@ -8261,7 +8322,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (i2 < i1 || length < i2)
{
- exp.error("string slice `[%llu .. %llu]` is out of bounds", i1, i2);
+ error(exp.loc, "string slice `[%llu .. %llu]` is out of bounds", i1, i2);
return setError();
}
@@ -8401,13 +8462,13 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (isAggregate(exp.e1.type))
- exp.error("no `[]` operator overload for type `%s`", exp.e1.type.toChars());
+ error(exp.loc, "no `[]` operator overload for type `%s`", exp.e1.type.toChars());
else if (exp.e1.op == EXP.type && exp.e1.type.ty != Ttuple)
- exp.error("static array of `%s` with multiple lengths not allowed", exp.e1.type.toChars());
+ error(exp.loc, "static array of `%s` with multiple lengths not allowed", exp.e1.type.toChars());
else if (isIndexableNonAggregate(exp.e1.type))
- exp.error("only one index allowed to index `%s`", exp.e1.type.toChars());
+ error(exp.loc, "only one index allowed to index `%s`", exp.e1.type.toChars());
else
- exp.error("cannot use `[]` operator on expression of type `%s`", exp.e1.type.toChars());
+ error(exp.loc, "cannot use `[]` operator on expression of type `%s`", exp.e1.type.toChars());
result = ErrorExp.get();
}
@@ -8485,7 +8546,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
discardValue(e.e1);
}
else if (!e.allowCommaExp && !e.isGenerated)
- e.error("using the result of a comma expression is not allowed");
+ error(e.loc, "using the result of a comma expression is not allowed");
}
override void visit(IntervalExp e)
@@ -8656,7 +8717,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
case Tpointer:
if (t1b.isPtrToFunction())
{
- exp.error("cannot index function pointer `%s`", exp.e1.toChars());
+ error(exp.loc, "cannot index function pointer `%s`", exp.e1.toChars());
return setError();
}
exp.e2 = exp.e2.implicitCastTo(sc, Type.tsize_t);
@@ -8738,7 +8799,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (length <= index)
{
- exp.error("array index `[%llu]` is outside array bounds `[0 .. %llu]`", index, cast(ulong)length);
+ error(exp.loc, "array index `[%llu]` is outside array bounds `[0 .. %llu]`", index, cast(ulong)length);
return setError();
}
Expression e;
@@ -8753,7 +8814,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
return;
}
default:
- exp.error("`%s` must be an array or pointer type, not `%s`", exp.e1.toChars(), exp.e1.type.toChars());
+ error(exp.loc, "`%s` must be an array or pointer type, not `%s`", exp.e1.toChars(), exp.e1.type.toChars());
return setError();
}
@@ -8843,7 +8904,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (exp.e1.op == EXP.slice)
{
const(char)* s = exp.op == EXP.plusPlus ? "increment" : "decrement";
- exp.error("cannot post-%s array slice `%s`, use pre-%s instead", s, exp.e1.toChars(), s);
+ error(exp.loc, "cannot post-%s array slice `%s`, use pre-%s instead", s, exp.e1.toChars(), s);
return setError();
}
@@ -8983,7 +9044,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (auto e2comma = exp.e2.isCommaExp())
{
if (!e2comma.isGenerated && !(sc.flags & SCOPE.Cfile))
- exp.error("using the result of a comma expression is not allowed");
+ error(exp.loc, "using the result of a comma expression is not allowed");
/* Rewrite to get rid of the comma from rvalue
* e1=(e0,e2) => e0,(e1=e2)
@@ -9226,7 +9287,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
Expression e = null;
if (dim != tup2.exps.length)
{
- exp.error("mismatched sequence lengths, %d and %d", cast(int)dim, cast(int)tup2.exps.length);
+ error(exp.loc, "mismatched sequence lengths, %d and %d", cast(int)dim, cast(int)tup2.exps.length);
return setError();
}
if (dim == 0)
@@ -9458,7 +9519,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (!e2x.type.implicitConvTo(e1x.type))
{
- exp.error("conversion error from `%s` to `%s`",
+ error(exp.loc, "conversion error from `%s` to `%s`",
e2x.type.toChars(), e1x.type.toChars());
return setError();
}
@@ -9776,7 +9837,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
if (dim1 != dim2)
{
- exp.error("mismatched array lengths, %d and %d", cast(int)dim1, cast(int)dim2);
+ error(exp.loc, "mismatched array lengths, %d and %d", cast(int)dim1, cast(int)dim2);
return setError();
}
}
@@ -9806,7 +9867,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (overflow || dim >= uint.max)
{
// dym exceeds maximum array size
- exp.error("static array `%s` size overflowed to %llu",
+ error(exp.loc, "static array `%s` size overflowed to %llu",
e1x.type.toChars(), cast(ulong) dim);
return setError();
}
@@ -9910,13 +9971,13 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
// https://issues.dlang.org/show_bug.cgi?id=9884
(!fun || (fun && !fun.isStaticCtorDeclaration())))
{
- exp.error("slice `%s` is not mutable", se.toChars());
+ error(exp.loc, "slice `%s` is not mutable", se.toChars());
return setError();
}
if (exp.op == EXP.assign && !tn.baseElemOf().isAssignable())
{
- exp.error("slice `%s` is not mutable, struct `%s` has immutable members",
+ error(exp.loc, "slice `%s` is not mutable, struct `%s` has immutable members",
exp.e1.toChars(), tn.baseElemOf().toChars());
result = ErrorExp.get();
return;
@@ -9939,7 +10000,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
Type tn = exp.e1.type.nextOf();
if (tn && !tn.baseElemOf().isAssignable())
{
- exp.error("array `%s` is not mutable, struct `%s` has immutable members",
+ error(exp.loc, "array `%s` is not mutable, struct `%s` has immutable members",
exp.e1.toChars(), tn.baseElemOf().toChars());
result = ErrorExp.get();
return;
@@ -10010,7 +10071,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
uinteger_t dim2 = tsa2.dim.toInteger();
if (dim1 != dim2)
{
- exp.error("mismatched array lengths %d and %d for assignment `%s`", cast(int)dim1, cast(int)dim2, exp.toChars());
+ error(exp.loc, "mismatched array lengths %d and %d for assignment `%s`", cast(int)dim1, cast(int)dim2, exp.toChars());
return setError();
}
}
@@ -10024,19 +10085,22 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
return setError();
}
- if (0 && global.params.warnings != DiagnosticReporting.off && !global.gag && exp.op == EXP.assign &&
- e2x.op != EXP.slice && e2x.op != EXP.assign &&
- e2x.op != EXP.arrayLiteral && e2x.op != EXP.string_ &&
- !(e2x.op == EXP.add || e2x.op == EXP.min ||
- e2x.op == EXP.mul || e2x.op == EXP.div ||
- e2x.op == EXP.mod || e2x.op == EXP.xor ||
- e2x.op == EXP.and || e2x.op == EXP.or ||
- e2x.op == EXP.pow ||
- e2x.op == EXP.tilde || e2x.op == EXP.negate))
+ version (none)
{
- const(char)* e1str = exp.e1.toChars();
- const(char)* e2str = e2x.toChars();
- exp.warning("explicit element-wise assignment `%s = (%s)[]` is better than `%s = %s`", e1str, e2str, e1str, e2str);
+ if (global.params.warnings != DiagnosticReporting.off && !global.gag && exp.op == EXP.assign &&
+ e2x.op != EXP.slice && e2x.op != EXP.assign &&
+ e2x.op != EXP.arrayLiteral && e2x.op != EXP.string_ &&
+ !(e2x.op == EXP.add || e2x.op == EXP.min ||
+ e2x.op == EXP.mul || e2x.op == EXP.div ||
+ e2x.op == EXP.mod || e2x.op == EXP.xor ||
+ e2x.op == EXP.and || e2x.op == EXP.or ||
+ e2x.op == EXP.pow ||
+ e2x.op == EXP.tilde || e2x.op == EXP.negate))
+ {
+ const(char)* e1str = exp.e1.toChars();
+ const(char)* e2str = e2x.toChars();
+ exp.warning("explicit element-wise assignment `%s = (%s)[]` is better than `%s = %s`", e1str, e2str, e1str, e2str);
+ }
}
Type t2n = t2.nextOf();
@@ -10085,17 +10149,20 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
else
{
- if (0 && global.params.warnings != DiagnosticReporting.off && !global.gag && exp.op == EXP.assign &&
- t1.ty == Tarray && t2.ty == Tsarray &&
- e2x.op != EXP.slice &&
- t2.implicitConvTo(t1))
+ version (none)
{
- // Disallow ar[] = sa (Converted to ar[] = sa[])
- // Disallow da = sa (Converted to da = sa[])
- const(char)* e1str = exp.e1.toChars();
- const(char)* e2str = e2x.toChars();
- const(char)* atypestr = exp.e1.op == EXP.slice ? "element-wise" : "slice";
- exp.warning("explicit %s assignment `%s = (%s)[]` is better than `%s = %s`", atypestr, e1str, e2str, e1str, e2str);
+ if (global.params.warnings != DiagnosticReporting.off && !global.gag && exp.op == EXP.assign &&
+ t1.ty == Tarray && t2.ty == Tsarray &&
+ e2x.op != EXP.slice &&
+ t2.implicitConvTo(t1))
+ {
+ // Disallow ar[] = sa (Converted to ar[] = sa[])
+ // Disallow da = sa (Converted to da = sa[])
+ const(char)* e1str = exp.e1.toChars();
+ const(char)* e2str = e2x.toChars();
+ const(char)* atypestr = exp.e1.op == EXP.slice ? "element-wise" : "slice";
+ exp.warning("explicit %s assignment `%s = (%s)[]` is better than `%s = %s`", atypestr, e1str, e2str, e1str, e2str);
+ }
}
if (exp.op == EXP.blit)
e2x = e2x.castTo(sc, exp.e1.type);
@@ -10169,13 +10236,13 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (vd && vd.onstack)
{
assert(t1.ty == Tclass);
- exp.error("cannot rebind scope variables");
+ error(exp.loc, "cannot rebind scope variables");
}
}
if (exp.e1.op == EXP.variable && (cast(VarExp)exp.e1).var.ident == Id.ctfe)
{
- exp.error("cannot modify compiler-generated variable `__ctfe`");
+ error(exp.loc, "cannot modify compiler-generated variable `__ctfe`");
}
exp.type = exp.e1.type;
@@ -10277,7 +10344,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
res = Expression.combine(e0, ce).expressionSemantic(sc);
}
- if (global.params.verbose)
+ if (global.params.v.verbose)
message("lowered %s =>\n %s", exp.toChars(), res.toChars());
}
}
@@ -10367,7 +10434,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (isArrayAssign)
res = Expression.combine(res, ae.e1).expressionSemantic(sc);
- if (global.params.verbose)
+ if (global.params.v.verbose)
message("lowered %s =>\n %s", ae.toChars(), res.toChars());
res = new LoweredAssignExp(ae, res);
@@ -10480,7 +10547,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (se.e1.type.toBasetype().ty == Tsarray)
{
- exp.error("cannot append to static array `%s`", se.e1.type.toChars());
+ error(exp.loc, "cannot append to static array `%s`", se.e1.type.toChars());
return setError();
}
}
@@ -10604,7 +10671,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (result)
return;
- exp.error("cannot append type `%s` to type `%s`", tb2.toChars(), tb1.toChars());
+ error(exp.loc, "cannot append type `%s` to type `%s`", tb2.toChars(), tb1.toChars());
return setError();
}
@@ -10785,11 +10852,11 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
bool err = false;
if (tb1.ty == Tdelegate || tb1.isPtrToFunction())
{
- err |= exp.e1.checkArithmetic() || exp.e1.checkSharedAccess(sc);
+ err |= exp.e1.checkArithmetic(exp.op) || exp.e1.checkSharedAccess(sc);
}
if (tb2.ty == Tdelegate || tb2.isPtrToFunction())
{
- err |= exp.e2.checkArithmetic() || exp.e2.checkSharedAccess(sc);
+ err |= exp.e2.checkArithmetic(exp.op) || exp.e2.checkSharedAccess(sc);
}
if (err)
return setError();
@@ -10891,11 +10958,11 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
bool err = false;
if (t1.ty == Tdelegate || t1.isPtrToFunction())
{
- err |= exp.e1.checkArithmetic() || exp.e1.checkSharedAccess(sc);
+ err |= exp.e1.checkArithmetic(exp.op) || exp.e1.checkSharedAccess(sc);
}
if (t2.ty == Tdelegate || t2.isPtrToFunction())
{
- err |= exp.e2.checkArithmetic() || exp.e2.checkSharedAccess(sc);
+ err |= exp.e2.checkArithmetic(exp.op) || exp.e2.checkSharedAccess(sc);
}
if (err)
return setError();
@@ -10950,7 +11017,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
e = scaleFactor(exp, sc);
else
{
- exp.error("can't subtract `%s` from pointer", t2.toChars());
+ error(exp.loc, "can't subtract `%s` from pointer", t2.toChars());
e = ErrorExp.get();
}
result = e;
@@ -10959,7 +11026,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (t2.ty == Tpointer)
{
exp.type = exp.e2.type;
- exp.error("can't subtract pointer from `%s`", exp.e1.type.toChars());
+ error(exp.loc, "can't subtract pointer from `%s`", exp.e1.type.toChars());
return setError();
}
@@ -11032,7 +11099,9 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
(exp.e2.isStringExp() && (exp.e1.isIntegerExp() || exp.e1.isStringExp())))
return exp;
- Identifier hook = global.params.tracegc ? Id._d_arraycatnTXTrace : Id._d_arraycatnTX;
+ bool useTraceGCHook = global.params.tracegc && sc.needsCodegen();
+
+ Identifier hook = useTraceGCHook ? Id._d_arraycatnTXTrace : Id._d_arraycatnTX;
if (!verifyHookExist(exp.loc, *sc, hook, "concatenating arrays"))
{
setError();
@@ -11061,7 +11130,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
}
auto arguments = new Expressions();
- if (global.params.tracegc)
+ if (useTraceGCHook)
{
auto funcname = (sc.callsc && sc.callsc.func) ?
sc.callsc.func.toPrettyChars() : sc.func.toPrettyChars();
@@ -11088,7 +11157,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
/* `_d_arraycatnTX` canot be used with `-betterC`, but `CatExp`s may be
* used with `-betterC`, but only during CTFE.
*/
- if (!global.params.useGC || !sc.needsCodegen())
+ if (!global.params.useGC)
return;
if (auto ce = exp.isCatExp())
@@ -11534,7 +11603,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
exp.type = exp.e1.type;
if (exp.e2.type.iscomplex())
{
- exp.error("cannot perform modulo complex arithmetic");
+ error(exp.loc, "cannot perform modulo complex arithmetic");
return setError();
}
}
@@ -11601,7 +11670,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
Module mmath = Module.loadStdMath();
if (!mmath)
{
- e.error("`%s` requires `std.math` for `^^` operators", e.toChars());
+ error(e.loc, "`%s` requires `std.math` for `^^` operators", e.toChars());
return setError();
}
e = new ScopeExp(exp.loc, mmath);
@@ -11956,7 +12025,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (e2x.op == EXP.type || e2x.op == EXP.scope_)
{
- exp.error("`%s` is not an expression", exp.e2.toChars());
+ error(exp.loc, "`%s` is not an expression", exp.e2.toChars());
return setError();
}
if (e1x.op == EXP.error || e1x.type.ty == Tnoreturn)
@@ -12005,7 +12074,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
Type t2 = exp.e2.type.toBasetype();
if (t1.ty == Tclass && exp.e2.op == EXP.null_ || t2.ty == Tclass && exp.e1.op == EXP.null_)
{
- exp.error("do not use `null` when comparing class types");
+ error(exp.loc, "do not use `null` when comparing class types");
return setError();
}
@@ -12015,7 +12084,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
{
if (!e.type.isscalar() && e.type.equals(exp.e1.type))
{
- exp.error("recursive `opCmp` expansion");
+ error(exp.loc, "recursive `opCmp` expansion");
return setError();
}
if (e.op == EXP.call)
@@ -12083,12 +12152,11 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
Type t2next = t2.nextOf();
if (t1next.implicitConvTo(t2next) < MATCH.constant && t2next.implicitConvTo(t1next) < MATCH.constant && (t1next.ty != Tvoid && t2next.ty != Tvoid))
{
- exp.error("array comparison type mismatch, `%s` vs `%s`", t1next.toChars(), t2next.toChars());
+ error(exp.loc, "array comparison type mismatch, `%s` vs `%s`", t1next.toChars(), t2next.toChars());
return setError();
}
- if (sc.needsCodegen() &&
- (t1.ty == Tarray || t1.ty == Tsarray) &&
+ if ((t1.ty == Tarray || t1.ty == Tsarray) &&
(t2.ty == Tarray || t2.ty == Tsarray))
{
if (!verifyHookExist(exp.loc, *sc, Id.__cmp, "comparing arrays"))
@@ -12113,19 +12181,19 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
else if (t1.ty == Tstruct || t2.ty == Tstruct || (t1.ty == Tclass && t2.ty == Tclass))
{
if (t2.ty == Tstruct)
- exp.error("need member function `opCmp()` for %s `%s` to compare", t2.toDsymbol(sc).kind(), t2.toChars());
+ error(exp.loc, "need member function `opCmp()` for %s `%s` to compare", t2.toDsymbol(sc).kind(), t2.toChars());
else
- exp.error("need member function `opCmp()` for %s `%s` to compare", t1.toDsymbol(sc).kind(), t1.toChars());
+ error(exp.loc, "need member function `opCmp()` for %s `%s` to compare", t1.toDsymbol(sc).kind(), t1.toChars());
return setError();
}
else if (t1.iscomplex() || t2.iscomplex())
{
- exp.error("compare not defined for complex operands");
+ error(exp.loc, "compare not defined for complex operands");
return setError();
}
else if (t1.ty == Taarray || t2.ty == Taarray)
{
- exp.error("`%s` is not defined for associative arrays", EXPtoString(exp.op).ptr);
+ error(exp.loc, "`%s` is not defined for associative arrays", EXPtoString(exp.op).ptr);
return setError();
}
else if (!target.isVectorOpSupported(t1, exp.op, t2))
@@ -12202,9 +12270,9 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
case Tarray, Tsarray:
result = exp.incompatibleTypes();
- exp.errorSupplemental("`in` is only allowed on associative arrays");
+ errorSupplemental(exp.loc, "`in` is only allowed on associative arrays");
const(char)* slice = (t2b.ty == Tsarray) ? "[]" : "";
- exp.errorSupplemental("perhaps use `std.algorithm.find(%s, %s%s)` instead",
+ errorSupplemental(exp.loc, "perhaps use `std.algorithm.find(%s, %s%s)` instead",
exp.e1.toChars(), exp.e2.toChars(), slice);
return;
@@ -12282,7 +12350,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
auto t1 = exp.e1.type;
auto t2 = exp.e2.type;
if (t1.ty == Tenum && t2.ty == Tenum && !t1.equivalent(t2))
- exp.error("comparison between different enumeration types `%s` and `%s`; If this behavior is intended consider using `std.conv.asOriginalType`",
+ error(exp.loc, "comparison between different enumeration types `%s` and `%s`; If this behavior is intended consider using `std.conv.asOriginalType`",
t1.toChars(), t2.toChars());
}
@@ -12424,7 +12492,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
__equals = __equals.trySemantic(sc); // for better error message
if (!__equals)
{
- exp.error("incompatible types for array comparison: `%s` and `%s`",
+ error(exp.loc, "incompatible types for array comparison: `%s` and `%s`",
exp.e1.type.toChars(), exp.e2.type.toChars());
__equals = ErrorExp.get();
}
@@ -12506,7 +12574,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor
if (exp.e1.type.toBasetype().ty == Tsarray ||
exp.e2.type.toBasetype().ty == Tsarray)
- exp.deprecation("identity comparison of static arrays "
+ deprecation(exp.loc, "identity comparison of static arrays "
~ "implicitly coerces them to slices, "
~ "which are compared by reference");
@@ -12925,7 +12993,7 @@ private Expression dotIdSemanticPropX(DotIdExp exp, Scope* sc)
if (f.purityInprocess || f.safetyInprocess || f.nothrowInprocess || f.nogcInprocess)
{
- f.error(loc, "cannot retrieve its `.mangleof` while inferring attributes");
+ error(loc, "%s `%s` cannot retrieve its `.mangleof` while inferring attributes", f.kind, f.toPrettyChars);
return ErrorExp.get();
}
}
@@ -12991,12 +13059,12 @@ private Expression dotIdSemanticPropX(DotIdExp exp, Scope* sc)
// Template has no built-in properties except for 'stringof'.
if ((exp.e1.isDotTemplateExp() || exp.e1.isTemplateExp()) && exp.ident != Id.stringof)
{
- exp.error("template `%s` does not have property `%s`", exp.e1.toChars(), exp.ident.toChars());
+ error(exp.loc, "template `%s` does not have property `%s`", exp.e1.toChars(), exp.ident.toChars());
return ErrorExp.get();
}
if (!exp.e1.type)
{
- exp.error("expression `%s` does not have property `%s`", exp.e1.toChars(), exp.ident.toChars());
+ error(exp.loc, "expression `%s` does not have property `%s`", exp.e1.toChars(), exp.ident.toChars());
return ErrorExp.get();
}
@@ -13110,9 +13178,9 @@ Expression dotIdSemanticProp(DotIdExp exp, Scope* sc, bool gag)
!v.type.deco && v.inuse)
{
if (v.inuse)
- exp.error("circular reference to %s `%s`", v.kind(), v.toPrettyChars());
+ error(exp.loc, "circular reference to %s `%s`", v.kind(), v.toPrettyChars());
else
- exp.error("forward reference to %s `%s`", v.kind(), v.toPrettyChars());
+ error(exp.loc, "forward reference to %s `%s`", v.kind(), v.toPrettyChars());
return ErrorExp.get();
}
if (v.type.isTypeError())
@@ -13273,12 +13341,12 @@ Expression dotIdSemanticProp(DotIdExp exp, Scope* sc, bool gag)
if (s && symbolIsVisible(sc, s))
{
if (s.isPackage())
- exp.error("undefined identifier `%s` in %s `%s`, perhaps add `static import %s;`", exp.ident.toChars(), ie.sds.kind(), ie.sds.toPrettyChars(), s.toPrettyChars());
+ error(exp.loc, "undefined identifier `%s` in %s `%s`, perhaps add `static import %s;`", exp.ident.toChars(), ie.sds.kind(), ie.sds.toPrettyChars(), s.toPrettyChars());
else
- exp.error("undefined identifier `%s` in %s `%s`, did you mean %s `%s`?", exp.ident.toChars(), ie.sds.kind(), ie.sds.toPrettyChars(), s.kind(), s.toChars());
+ error(exp.loc, "undefined identifier `%s` in %s `%s`, did you mean %s `%s`?", exp.ident.toChars(), ie.sds.kind(), ie.sds.toPrettyChars(), s.kind(), s.toChars());
}
else
- exp.error("undefined identifier `%s` in %s `%s`", exp.ident.toChars(), ie.sds.kind(), ie.sds.toPrettyChars());
+ error(exp.loc, "undefined identifier `%s` in %s `%s`", exp.ident.toChars(), ie.sds.kind(), ie.sds.toPrettyChars());
return ErrorExp.get();
}
else if (t1b.ty == Tpointer && exp.e1.type.ty != Tenum &&
@@ -13533,7 +13601,7 @@ Expression dotTemplateSemanticProp(DotTemplateInstanceExp exp, Scope* sc, bool g
}
Lerr:
- exp.error("`%s` isn't a template", e.toChars());
+ error(exp.loc, "`%s` isn't a template", e.toChars());
return errorExp();
}
@@ -13567,7 +13635,7 @@ bool checkSharedAccess(Expression e, Scope* sc, bool returnRef = false)
bool sharedError(Expression e)
{
// https://dlang.org/phobos/core_atomic.html
- e.error("direct access to shared `%s` is not allowed, see `core.atomic`", e.toChars());
+ error(e.loc, "direct access to shared `%s` is not allowed, see `core.atomic`", e.toChars());
return true;
}
@@ -13726,7 +13794,7 @@ bool checkAddressVar(Scope* sc, Expression exp, VarDeclaration v)
if (!v.canTakeAddressOf())
{
- exp.error("cannot take address of `%s`", exp.toChars());
+ error(exp.loc, "cannot take address of `%s`", exp.toChars());
return false;
}
if (sc.func && !sc.intypeof && !v.isDataseg())
@@ -13763,7 +13831,7 @@ bool checkAddressable(Expression e, Scope* sc)
// whether SCOPE.Cfile is set.
if (auto bf = ex.isDotVarExp().var.isBitFieldDeclaration())
{
- e.error("cannot take address of bit-field `%s`", bf.toChars());
+ error(e.loc, "cannot take address of bit-field `%s`", bf.toChars());
return false;
}
goto case EXP.cast_;
@@ -13788,9 +13856,9 @@ bool checkAddressable(Expression e, Scope* sc)
if (ex.isVarExp().var.storage_class & STC.register)
{
if (e.isIndexExp())
- e.error("cannot index through register variable `%s`", ex.toChars());
+ error(e.loc, "cannot index through register variable `%s`", ex.toChars());
else
- e.error("cannot take address of register variable `%s`", ex.toChars());
+ error(e.loc, "cannot take address of register variable `%s`", ex.toChars());
return false;
}
}
@@ -13870,7 +13938,7 @@ Expression getThisSkipNestedFuncs(const ref Loc loc, Scope* sc, Dsymbol s, Aggre
{
if (flag)
return null;
- e1.error("need `this` of type `%s` to access member `%s` from static function `%s`", ad.toChars(), var.toChars(), f.toChars());
+ error(e1.loc, "need `this` of type `%s` to access member `%s` from static function `%s`", ad.toChars(), var.toChars(), f.toChars());
e1 = ErrorExp.get();
return e1;
}
@@ -14105,7 +14173,7 @@ Expression toBoolean(Expression exp, Scope* sc)
switch(exp.op)
{
case EXP.delete_:
- exp.error("`delete` does not give a boolean result");
+ error(exp.loc, "`delete` does not give a boolean result");
return ErrorExp.get();
case EXP.comma:
@@ -14126,7 +14194,7 @@ Expression toBoolean(Expression exp, Scope* sc)
// Things like:
// if (a = b) ...
// are usually mistakes.
- exp.error("assignment cannot be used as a condition, perhaps `==` was meant?");
+ error(exp.loc, "assignment cannot be used as a condition, perhaps `==` was meant?");
return ErrorExp.get();
//LogicalExp
@@ -14190,7 +14258,7 @@ Expression toBoolean(Expression exp, Scope* sc)
if (!t.isBoolean())
{
if (tb != Type.terror)
- exp.error("expression `%s` of type `%s` does not have a boolean value",
+ error(exp.loc, "expression `%s` of type `%s` does not have a boolean value",
exp.toChars(), t.toChars());
return ErrorExp.get();
}
diff --git a/gcc/d/dmd/func.d b/gcc/d/dmd/func.d
index 73f1ba7..81bb028 100644
--- a/gcc/d/dmd/func.d
+++ b/gcc/d/dmd/func.d
@@ -173,7 +173,7 @@ public:
Identifier id = Identifier.generateId("__o");
Statement handler = new PeelStatement(sexception);
- if (sexception.blockExit(fd, false) & BE.fallthru)
+ if (sexception.blockExit(fd, null) & BE.fallthru)
{
auto ts = new ThrowStatement(Loc.initial, new IdentifierExp(Loc.initial, id));
ts.internalThrow = true;
@@ -744,7 +744,7 @@ extern (C++) class FuncDeclaration : Declaration
if (exactvi >= 0)
{
- error("cannot determine overridden function");
+ .error(loc, "%s `%s` cannot determine overridden function", kind, toPrettyChars);
return exactvi;
}
exactvi = vi;
@@ -1057,7 +1057,7 @@ extern (C++) class FuncDeclaration : Declaration
OutBuffer thisBuf, funcBuf;
MODMatchToBuffer(&thisBuf, tthis.mod, tf.mod);
MODMatchToBuffer(&funcBuf, tf.mod, tthis.mod);
- .error(loc, "%smethod %s is not callable using a %sobject",
+ .error(loc, "%smethod %s is not callable using a %sobject", kind, toPrettyChars,
funcBuf.peekChars(), this.toPrettyChars(), thisBuf.peekChars());
}
}
@@ -1311,7 +1311,7 @@ extern (C++) class FuncDeclaration : Declaration
final const(char)* toFullSignature()
{
OutBuffer buf;
- functionToBufferWithIdent(type.toTypeFunction(), &buf, toChars(), isStatic);
+ functionToBufferWithIdent(type.toTypeFunction(), buf, toChars(), isStatic);
return buf.extractChars();
}
@@ -1696,7 +1696,7 @@ extern (C++) class FuncDeclaration : Declaration
extern (D) final void printGCUsage(const ref Loc loc, const(char)* warn)
{
- if (!global.params.vgc)
+ if (!global.params.v.gc)
return;
Module m = getModule();
@@ -2240,13 +2240,13 @@ extern (C++) class FuncDeclaration : Declaration
if (setGC(loc, "%s `%s` is `@nogc` yet allocates closure for `%s()` with the GC", this))
{
- error("is `@nogc` yet allocates closure for `%s()` with the GC", toChars());
+ .error(loc, "%s `%s` is `@nogc` yet allocates closure for `%s()` with the GC", kind, toPrettyChars, toChars());
if (global.gag) // need not report supplemental errors
return true;
}
else if (!global.params.useGC)
{
- error("is `-betterC` yet allocates closure for `%s()` with the GC", toChars());
+ .error(loc, "%s `%s` is `-betterC` yet allocates closure for `%s()` with the GC", kind, toPrettyChars, toChars());
if (global.gag) // need not report supplemental errors
return true;
}
@@ -2279,8 +2279,11 @@ extern (C++) class FuncDeclaration : Declaration
break LcheckAncestorsOfANestedRef;
}
a.push(f);
- .errorSupplemental(f.loc, "`%s` closes over variable `%s` at %s",
- f.toPrettyChars(), v.toChars(), v.loc.toChars());
+ .errorSupplemental(f.loc, "%s `%s` closes over variable `%s`",
+ f.kind, f.toPrettyChars(), v.toChars());
+ if (v.ident != Id.This)
+ .errorSupplemental(v.loc, "`%s` declared here", v.toChars());
+
break LcheckAncestorsOfANestedRef;
}
}
@@ -2367,7 +2370,7 @@ extern (C++) class FuncDeclaration : Declaration
vresult.dsymbolSemantic(sc);
if (!sc.insert(vresult))
- error("out result %s is already defined", vresult.toChars());
+ .error(loc, "%s `%s` out result %s is already defined", kind, toPrettyChars, vresult.toChars());
assert(vresult.parent == this);
}
}
@@ -2657,7 +2660,7 @@ extern (C++) class FuncDeclaration : Declaration
auto fparams = new Parameters();
if (canBuildResultVar())
{
- Parameter p = new Parameter(STC.ref_ | STC.const_, f.nextOf(), Id.result, null, null);
+ Parameter p = new Parameter(loc, STC.ref_ | STC.const_, f.nextOf(), Id.result, null, null);
fparams.push(p);
}
auto fo = cast(TypeFunction)(originalType ? originalType : f);
@@ -2874,7 +2877,7 @@ extern (C++) class FuncDeclaration : Declaration
}
if (tf.parameterList.varargs || nparams >= 2 || argerr)
- error("parameter list must be empty or accept one parameter of type `string[]`");
+ .error(loc, "%s `%s` parameter list must be empty or accept one parameter of type `string[]`", kind, toPrettyChars);
}
else if (linkage == LINK.c)
@@ -2909,7 +2912,7 @@ extern (C++) class FuncDeclaration : Declaration
if (argerr)
{
- error("parameters must match one of the following signatures");
+ .error(loc, "%s `%s` parameters must match one of the following signatures", kind, toPrettyChars);
loc.errorSupplemental("`main()`");
loc.errorSupplemental("`main(int argc, char** argv)`");
loc.errorSupplemental("`main(int argc, char** argv, char** environ)` [POSIX extension]");
@@ -2922,7 +2925,7 @@ extern (C++) class FuncDeclaration : Declaration
retType = retType.toBasetype();
if (retType.ty != Tint32 && retType.ty != Tvoid && retType.ty != Tnoreturn)
- error("must return `int`, `void` or `noreturn`, not `%s`", tf.nextOf().toChars());
+ .error(loc, "%s `%s` must return `int`, `void` or `noreturn`, not `%s`", kind, toPrettyChars, tf.nextOf().toChars());
}
/***********************************************
@@ -3107,7 +3110,7 @@ extern (D) int overloadApply(Dsymbol fstart, scope int delegate(Dsymbol) dg, Sco
}
else
{
- d.error("is aliased to a function");
+ .error(d.loc, "%s `%s` is aliased to a function", d.kind, d.toPrettyChars);
break;
}
next = fa.overnext;
@@ -3146,7 +3149,7 @@ extern (D) int overloadApply(Dsymbol fstart, scope int delegate(Dsymbol) dg, Sco
}
else
{
- d.error("is aliased to a function");
+ .error(d.loc, "%s `%s` is aliased to a function", d.kind, d.toPrettyChars);
break;
// BUG: should print error message?
}
@@ -3340,14 +3343,14 @@ FuncDeclaration resolveFuncCall(const ref Loc loc, Scope* sc, Dsymbol s,
s = fd = td.funcroot;
OutBuffer tiargsBuf;
- arrayObjectsToBuffer(&tiargsBuf, tiargs);
+ arrayObjectsToBuffer(tiargsBuf, tiargs);
OutBuffer fargsBuf;
fargsBuf.writeByte('(');
- argExpTypesToCBuffer(&fargsBuf, fargs);
+ argExpTypesToCBuffer(fargsBuf, fargs);
fargsBuf.writeByte(')');
if (tthis)
- tthis.modToBuffer(&fargsBuf);
+ tthis.modToBuffer(fargsBuf);
// The call is ambiguous
if (m.lastf && m.nextf)
@@ -3388,7 +3391,7 @@ FuncDeclaration resolveFuncCall(const ref Loc loc, Scope* sc, Dsymbol s,
td.kind(), td.parent.toPrettyChars(), td.ident.toChars(),
tiargsBuf.peekChars(), fargsBuf.peekChars());
- if (!global.gag || global.params.showGaggedErrors)
+ if (!global.gag || global.params.v.showGaggedErrors)
printCandidates(loc, td, sc.isDeprecated());
return null;
}
@@ -3427,7 +3430,7 @@ FuncDeclaration resolveFuncCall(const ref Loc loc, Scope* sc, Dsymbol s,
{
.error(loc, "none of the overloads of `%s` are callable using a %sobject",
fd.ident.toChars(), thisBuf.peekChars());
- if (!global.gag || global.params.showGaggedErrors)
+ if (!global.gag || global.params.v.showGaggedErrors)
printCandidates(loc, fd, sc.isDeprecated());
return null;
}
@@ -3458,7 +3461,7 @@ FuncDeclaration resolveFuncCall(const ref Loc loc, Scope* sc, Dsymbol s,
{
.error(loc, "none of the overloads of `%s` are callable using argument types `%s`",
fd.toChars(), fargsBuf.peekChars());
- if (!global.gag || global.params.showGaggedErrors)
+ if (!global.gag || global.params.v.showGaggedErrors)
printCandidates(loc, fd, sc.isDeprecated());
return null;
}
@@ -3468,7 +3471,7 @@ FuncDeclaration resolveFuncCall(const ref Loc loc, Scope* sc, Dsymbol s,
tf.modToChars(), fargsBuf.peekChars());
// re-resolve to check for supplemental message
- if (!global.gag || global.params.showGaggedErrors)
+ if (!global.gag || global.params.v.showGaggedErrors)
{
if (tthis)
{
@@ -3511,9 +3514,7 @@ private void printCandidates(Decl)(const ref Loc loc, Decl declaration, bool sho
if (is(Decl == TemplateDeclaration) || is(Decl == FuncDeclaration))
{
// max num of overloads to print (-v or -verror-supplements overrides this).
- const int DisplayLimit = !global.params.verbose ?
- (global.params.errorSupplementLimit ? global.params.errorSupplementLimit : int.max)
- : int.max;
+ const uint DisplayLimit = global.params.v.errorSupplementCount();
const(char)* constraintsTip;
// determine if the first candidate was printed
int printed;
@@ -3577,7 +3578,7 @@ if (is(Decl == TemplateDeclaration) || is(Decl == FuncDeclaration))
});
int skipped = 0;
overloadApply(declaration, (s) {
- if (global.params.verbose || printed < DisplayLimit)
+ if (global.params.v.verbose || printed < DisplayLimit)
{
if (matchSymbol(s, true, count == 1))
printed++;
@@ -4623,7 +4624,7 @@ bool setUnsafePreview(Scope* sc, FeatureState fs, bool gag, Loc loc, const(char)
{
if (!gag)
{
- if (!sc.isDeprecated() && global.params.obsolete)
+ version (none) // disable obsolete warning
warning(loc, msg, arg0 ? arg0.toChars() : "", arg1 ? arg1.toChars() : "", arg2 ? arg2.toChars() : "");
}
}
diff --git a/gcc/d/dmd/globals.d b/gcc/d/dmd/globals.d
index af711a0..2f6fae3 100644
--- a/gcc/d/dmd/globals.d
+++ b/gcc/d/dmd/globals.d
@@ -101,6 +101,55 @@ extern(C++) struct Output
OutBuffer* buffer; // if this output is buffered, this is the buffer
int bufferLines; // number of lines written to the buffer
}
+
+/// Command line state related to printing usage about other switches
+extern(C++) struct Help
+{
+ bool manual; // open browser on compiler manual
+ bool usage; // print usage and exit
+ // print help of switch:
+ bool mcpu; // -mcpu
+ bool transition; // -transition
+ bool check; // -check
+ bool checkAction; // -checkaction
+ bool revert; // -revert
+ bool preview; // -preview
+ bool externStd; // -extern-std
+ bool hc; // -HC
+}
+
+extern(C++) struct Verbose
+{
+ bool verbose; // verbose compile
+ bool showColumns; // print character (column) numbers in diagnostics
+ bool tls; // identify thread local variables
+ bool templates; // collect and list statistics on template instantiations
+ // collect and list statistics on template instantiations origins.
+ // TODO: make this an enum when we want to list other kinds of instances
+ bool templatesListInstances;
+ bool gc; // identify gc usage
+ bool field; // identify non-mutable field variables
+ bool complex = true; // identify complex/imaginary type usage
+ bool vin; // identify 'in' parameters
+ bool showGaggedErrors; // print gagged errors anyway
+ bool printErrorContext; // print errors with the error context (the error line in the source file)
+ bool logo; // print compiler logo
+ bool color; // use ANSI colors in console output
+ bool cov; // generate code coverage data
+ MessageStyle messageStyle = MessageStyle.digitalmars; // style of file/line annotations on messages
+ uint errorLimit = 20;
+ uint errorSupplementLimit = 6; // Limit the number of supplemental messages for each error (0 means unlimited)
+
+ uint errorSupplementCount()
+ {
+ if (verbose)
+ return uint.max;
+ if (errorSupplementLimit == 0)
+ return uint.max;
+ return errorSupplementLimit;
+ }
+}
+
/// Put command line switches in here
extern (C++) struct Param
{
@@ -108,24 +157,13 @@ extern (C++) struct Param
bool multiobj; // break one object file into multiple ones
bool trace; // insert profiling hooks
bool tracegc; // instrument calls to 'new'
- bool verbose; // verbose compile
bool vcg_ast; // write-out codegen-ast
- bool showColumns; // print character (column) numbers in diagnostics
- bool vtls; // identify thread local variables
- bool vtemplates; // collect and list statistics on template instantiations
- bool vtemplatesListInstances; // collect and list statistics on template instantiations origins. TODO: make this an enum when we want to list other kinds of instances
- bool vgc; // identify gc usage
- bool vfield; // identify non-mutable field variables
- bool vcomplex = true; // identify complex/imaginary type usage
- bool vin; // identify 'in' parameters
DiagnosticReporting useDeprecated = DiagnosticReporting.inform; // how use of deprecated features are handled
bool useUnitTests; // generate unittest code
bool useInline = false; // inline expand functions
bool release; // build release version
bool preservePaths; // true means don't strip path from source file
DiagnosticReporting warnings = DiagnosticReporting.off; // how compiler warnings are handled
- bool obsolete; // enable warnings about use of obsolete messages
- bool color; // use ANSI colors in console output
bool cov; // generate code coverage data
ubyte covPercent; // 0..100 code coverage percentage required
bool ctfe_cov = false; // generate coverage data for ctfe
@@ -141,19 +179,8 @@ extern (C++) struct Param
CppStdRevision cplusplus = CppStdRevision.cpp11; // version of C++ standard to support
- bool showGaggedErrors; // print gagged errors anyway
- bool printErrorContext; // print errors with the error context (the error line in the source file)
- bool manual; // open browser on compiler manual
- bool usage; // print usage and exit
- bool mcpuUsage; // print help on -mcpu switch
- bool transitionUsage; // print help on -transition switch
- bool checkUsage; // print help on -check switch
- bool checkActionUsage; // print help on -checkaction switch
- bool revertUsage; // print help on -revert switch
- bool previewUsage; // print help on -preview switch
- bool externStdUsage; // print help on -extern-std switch
- bool hcUsage; // print help on -HC switch
- bool logo; // print compiler logo
+ Help help;
+ Verbose v;
// Options for `-preview=/-revert=`
FeatureState useDIP25 = FeatureState.enabled; // implement https://wiki.dlang.org/DIP25
@@ -188,9 +215,6 @@ extern (C++) struct Param
CHECKACTION checkAction = CHECKACTION.D; // action to take when bounds, asserts or switch defaults are violated
- uint errorLimit = 20;
- uint errorSupplementLimit = 6; // Limit the number of supplemental messages for each error (0 means unlimited)
-
const(char)[] argv0; // program name
Array!(const(char)*) modFileAliasStrings; // array of char*'s of -I module filename alias strings
Array!(const(char)*)* imppath; // array of char*'s of where to look for import modules
@@ -209,13 +233,7 @@ extern (C++) struct Param
Output moduleDeps; // Generate `.deps` module dependencies
uint debuglevel; // debug level
- Array!(const(char)*)* debugids; // debug identifiers
-
uint versionlevel; // version level
- Array!(const(char)*)* versionids; // version identifiers
-
-
- MessageStyle messageStyle = MessageStyle.digitalmars; // style of file/line annotations on messages
bool run; // run resulting executable
Strings runargs; // arguments for executable
@@ -258,6 +276,7 @@ extern (C++) struct Global
Array!(const(char)*)* filePath; /// Array of char*'s which form the file import lookup path
private enum string _version = import("VERSION");
+ char[26] datetime; /// string returned by ctime()
CompileEnv compileEnv;
Param params; /// command line parameters
@@ -281,6 +300,7 @@ extern (C++) struct Global
enum recursionLimit = 500; /// number of recursive template expansions before abort
ErrorSink errorSink; /// where the error messages go
+ ErrorSink errorSinkNull; /// where the error messages are ignored
extern (C++) FileName function(FileName, ref const Loc, out bool, OutBuffer*) preprocess;
@@ -337,6 +357,7 @@ extern (C++) struct Global
extern (C++) void _init()
{
errorSink = new ErrorSinkCompiler;
+ errorSinkNull = new ErrorSinkNull;
this.fileManager = new FileManager();
version (MARS)
@@ -345,7 +366,7 @@ extern (C++) struct Global
// -color=auto is the default value
import dmd.console : detectTerminal, detectColorPreference;
- params.color = detectTerminal() && detectColorPreference();
+ params.v.color = detectTerminal() && detectColorPreference();
}
else version (IN_GCC)
{
@@ -369,6 +390,7 @@ extern (C++) struct Global
core.stdc.time.time(&ct);
const p = ctime(&ct);
assert(p);
+ datetime[] = p[0 .. 26];
__gshared char[11 + 1] date = 0; // put in BSS segment
__gshared char[8 + 1] time = 0;
diff --git a/gcc/d/dmd/globals.h b/gcc/d/dmd/globals.h
index 0ef9eed..4048286 100644
--- a/gcc/d/dmd/globals.h
+++ b/gcc/d/dmd/globals.h
@@ -96,6 +96,46 @@ struct Output
int bufferLines; // number of lines written to the buffer
};
+/// Command line state related to printing usage about other switches
+struct Help
+{
+ d_bool manual; // open browser on compiler manual
+ d_bool usage; // print usage and exit
+ // print help of switch:
+ d_bool mcpu; // -mcpu
+ d_bool transition; // -transition
+ d_bool check; // -check
+ d_bool checkAction; // -checkaction
+ d_bool revert; // -revert
+ d_bool preview; // -preview
+ d_bool externStd; // -extern-std
+ d_bool hc; // -HC
+};
+
+struct Verbose
+{
+ d_bool verbose; // verbose compile
+ d_bool showColumns; // print character (column) numbers in diagnostics
+ d_bool tls; // identify thread local variables
+ d_bool templates; // collect and list statistics on template instantiations
+ // collect and list statistics on template instantiations origins.
+ // TODO: make this an enum when we want to list other kinds of instances
+ d_bool templatesListInstances;
+ d_bool gc; // identify gc usage
+ d_bool field; // identify non-mutable field variables
+ d_bool complex = true; // identify complex/imaginary type usage
+ d_bool vin; // identify 'in' parameters
+ d_bool showGaggedErrors; // print gagged errors anyway
+ d_bool printErrorContext; // print errors with the error context (the error line in the source file)
+ d_bool logo; // print compiler logo
+ d_bool color; // use ANSI colors in console output
+ d_bool cov; // generate code coverage data
+ MessageStyle messageStyle; // style of file/line annotations on messages
+ unsigned errorLimit;
+ unsigned errorSupplementLimit; // Limit the number of supplemental messages for each error (0 means unlimited)
+ unsigned errorSupplementCount();
+};
+
// Put command line switches in here
struct Param
{
@@ -103,24 +143,13 @@ struct Param
d_bool multiobj; // break one object file into multiple ones
d_bool trace; // insert profiling hooks
d_bool tracegc; // instrument calls to 'new'
- d_bool verbose; // verbose compile
d_bool vcg_ast; // write-out codegen-ast
- d_bool showColumns; // print character (column) numbers in diagnostics
- d_bool vtls; // identify thread local variables
- d_bool vtemplates; // collect and list statistics on template instantiations
- d_bool vtemplatesListInstances; // collect and list statistics on template instantiations origins
- d_bool vgc; // identify gc usage
- d_bool vfield; // identify non-mutable field variables
- d_bool vcomplex; // identify complex/imaginary type usage
- d_bool vin; // identify 'in' parameters
Diagnostic useDeprecated;
d_bool useUnitTests; // generate unittest code
d_bool useInline; // inline expand functions
d_bool release; // build release version
d_bool preservePaths; // true means don't strip path from source file
Diagnostic warnings;
- d_bool obsolete; // warn about use of obsolete features
- d_bool color; // use ANSI colors in console output
d_bool cov; // generate code coverage data
unsigned char covPercent; // 0..100 code coverage percentage required
d_bool ctfe_cov; // generate coverage data for ctfe
@@ -134,19 +163,9 @@ struct Param
d_bool allInst; // generate code for all template instantiations
d_bool bitfields; // support C style bit fields
CppStdRevision cplusplus; // version of C++ name mangling to support
- d_bool showGaggedErrors; // print gagged errors anyway
- d_bool printErrorContext; // print errors with the error context (the error line in the source file)
- d_bool manual; // open browser on compiler manual
- d_bool usage; // print usage and exit
- d_bool mcpuUsage; // print help on -mcpu switch
- d_bool transitionUsage; // print help on -transition switch
- d_bool checkUsage; // print help on -check switch
- d_bool checkActionUsage; // print help on -checkaction switch
- d_bool revertUsage; // print help on -revert switch
- d_bool previewUsage; // print help on -preview switch
- d_bool externStdUsage; // print help on -extern-std switch
- d_bool hcUsage; // print help on -HC switch
- d_bool logo; // print logo;
+
+ Help help;
+ Verbose v;
// Options for `-preview=/-revert=`
FeatureState useDIP25; // implement https://wiki.dlang.org/DIP25
@@ -181,9 +200,6 @@ struct Param
CHECKACTION checkAction; // action to take when bounds, asserts or switch defaults are violated
- unsigned errorLimit;
- unsigned errorSupplementLimit; // Limit the number of supplemental messages for each error (0 means unlimited)
-
DString argv0; // program name
Array<const char *> modFileAliasStrings; // array of char*'s of -I module filename alias strings
Array<const char *> *imppath; // array of char*'s of where to look for import modules
@@ -202,13 +218,7 @@ struct Param
Output moduleDeps; // Generate `.deps` module dependencies
unsigned debuglevel; // debug level
- Array<const char *> *debugids; // debug identifiers
-
unsigned versionlevel; // version level
- Array<const char *> *versionids; // version identifiers
-
-
- MessageStyle messageStyle; // style of file/line annotations on messages
d_bool run; // run resulting executable
Strings runargs; // arguments for executable
@@ -265,7 +275,6 @@ struct CompileEnv
bool previewIn;
bool ddocOutput;
bool shortenedMethods;
- bool obsolete;
};
struct Global
@@ -277,6 +286,7 @@ struct Global
Array<const char *> *path; // Array of char*'s which form the import lookup path
Array<const char *> *filePath; // Array of char*'s which form the file import lookup path
+ char datetime[26]; /// string returned by ctime()
CompileEnv compileEnv;
Param params;
@@ -296,6 +306,7 @@ struct Global
FileManager* fileManager;
ErrorSink* errorSink; // where the error messages go
+ ErrorSink* errorSinkNull; // where the error messages disappear
FileName (*preprocess)(FileName, const Loc&, bool&, OutBuffer&);
@@ -358,8 +369,8 @@ struct Loc
{
private:
unsigned _linnum;
- unsigned short _charnum;
- unsigned short fileIndex;
+ unsigned _charnum;
+ unsigned fileIndex;
public:
static void set(bool showColumns, MessageStyle messageStyle);
diff --git a/gcc/d/dmd/hdrgen.d b/gcc/d/dmd/hdrgen.d
index 33cbc19..056e486 100644
--- a/gcc/d/dmd/hdrgen.d
+++ b/gcc/d/dmd/hdrgen.d
@@ -51,7 +51,6 @@ import dmd.statement;
import dmd.staticassert;
import dmd.target;
import dmd.tokens;
-import dmd.utils;
import dmd.visitor;
struct HdrGenState
@@ -73,16 +72,53 @@ struct HdrGenState
enum TEST_EMIT_ALL = 0;
-extern (C++) void genhdrfile(Module m)
+/****************************************
+ * Generate a header (.di) file for Module m.
+ * Params:
+ * m = Module to generate header for
+ * buf = buffer to write the data to
+ */
+extern (C++) void genhdrfile(Module m, ref OutBuffer buf)
{
- OutBuffer buf;
buf.doindent = 1;
buf.printf("// D import file generated from '%s'", m.srcfile.toChars());
buf.writenl();
HdrGenState hgs;
hgs.hdrgen = true;
- toCBuffer(m, &buf, &hgs);
- writeFile(m.loc, m.hdrfile.toString(), buf[]);
+ toCBuffer(m, buf, hgs);
+}
+
+/***************************************
+ * Turn a Statement into a string suitable for printf.
+ * Leaks memory.
+ * Params:
+ * s = Statement to convert
+ * Returns:
+ * 0-terminated string
+ */
+public extern (C++) const(char)* toChars(const Statement s)
+{
+ HdrGenState hgs;
+ OutBuffer buf;
+ toCBuffer(s, buf, hgs);
+ buf.writeByte(0);
+ return buf.extractSlice().ptr;
+}
+
+public extern (C++) const(char)* toChars(const Initializer i)
+{
+ OutBuffer buf;
+ HdrGenState hgs;
+ toCBuffer(i, buf, hgs);
+ return buf.extractChars();
+}
+
+public const(char)[] toString(const Initializer i)
+{
+ OutBuffer buf;
+ HdrGenState hgs;
+ toCBuffer(i, buf, hgs);
+ return buf.extractSlice();
}
/**
@@ -91,14 +127,14 @@ extern (C++) void genhdrfile(Module m)
* buf = buffer to write to.
* m = module to visit all members of.
*/
-extern (C++) void moduleToBuffer(OutBuffer* buf, Module m)
+extern (C++) void moduleToBuffer(ref OutBuffer buf, Module m)
{
HdrGenState hgs;
hgs.fullDump = true;
- toCBuffer(m, buf, &hgs);
+ toCBuffer(m, buf, hgs);
}
-void moduleToBuffer2(Module m, OutBuffer* buf, HdrGenState* hgs)
+void moduleToBuffer2(Module m, ref OutBuffer buf, HdrGenState* hgs)
{
if (m.md)
{
@@ -132,7 +168,7 @@ void moduleToBuffer2(Module m, OutBuffer* buf, HdrGenState* hgs)
}
}
-private void statementToBuffer(Statement s, OutBuffer* buf, HdrGenState* hgs)
+private void statementToBuffer(Statement s, ref OutBuffer buf, HdrGenState* hgs)
{
void visitDefaultCase(Statement s)
{
@@ -201,8 +237,7 @@ private void statementToBuffer(Statement s, OutBuffer* buf, HdrGenState* hgs)
auto d = ds.exp.isDeclarationExp().declaration;
if (auto v = d.isVarDeclaration())
{
- scope ppv = new DsymbolPrettyPrintVisitor(buf, hgs);
- ppv.visitVarDecl(v, anywritten);
+ visitVarDecl(v, anywritten, buf, *hgs);
}
else
d.dsymbolToBuffer(buf, hgs);
@@ -508,6 +543,20 @@ private void statementToBuffer(Statement s, OutBuffer* buf, HdrGenState* hgs)
void visitSwitch(SwitchStatement s)
{
buf.writestring(s.isFinal ? "final switch (" : "switch (");
+ if (auto p = s.param)
+ {
+ // Print condition assignment
+ StorageClass stc = p.storageClass;
+ if (!p.type && !stc)
+ stc = STC.auto_;
+ if (stcToBuffer(buf, stc))
+ buf.writeByte(' ');
+ if (p.type)
+ typeToBuffer(p.type, p.ident, buf, hgs);
+ else
+ buf.writestring(p.ident.toString());
+ buf.writestring(" = ");
+ }
s.condition.expressionToBuffer(buf, hgs);
buf.writeByte(')');
buf.writenl();
@@ -792,50 +841,36 @@ private void statementToBuffer(Statement s, OutBuffer* buf, HdrGenState* hgs)
visit.VisitStatement(s);
}
-private void dsymbolToBuffer(Dsymbol s, OutBuffer* buf, HdrGenState* hgs)
+private void dsymbolToBuffer(Dsymbol s, ref OutBuffer buf, HdrGenState* hgs)
{
- scope v = new DsymbolPrettyPrintVisitor(buf, hgs);
- s.accept(v);
+ toCBuffer(s, buf, *hgs);
}
-private extern (C++) final class DsymbolPrettyPrintVisitor : Visitor
+void toCBuffer(Dsymbol s, ref OutBuffer buf, ref HdrGenState hgs)
{
- alias visit = Visitor.visit;
-public:
- OutBuffer* buf;
- HdrGenState* hgs;
-
- extern (D) this(OutBuffer* buf, HdrGenState* hgs) scope @safe
- {
- this.buf = buf;
- this.hgs = hgs;
- }
-
- ////////////////////////////////////////////////////////////////////////////
-
- override void visit(Dsymbol s)
+ void visitDsymbol(Dsymbol s)
{
buf.writestring(s.toChars());
}
- override void visit(StaticAssert s)
+ void visitStaticAssert(StaticAssert s)
{
buf.writestring(s.kind());
buf.writeByte('(');
- s.exp.expressionToBuffer(buf, hgs);
+ s.exp.expressionToBuffer(buf, &hgs);
if (s.msgs)
{
foreach (m; (*s.msgs)[])
{
buf.writestring(", ");
- m.expressionToBuffer(buf, hgs);
+ m.expressionToBuffer(buf, &hgs);
}
}
buf.writestring(");");
buf.writenl();
}
- override void visit(DebugSymbol s)
+ void visitDebugSymbol(DebugSymbol s)
{
buf.writestring("debug = ");
if (s.ident)
@@ -846,7 +881,7 @@ public:
buf.writenl();
}
- override void visit(VersionSymbol s)
+ void visitVersionSymbol(VersionSymbol s)
{
buf.writestring("version = ");
if (s.ident)
@@ -857,20 +892,20 @@ public:
buf.writenl();
}
- override void visit(EnumMember em)
+ void visitEnumMember(EnumMember em)
{
if (em.type)
- typeToBuffer(em.type, em.ident, buf, hgs);
+ typeToBuffer(em.type, em.ident, buf, &hgs);
else
buf.writestring(em.ident.toString());
if (em.value)
{
buf.writestring(" = ");
- em.value.expressionToBuffer(buf, hgs);
+ em.value.expressionToBuffer(buf, &hgs);
}
}
- override void visit(Import imp)
+ void visitImport(Import imp)
{
if (hgs.hdrgen && imp.id == Id.object)
return; // object is imported by default
@@ -904,14 +939,14 @@ public:
buf.writenl();
}
- override void visit(AliasThis d)
+ void visitAliasThis(AliasThis d)
{
buf.writestring("alias ");
buf.writestring(d.ident.toString());
buf.writestring(" this;\n");
}
- override void visit(AttribDeclaration d)
+ void visitAttribDeclaration(AttribDeclaration d)
{
bool hasSTC;
if (auto stcd = d.isStorageClassDeclaration)
@@ -934,7 +969,7 @@ public:
else if (d.decl.length == 1)
{
if (hasSTC) buf.writeByte(' ');
- (*d.decl)[0].accept(this);
+ toCBuffer((*d.decl)[0], buf, hgs);
return;
}
else
@@ -944,35 +979,35 @@ public:
buf.writenl();
buf.level++;
foreach (de; *d.decl)
- de.accept(this);
+ toCBuffer(de, buf, hgs);
buf.level--;
buf.writeByte('}');
}
buf.writenl();
}
- override void visit(StorageClassDeclaration d)
+ void visitStorageClassDeclaration(StorageClassDeclaration d)
{
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
}
- override void visit(DeprecatedDeclaration d)
+ void visitDeprecatedDeclaration(DeprecatedDeclaration d)
{
buf.writestring("deprecated(");
- d.msg.expressionToBuffer(buf, hgs);
+ d.msg.expressionToBuffer(buf, &hgs);
buf.writestring(") ");
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
}
- override void visit(LinkDeclaration d)
+ void visitLinkDeclaration(LinkDeclaration d)
{
buf.writestring("extern (");
buf.writestring(linkageToString(d.linkage));
buf.writestring(") ");
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
}
- override void visit(CPPMangleDeclaration d)
+ void visitCPPMangleDeclaration(CPPMangleDeclaration d)
{
string s;
final switch (d.cppmangle)
@@ -989,22 +1024,22 @@ public:
buf.writestring("extern (C++, ");
buf.writestring(s);
buf.writestring(") ");
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
}
- override void visit(VisibilityDeclaration d)
+ void visitVisibilityDeclaration(VisibilityDeclaration d)
{
visibilityToBuffer(buf, d.visibility);
AttribDeclaration ad = cast(AttribDeclaration)d;
if (ad.decl.length <= 1)
buf.writeByte(' ');
if (ad.decl.length == 1 && (*ad.decl)[0].isVisibilityDeclaration)
- visit(cast(AttribDeclaration)(*ad.decl)[0]);
+ visitAttribDeclaration((*ad.decl)[0].isVisibilityDeclaration);
else
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
}
- override void visit(AlignDeclaration d)
+ void visitAlignDeclaration(AlignDeclaration d)
{
if (d.exps)
{
@@ -1020,10 +1055,10 @@ public:
else
buf.writestring("align ");
- visit(d.isAttribDeclaration());
+ visitAttribDeclaration(d.isAttribDeclaration());
}
- override void visit(AnonDeclaration d)
+ void visitAnonDeclaration(AnonDeclaration d)
{
buf.writestring(d.isunion ? "union" : "struct");
buf.writenl();
@@ -1033,21 +1068,21 @@ public:
if (d.decl)
{
foreach (de; *d.decl)
- de.accept(this);
+ toCBuffer(de, buf, hgs);
}
buf.level--;
buf.writestring("}");
buf.writenl();
}
- override void visit(PragmaDeclaration d)
+ void visitPragmaDeclaration(PragmaDeclaration d)
{
buf.writestring("pragma (");
buf.writestring(d.ident.toString());
if (d.args && d.args.length)
{
buf.writestring(", ");
- argsToBuffer(d.args, buf, hgs);
+ argsToBuffer(d.args, buf, &hgs);
}
buf.writeByte(')');
@@ -1059,13 +1094,13 @@ public:
if (d.ident == Id.Pinline)
global.params.dihdr.fullOutput = true;
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
global.params.dihdr.fullOutput = savedFullDump;
}
- override void visit(ConditionalDeclaration d)
+ void visitConditionalDeclaration(ConditionalDeclaration d)
{
- d.condition.conditionToBuffer(buf, hgs);
+ d.condition.conditionToBuffer(buf, &hgs);
if (d.decl || d.elsedecl)
{
buf.writenl();
@@ -1075,7 +1110,7 @@ public:
if (d.decl)
{
foreach (de; *d.decl)
- de.accept(this);
+ toCBuffer(de, buf, hgs);
}
buf.level--;
buf.writeByte('}');
@@ -1088,7 +1123,7 @@ public:
buf.writenl();
buf.level++;
foreach (de; *d.elsedecl)
- de.accept(this);
+ toCBuffer(de, buf, hgs);
buf.level--;
buf.writeByte('}');
}
@@ -1098,7 +1133,7 @@ public:
buf.writenl();
}
- override void visit(StaticForeachDeclaration s)
+ void visitStaticForeachDeclaration(StaticForeachDeclaration s)
{
void foreachWithoutBody(ForeachStatement s)
{
@@ -1111,12 +1146,12 @@ public:
if (stcToBuffer(buf, p.storageClass))
buf.writeByte(' ');
if (p.type)
- typeToBuffer(p.type, p.ident, buf, hgs);
+ typeToBuffer(p.type, p.ident, buf, &hgs);
else
buf.writestring(p.ident.toString());
}
buf.writestring("; ");
- s.aggr.expressionToBuffer(buf, hgs);
+ s.aggr.expressionToBuffer(buf, &hgs);
buf.writeByte(')');
buf.writenl();
}
@@ -1128,13 +1163,13 @@ public:
buf.writestring(Token.toString(s.op));
buf.writestring(" (");
if (s.prm.type)
- typeToBuffer(s.prm.type, s.prm.ident, buf, hgs);
+ typeToBuffer(s.prm.type, s.prm.ident, buf, &hgs);
else
buf.writestring(s.prm.ident.toString());
buf.writestring("; ");
- s.lwr.expressionToBuffer(buf, hgs);
+ s.lwr.expressionToBuffer(buf, &hgs);
buf.writestring(" .. ");
- s.upr.expressionToBuffer(buf, hgs);
+ s.upr.expressionToBuffer(buf, &hgs);
buf.writeByte(')');
buf.writenl();
}
@@ -1152,62 +1187,161 @@ public:
buf.writeByte('{');
buf.writenl();
buf.level++;
- visit(cast(AttribDeclaration)s);
+ visitAttribDeclaration(s);
buf.level--;
buf.writeByte('}');
buf.writenl();
}
- override void visit(MixinDeclaration d)
+ void visitMixinDeclaration(MixinDeclaration d)
{
buf.writestring("mixin(");
- argsToBuffer(d.exps, buf, hgs, null);
+ argsToBuffer(d.exps, buf, &hgs, null);
buf.writestring(");");
buf.writenl();
}
- override void visit(UserAttributeDeclaration d)
+ void visitUserAttributeDeclaration(UserAttributeDeclaration d)
{
buf.writestring("@(");
- argsToBuffer(d.atts, buf, hgs);
+ argsToBuffer(d.atts, buf, &hgs);
buf.writeByte(')');
- visit(cast(AttribDeclaration)d);
+ visitAttribDeclaration(d);
}
- override void visit(TemplateDeclaration d)
+ void visitTemplateConstraint(Expression constraint)
{
- version (none)
- {
- // Should handle template functions for doc generation
- if (onemember && onemember.isFuncDeclaration())
- buf.writestring("foo ");
- }
- if ((hgs.hdrgen || hgs.fullDump) && visitEponymousMember(d))
+ if (!constraint)
return;
- if (hgs.ddoc)
- buf.writestring(d.kind());
- else
- buf.writestring("template");
- buf.writeByte(' ');
- buf.writestring(d.ident.toString());
- buf.writeByte('(');
- visitTemplateParameters(hgs.ddoc ? d.origParameters : d.parameters);
+ buf.writestring(" if (");
+ constraint.expressionToBuffer(buf, &hgs);
buf.writeByte(')');
- visitTemplateConstraint(d.constraint);
- if (hgs.hdrgen || hgs.fullDump)
+ }
+
+ /// Returns: whether `do` is needed to write the function body
+ bool contractsToBuffer(FuncDeclaration f)
+ {
+ bool requireDo = false;
+ // in{}
+ if (f.frequires)
{
- hgs.tpltMember++;
- buf.writenl();
- buf.writeByte('{');
+ foreach (frequire; *f.frequires)
+ {
+ buf.writestring("in");
+ if (auto es = frequire.isExpStatement())
+ {
+ assert(es.exp && es.exp.op == EXP.assert_);
+ buf.writestring(" (");
+ (cast(AssertExp)es.exp).e1.expressionToBuffer(buf, &hgs);
+ buf.writeByte(')');
+ buf.writenl();
+ requireDo = false;
+ }
+ else
+ {
+ buf.writenl();
+ frequire.statementToBuffer(buf, &hgs);
+ requireDo = true;
+ }
+ }
+ }
+ // out{}
+ if (f.fensures)
+ {
+ foreach (fensure; *f.fensures)
+ {
+ buf.writestring("out");
+ if (auto es = fensure.ensure.isExpStatement())
+ {
+ assert(es.exp && es.exp.op == EXP.assert_);
+ buf.writestring(" (");
+ if (fensure.id)
+ {
+ buf.writestring(fensure.id.toString());
+ }
+ buf.writestring("; ");
+ (cast(AssertExp)es.exp).e1.expressionToBuffer(buf, &hgs);
+ buf.writeByte(')');
+ buf.writenl();
+ requireDo = false;
+ }
+ else
+ {
+ if (fensure.id)
+ {
+ buf.writeByte('(');
+ buf.writestring(fensure.id.toString());
+ buf.writeByte(')');
+ }
+ buf.writenl();
+ fensure.ensure.statementToBuffer(buf, &hgs);
+ requireDo = true;
+ }
+ }
+ }
+ return requireDo;
+ }
+
+ void bodyToBuffer(FuncDeclaration f)
+ {
+ if (!f.fbody || (hgs.hdrgen && global.params.dihdr.fullOutput == false && !hgs.autoMember && !hgs.tpltMember && !hgs.insideFuncBody))
+ {
+ if (!f.fbody && (f.fensures || f.frequires))
+ {
+ buf.writenl();
+ contractsToBuffer(f);
+ }
+ buf.writeByte(';');
buf.writenl();
- buf.level++;
- foreach (s; *d.members)
- s.accept(this);
- buf.level--;
- buf.writeByte('}');
+ return;
+ }
+
+ // there is no way to know if a function is nested
+ // or not after parsing. We need scope information
+ // for that, which is available during semantic
+ // analysis. To overcome that, a simple mechanism
+ // is implemented: every time we print a function
+ // body (templated or not) we increment a counter.
+ // We decrement the counter when we stop
+ // printing the function body.
+ ++hgs.insideFuncBody;
+ scope(exit) { --hgs.insideFuncBody; }
+
+ const savetlpt = hgs.tpltMember;
+ const saveauto = hgs.autoMember;
+ hgs.tpltMember = 0;
+ hgs.autoMember = 0;
+ buf.writenl();
+ bool requireDo = contractsToBuffer(f);
+
+ if (requireDo)
+ {
+ buf.writestring("do");
buf.writenl();
- hgs.tpltMember--;
+ }
+ buf.writeByte('{');
+ buf.writenl();
+ buf.level++;
+ f.fbody.statementToBuffer(buf, &hgs);
+ buf.level--;
+ buf.writeByte('}');
+ buf.writenl();
+ hgs.tpltMember = savetlpt;
+ hgs.autoMember = saveauto;
+ }
+
+ void visitBaseClasses(ClassDeclaration d)
+ {
+ if (!d || !d.baseclasses.length)
+ return;
+ if (!d.isAnonymous())
+ buf.writestring(" : ");
+ foreach (i, b; *d.baseclasses)
+ {
+ if (i)
+ buf.writestring(", ");
+ typeToBuffer(b.type, null, buf, &hgs);
}
}
@@ -1223,7 +1357,7 @@ public:
assert(fd.type);
if (stcToBuffer(buf, fd.storage_class))
buf.writeByte(' ');
- functionToBufferFull(cast(TypeFunction)fd.type, buf, d.ident, hgs, d);
+ functionToBufferFull(cast(TypeFunction)fd.type, buf, d.ident, &hgs, d);
visitTemplateConstraint(d.constraint);
hgs.tpltMember++;
bodyToBuffer(fd);
@@ -1236,7 +1370,7 @@ public:
buf.writeByte(' ');
buf.writestring(ad.ident.toString());
buf.writeByte('(');
- visitTemplateParameters(hgs.ddoc ? d.origParameters : d.parameters);
+ visitTemplateParameters(hgs.ddoc ? d.origParameters : d.parameters, buf, hgs);
buf.writeByte(')');
visitTemplateConstraint(d.constraint);
visitBaseClasses(ad.isClassDeclaration());
@@ -1248,7 +1382,7 @@ public:
buf.writenl();
buf.level++;
foreach (s; *ad.members)
- s.accept(this);
+ toCBuffer(s, buf, hgs);
buf.level--;
buf.writeByte('}');
}
@@ -1265,20 +1399,20 @@ public:
if (stcToBuffer(buf, vd.storage_class))
buf.writeByte(' ');
if (vd.type)
- typeToBuffer(vd.type, vd.ident, buf, hgs);
+ typeToBuffer(vd.type, vd.ident, buf, &hgs);
else
buf.writestring(vd.ident.toString());
buf.writeByte('(');
- visitTemplateParameters(hgs.ddoc ? d.origParameters : d.parameters);
+ visitTemplateParameters(hgs.ddoc ? d.origParameters : d.parameters, buf, hgs);
buf.writeByte(')');
if (vd._init)
{
buf.writestring(" = ");
ExpInitializer ie = vd._init.isExpInitializer();
if (ie && (ie.exp.op == EXP.construct || ie.exp.op == EXP.blit))
- (cast(AssignExp)ie.exp).e2.expressionToBuffer(buf, hgs);
+ (cast(AssignExp)ie.exp).e2.expressionToBuffer(buf, &hgs);
else
- vd._init.initializerToBuffer(buf, hgs);
+ vd._init.initializerToBuffer(buf, &hgs);
}
buf.writeByte(';');
buf.writenl();
@@ -1287,44 +1421,59 @@ public:
return false;
}
- void visitTemplateParameters(TemplateParameters* parameters)
+ void visitTemplateDeclaration(TemplateDeclaration d)
{
- if (!parameters || !parameters.length)
- return;
- foreach (i, p; *parameters)
+ version (none)
{
- if (i)
- buf.writestring(", ");
- p.templateParameterToBuffer(buf, hgs);
+ // Should handle template functions for doc generation
+ if (onemember && onemember.isFuncDeclaration())
+ buf.writestring("foo ");
}
- }
-
- void visitTemplateConstraint(Expression constraint)
- {
- if (!constraint)
+ if ((hgs.hdrgen || hgs.fullDump) && visitEponymousMember(d))
return;
- buf.writestring(" if (");
- constraint.expressionToBuffer(buf, hgs);
+ if (hgs.ddoc)
+ buf.writestring(d.kind());
+ else
+ buf.writestring("template");
+ buf.writeByte(' ');
+ buf.writestring(d.ident.toString());
+ buf.writeByte('(');
+ visitTemplateParameters(hgs.ddoc ? d.origParameters : d.parameters, buf, hgs);
buf.writeByte(')');
+ visitTemplateConstraint(d.constraint);
+ if (hgs.hdrgen || hgs.fullDump)
+ {
+ hgs.tpltMember++;
+ buf.writenl();
+ buf.writeByte('{');
+ buf.writenl();
+ buf.level++;
+ foreach (s; *d.members)
+ toCBuffer(s, buf, hgs);
+ buf.level--;
+ buf.writeByte('}');
+ buf.writenl();
+ hgs.tpltMember--;
+ }
}
- override void visit(TemplateInstance ti)
+ void visitTemplateInstance(TemplateInstance ti)
{
buf.writestring(ti.name.toChars());
- tiargsToBuffer(ti, buf, hgs);
+ tiargsToBuffer(ti, buf, &hgs);
if (hgs.fullDump)
{
buf.writenl();
- dumpTemplateInstance(ti, buf, hgs);
+ dumpTemplateInstance(ti, buf, &hgs);
}
}
- override void visit(TemplateMixin tm)
+ void visitTemplateMixin(TemplateMixin tm)
{
buf.writestring("mixin ");
- typeToBuffer(tm.tqual, null, buf, hgs);
- tiargsToBuffer(tm, buf, hgs);
+ typeToBuffer(tm.tqual, null, buf, &hgs);
+ tiargsToBuffer(tm, buf, &hgs);
if (tm.ident && memcmp(tm.ident.toChars(), cast(const(char)*)"__mixin", 7) != 0)
{
buf.writeByte(' ');
@@ -1333,10 +1482,10 @@ public:
buf.writeByte(';');
buf.writenl();
if (hgs.fullDump)
- dumpTemplateInstance(tm, buf, hgs);
+ dumpTemplateInstance(tm, buf, &hgs);
}
- override void visit(EnumDeclaration d)
+ void visitEnumDeclaration(EnumDeclaration d)
{
auto oldInEnumDecl = hgs.inEnumDecl;
scope(exit) hgs.inEnumDecl = oldInEnumDecl;
@@ -1349,7 +1498,7 @@ public:
if (d.memtype)
{
buf.writestring(" : ");
- typeToBuffer(d.memtype, null, buf, hgs);
+ typeToBuffer(d.memtype, null, buf, &hgs);
}
if (!d.members)
{
@@ -1365,7 +1514,7 @@ public:
{
if (!em)
continue;
- em.accept(this);
+ toCBuffer(em, buf, hgs);
buf.writeByte(',');
buf.writenl();
}
@@ -1374,7 +1523,7 @@ public:
buf.writenl();
}
- override void visit(Nspace d)
+ void visitNspace(Nspace d)
{
buf.writestring("extern (C++, ");
buf.writestring(d.ident.toString());
@@ -1384,13 +1533,13 @@ public:
buf.writenl();
buf.level++;
foreach (s; *d.members)
- s.accept(this);
+ toCBuffer(s, buf, hgs);
buf.level--;
buf.writeByte('}');
buf.writenl();
}
- override void visit(StructDeclaration d)
+ void visitStructDeclaration(StructDeclaration d)
{
buf.writestring(d.kind());
buf.writeByte(' ');
@@ -1408,14 +1557,14 @@ public:
buf.level++;
hgs.insideAggregate++;
foreach (s; *d.members)
- s.accept(this);
+ toCBuffer(s, buf, hgs);
hgs.insideAggregate--;
buf.level--;
buf.writeByte('}');
buf.writenl();
}
- override void visit(ClassDeclaration d)
+ void visitClassDeclaration(ClassDeclaration d)
{
if (!d.isAnonymous())
{
@@ -1432,7 +1581,7 @@ public:
buf.level++;
hgs.insideAggregate++;
foreach (s; *d.members)
- s.accept(this);
+ toCBuffer(s, buf, hgs);
hgs.insideAggregate--;
buf.level--;
buf.writeByte('}');
@@ -1442,21 +1591,7 @@ public:
buf.writenl();
}
- void visitBaseClasses(ClassDeclaration d)
- {
- if (!d || !d.baseclasses.length)
- return;
- if (!d.isAnonymous())
- buf.writestring(" : ");
- foreach (i, b; *d.baseclasses)
- {
- if (i)
- buf.writestring(", ");
- typeToBuffer(b.type, null, buf, hgs);
- }
- }
-
- override void visit(AliasDeclaration d)
+ void visitAliasDeclaration(AliasDeclaration d)
{
if (d.storage_class & STC.local)
return;
@@ -1479,14 +1614,14 @@ public:
}
else
{
- d.aliassym.accept(this);
+ toCBuffer(d.aliassym, buf, hgs);
}
}
else if (d.type.ty == Tfunction)
{
if (stcToBuffer(buf, d.storage_class))
buf.writeByte(' ');
- typeToBuffer(d.type, d.ident, buf, hgs);
+ typeToBuffer(d.type, d.ident, buf, &hgs);
}
else if (d.ident)
{
@@ -1495,92 +1630,41 @@ public:
buf.writestring(" = ");
if (stcToBuffer(buf, d.storage_class))
buf.writeByte(' ');
- typeToBuffer(d.type, null, buf, hgs);
+ typeToBuffer(d.type, null, buf, &hgs);
hgs.declstring = false;
}
buf.writeByte(';');
buf.writenl();
}
- override void visit(AliasAssign d)
+ void visitAliasAssign(AliasAssign d)
{
buf.writestring(d.ident.toString());
buf.writestring(" = ");
if (d.aliassym)
- d.aliassym.accept(this);
+ toCBuffer(d.aliassym, buf, hgs);
else // d.type
- typeToBuffer(d.type, null, buf, hgs);
+ typeToBuffer(d.type, null, buf, &hgs);
buf.writeByte(';');
buf.writenl();
}
- override void visit(VarDeclaration d)
+ void visitVarDeclaration(VarDeclaration d)
{
if (d.storage_class & STC.local)
return;
- visitVarDecl(d, false);
+ visitVarDecl(d, false, buf, hgs);
buf.writeByte(';');
buf.writenl();
}
- void visitVarDecl(VarDeclaration v, bool anywritten)
- {
- const bool isextern = hgs.hdrgen &&
- !hgs.insideFuncBody &&
- !hgs.tpltMember &&
- !hgs.insideAggregate &&
- !(v.storage_class & STC.manifest);
-
- void vinit(VarDeclaration v)
- {
- auto ie = v._init.isExpInitializer();
- if (ie && (ie.exp.op == EXP.construct || ie.exp.op == EXP.blit))
- (cast(AssignExp)ie.exp).e2.expressionToBuffer(buf, hgs);
- else
- v._init.initializerToBuffer(buf, hgs);
- }
-
- if (anywritten)
- {
- buf.writestring(", ");
- buf.writestring(v.ident.toString());
- }
- else
- {
- const bool useTypeof = isextern && v._init && !v.type;
- auto stc = v.storage_class;
- if (isextern)
- stc |= STC.extern_;
- if (useTypeof)
- stc &= ~STC.auto_;
- if (stcToBuffer(buf, stc))
- buf.writeByte(' ');
- if (v.type)
- typeToBuffer(v.type, v.ident, buf, hgs);
- else if (useTypeof)
- {
- buf.writestring("typeof(");
- vinit(v);
- buf.writestring(") ");
- buf.writestring(v.ident.toString());
- }
- else
- buf.writestring(v.ident.toString());
- }
- if (v._init && !isextern)
- {
- buf.writestring(" = ");
- vinit(v);
- }
- }
-
- override void visit(FuncDeclaration f)
+ void visitFuncDeclaration(FuncDeclaration f)
{
//printf("FuncDeclaration::toCBuffer() '%s'\n", f.toChars());
if (stcToBuffer(buf, f.storage_class))
buf.writeByte(' ');
auto tf = cast(TypeFunction)f.type;
- typeToBuffer(tf, f.ident, buf, hgs);
+ typeToBuffer(tf, f.ident, buf, &hgs);
if (hgs.hdrgen)
{
@@ -1613,119 +1697,7 @@ public:
bodyToBuffer(f);
}
- /// Returns: whether `do` is needed to write the function body
- bool contractsToBuffer(FuncDeclaration f)
- {
- bool requireDo = false;
- // in{}
- if (f.frequires)
- {
- foreach (frequire; *f.frequires)
- {
- buf.writestring("in");
- if (auto es = frequire.isExpStatement())
- {
- assert(es.exp && es.exp.op == EXP.assert_);
- buf.writestring(" (");
- (cast(AssertExp)es.exp).e1.expressionToBuffer(buf, hgs);
- buf.writeByte(')');
- buf.writenl();
- requireDo = false;
- }
- else
- {
- buf.writenl();
- frequire.statementToBuffer(buf, hgs);
- requireDo = true;
- }
- }
- }
- // out{}
- if (f.fensures)
- {
- foreach (fensure; *f.fensures)
- {
- buf.writestring("out");
- if (auto es = fensure.ensure.isExpStatement())
- {
- assert(es.exp && es.exp.op == EXP.assert_);
- buf.writestring(" (");
- if (fensure.id)
- {
- buf.writestring(fensure.id.toString());
- }
- buf.writestring("; ");
- (cast(AssertExp)es.exp).e1.expressionToBuffer(buf, hgs);
- buf.writeByte(')');
- buf.writenl();
- requireDo = false;
- }
- else
- {
- if (fensure.id)
- {
- buf.writeByte('(');
- buf.writestring(fensure.id.toString());
- buf.writeByte(')');
- }
- buf.writenl();
- fensure.ensure.statementToBuffer(buf, hgs);
- requireDo = true;
- }
- }
- }
- return requireDo;
- }
-
- void bodyToBuffer(FuncDeclaration f)
- {
- if (!f.fbody || (hgs.hdrgen && global.params.dihdr.fullOutput == false && !hgs.autoMember && !hgs.tpltMember && !hgs.insideFuncBody))
- {
- if (!f.fbody && (f.fensures || f.frequires))
- {
- buf.writenl();
- contractsToBuffer(f);
- }
- buf.writeByte(';');
- buf.writenl();
- return;
- }
-
- // there is no way to know if a function is nested
- // or not after parsing. We need scope information
- // for that, which is avaible during semantic
- // analysis. To overcome that, a simple mechanism
- // is implemented: everytime we print a function
- // body (templated or not) we increment a counter.
- // We decredement the counter when we stop
- // printing the function body.
- ++hgs.insideFuncBody;
- scope(exit) { --hgs.insideFuncBody; }
-
- const savetlpt = hgs.tpltMember;
- const saveauto = hgs.autoMember;
- hgs.tpltMember = 0;
- hgs.autoMember = 0;
- buf.writenl();
- bool requireDo = contractsToBuffer(f);
-
- if (requireDo)
- {
- buf.writestring("do");
- buf.writenl();
- }
- buf.writeByte('{');
- buf.writenl();
- buf.level++;
- f.fbody.statementToBuffer(buf, hgs);
- buf.level--;
- buf.writeByte('}');
- buf.writenl();
- hgs.tpltMember = savetlpt;
- hgs.autoMember = saveauto;
- }
-
- override void visit(FuncLiteralDeclaration f)
+ void visitFuncLiteralDeclaration(FuncLiteralDeclaration f)
{
if (f.type.ty == Terror)
{
@@ -1740,8 +1712,8 @@ public:
TypeFunction tf = cast(TypeFunction)f.type;
if (!f.inferRetType && tf.next)
- typeToBuffer(tf.next, null, buf, hgs);
- parametersToBuffer(tf.parameterList, buf, hgs);
+ typeToBuffer(tf.next, null, buf, &hgs);
+ parametersToBuffer(tf.parameterList, buf, &hgs);
// https://issues.dlang.org/show_bug.cgi?id=20074
void printAttribute(string str)
@@ -1764,7 +1736,7 @@ public:
if (rs && rs.exp)
{
buf.writestring(" => ");
- rs.exp.expressionToBuffer(buf, hgs);
+ rs.exp.expressionToBuffer(buf, &hgs);
}
else
{
@@ -1774,7 +1746,7 @@ public:
}
}
- override void visit(PostBlitDeclaration d)
+ void visitPostBlitDeclaration(PostBlitDeclaration d)
{
if (stcToBuffer(buf, d.storage_class))
buf.writeByte(' ');
@@ -1782,7 +1754,7 @@ public:
bodyToBuffer(d);
}
- override void visit(DtorDeclaration d)
+ void visitDtorDeclaration(DtorDeclaration d)
{
if (stcToBuffer(buf, d.storage_class))
buf.writeByte(' ');
@@ -1790,7 +1762,7 @@ public:
bodyToBuffer(d);
}
- override void visit(StaticCtorDeclaration d)
+ void visitStaticCtorDeclaration(StaticCtorDeclaration d)
{
if (stcToBuffer(buf, d.storage_class & ~STC.static_))
buf.writeByte(' ');
@@ -1806,7 +1778,7 @@ public:
bodyToBuffer(d);
}
- override void visit(StaticDtorDeclaration d)
+ void visitStaticDtorDeclaration(StaticDtorDeclaration d)
{
if (stcToBuffer(buf, d.storage_class & ~STC.static_))
buf.writeByte(' ');
@@ -1822,7 +1794,7 @@ public:
bodyToBuffer(d);
}
- override void visit(InvariantDeclaration d)
+ void visitInvariantDeclaration(InvariantDeclaration d)
{
if (hgs.hdrgen)
return;
@@ -1833,7 +1805,7 @@ public:
{
assert(es.exp && es.exp.op == EXP.assert_);
buf.writestring(" (");
- (cast(AssertExp)es.exp).e1.expressionToBuffer(buf, hgs);
+ (cast(AssertExp)es.exp).e1.expressionToBuffer(buf, &hgs);
buf.writestring(");");
buf.writenl();
}
@@ -1843,7 +1815,7 @@ public:
}
}
- override void visit(UnitTestDeclaration d)
+ void visitUnitTestDeclaration(UnitTestDeclaration d)
{
if (hgs.hdrgen)
return;
@@ -1853,35 +1825,160 @@ public:
bodyToBuffer(d);
}
- override void visit(BitFieldDeclaration d)
+ void visitBitFieldDeclaration(BitFieldDeclaration d)
{
if (stcToBuffer(buf, d.storage_class))
buf.writeByte(' ');
Identifier id = d.isAnonymous() ? null : d.ident;
- typeToBuffer(d.type, id, buf, hgs);
+ typeToBuffer(d.type, id, buf, &hgs);
buf.writestring(" : ");
- d.width.expressionToBuffer(buf, hgs);
+ d.width.expressionToBuffer(buf, &hgs);
buf.writeByte(';');
buf.writenl();
}
- override void visit(NewDeclaration d)
+ void visitNewDeclaration(NewDeclaration d)
{
if (stcToBuffer(buf, d.storage_class & ~STC.static_))
buf.writeByte(' ');
buf.writestring("new();");
}
- override void visit(Module m)
+ void visitModule(Module m)
+ {
+ moduleToBuffer2(m, buf, &hgs);
+ }
+
+ extern (C++)
+ final class DsymbolPrettyPrintVisitor : Visitor
+ {
+ alias visit = Visitor.visit;
+
+ public:
+ override:
+ void visit(Dsymbol s) { visitDsymbol(s); }
+ void visit(StaticAssert s) { visitStaticAssert(s); }
+ void visit(DebugSymbol s) { visitDebugSymbol(s); }
+ void visit(VersionSymbol s) { visitVersionSymbol(s); }
+ void visit(EnumMember em) { visitEnumMember(em); }
+ void visit(Import imp) { visitImport(imp); }
+ void visit(AliasThis d) { visitAliasThis(d); }
+ void visit(AttribDeclaration d) { visitAttribDeclaration(d); }
+ void visit(StorageClassDeclaration d) { visitStorageClassDeclaration(d); }
+ void visit(DeprecatedDeclaration d) { visitDeprecatedDeclaration(d); }
+ void visit(LinkDeclaration d) { visitLinkDeclaration(d); }
+ void visit(CPPMangleDeclaration d) { visitCPPMangleDeclaration(d); }
+ void visit(VisibilityDeclaration d) { visitVisibilityDeclaration(d); }
+ void visit(AlignDeclaration d) { visitAlignDeclaration(d); }
+ void visit(AnonDeclaration d) { visitAnonDeclaration(d); }
+ void visit(PragmaDeclaration d) { visitPragmaDeclaration(d); }
+ void visit(ConditionalDeclaration d) { visitConditionalDeclaration(d); }
+ void visit(StaticForeachDeclaration s) { visitStaticForeachDeclaration(s); }
+ void visit(MixinDeclaration d) { visitMixinDeclaration(d); }
+ void visit(UserAttributeDeclaration d) { visitUserAttributeDeclaration(d); }
+ void visit(TemplateDeclaration d) { visitTemplateDeclaration(d); }
+ void visit(TemplateInstance ti) { visitTemplateInstance(ti); }
+ void visit(TemplateMixin tm) { visitTemplateMixin(tm); }
+ void visit(EnumDeclaration d) { visitEnumDeclaration(d); }
+ void visit(Nspace d) { visitNspace(d); }
+ void visit(StructDeclaration d) { visitStructDeclaration(d); }
+ void visit(ClassDeclaration d) { visitClassDeclaration(d); }
+ void visit(AliasDeclaration d) { visitAliasDeclaration(d); }
+ void visit(AliasAssign d) { visitAliasAssign(d); }
+ void visit(VarDeclaration d) { visitVarDeclaration(d); }
+ void visit(FuncDeclaration f) { visitFuncDeclaration(f); }
+ void visit(FuncLiteralDeclaration f) { visitFuncLiteralDeclaration(f); }
+ void visit(PostBlitDeclaration d) { visitPostBlitDeclaration(d); }
+ void visit(DtorDeclaration d) { visitDtorDeclaration(d); }
+ void visit(StaticCtorDeclaration d) { visitStaticCtorDeclaration(d); }
+ void visit(StaticDtorDeclaration d) { visitStaticDtorDeclaration(d); }
+ void visit(InvariantDeclaration d) { visitInvariantDeclaration(d); }
+ void visit(UnitTestDeclaration d) { visitUnitTestDeclaration(d); }
+ void visit(BitFieldDeclaration d) { visitBitFieldDeclaration(d); }
+ void visit(NewDeclaration d) { visitNewDeclaration(d); }
+ void visit(Module m) { visitModule(m); }
+ }
+
+ scope v = new DsymbolPrettyPrintVisitor();
+ s.accept(v);
+}
+
+
+/*****************************************
+ * Pretty-print a template parameter list to a buffer.
+ */
+private void visitTemplateParameters(TemplateParameters* parameters, ref OutBuffer buf, ref HdrGenState hgs)
+{
+ if (!parameters)
+ return;
+ foreach (i, p; *parameters)
+ {
+ if (i)
+ buf.writestring(", ");
+ p.templateParameterToBuffer(buf, &hgs);
+ }
+}
+
+
+/*******************************************
+ * Pretty-print a VarDeclaration to buf.
+ */
+private void visitVarDecl(VarDeclaration v, bool anywritten, ref OutBuffer buf, ref HdrGenState hgs)
+{
+ const bool isextern = hgs.hdrgen &&
+ !hgs.insideFuncBody &&
+ !hgs.tpltMember &&
+ !hgs.insideAggregate &&
+ !(v.storage_class & STC.manifest);
+
+ void vinit(VarDeclaration v)
+ {
+ auto ie = v._init.isExpInitializer();
+ if (ie && (ie.exp.op == EXP.construct || ie.exp.op == EXP.blit))
+ (cast(AssignExp)ie.exp).e2.expressionToBuffer(buf, &hgs);
+ else
+ v._init.initializerToBuffer(buf, &hgs);
+ }
+
+ if (anywritten)
+ {
+ buf.writestring(", ");
+ buf.writestring(v.ident.toString());
+ }
+ else
+ {
+ const bool useTypeof = isextern && v._init && !v.type;
+ auto stc = v.storage_class;
+ if (isextern)
+ stc |= STC.extern_;
+ if (useTypeof)
+ stc &= ~STC.auto_;
+ if (stcToBuffer(buf, stc))
+ buf.writeByte(' ');
+ if (v.type)
+ typeToBuffer(v.type, v.ident, buf, &hgs);
+ else if (useTypeof)
+ {
+ buf.writestring("typeof(");
+ vinit(v);
+ buf.writestring(") ");
+ buf.writestring(v.ident.toString());
+ }
+ else
+ buf.writestring(v.ident.toString());
+ }
+ if (v._init && !isextern)
{
- moduleToBuffer2(m, buf, hgs);
+ buf.writestring(" = ");
+ vinit(v);
}
}
+
/*********************************************
* Print expression to buffer.
*/
-private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hgs)
+private void expressionPrettyPrint(Expression e, ref OutBuffer buf, HdrGenState* hgs)
{
void visit(Expression e)
{
@@ -1922,7 +2019,7 @@ private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hg
case Tdchar:
{
const o = buf.length;
- writeSingleCharLiteral(*buf, cast(dchar) v);
+ writeSingleCharLiteral(buf, cast(dchar) v);
if (hgs.ddoc)
escapeDdocString(buf, o);
break;
@@ -2005,7 +2102,7 @@ private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hg
void visitVoidInit(VoidInitExp e)
{
- buf.writestring("__void");
+ buf.writestring("void");
}
void floatToBuffer(Type type, real_t value)
@@ -2064,7 +2161,7 @@ private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hg
const o = buf.length;
foreach (i; 0 .. e.len)
{
- writeCharLiteral(*buf, e.getCodeUnit(i));
+ writeCharLiteral(buf, e.getCodeUnit(i));
}
if (hgs.ddoc)
escapeDdocString(buf, o);
@@ -2251,8 +2348,7 @@ private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hg
// which isn't correct as regular D code.
buf.writeByte('(');
- scope v = new DsymbolPrettyPrintVisitor(buf, hgs);
- v.visitVarDecl(var, false);
+ visitVarDecl(var, false, buf, *hgs);
buf.writeByte(';');
buf.writeByte(')');
@@ -2308,8 +2404,7 @@ private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hg
if (e.parameters && e.parameters.length)
{
buf.writestring(", ");
- scope v = new DsymbolPrettyPrintVisitor(buf, hgs);
- v.visitTemplateParameters(e.parameters);
+ visitTemplateParameters(e.parameters, buf, *hgs);
}
buf.writeByte(')');
}
@@ -2717,7 +2812,7 @@ private void expressionPrettyPrint(Expression e, OutBuffer* buf, HdrGenState* hg
* allowHex = whether hex floating point literals may be used
* for greater accuracy
*/
-void floatToBuffer(Type type, const real_t value, OutBuffer* buf, const bool allowHex)
+void floatToBuffer(Type type, const real_t value, ref OutBuffer buf, const bool allowHex)
{
/** sizeof(value)*3 is because each byte of mantissa is max
of 256 (3 characters). The string will be "-M.MMMMe-4932".
@@ -2762,9 +2857,9 @@ void floatToBuffer(Type type, const real_t value, OutBuffer* buf, const bool all
}
}
-private void templateParameterToBuffer(TemplateParameter tp, OutBuffer* buf, HdrGenState* hgs)
+private void templateParameterToBuffer(TemplateParameter tp, ref OutBuffer buf, HdrGenState* hgs)
{
- scope v = new TemplateParameterPrettyPrintVisitor(buf, hgs);
+ scope v = new TemplateParameterPrettyPrintVisitor(&buf, hgs);
tp.accept(v);
}
@@ -2787,12 +2882,12 @@ public:
if (tp.specType)
{
buf.writestring(" : ");
- typeToBuffer(tp.specType, null, buf, hgs);
+ typeToBuffer(tp.specType, null, *buf, hgs);
}
if (tp.defaultType)
{
buf.writestring(" = ");
- typeToBuffer(tp.defaultType, null, buf, hgs);
+ typeToBuffer(tp.defaultType, null, *buf, hgs);
}
}
@@ -2806,33 +2901,33 @@ public:
{
buf.writestring("alias ");
if (tp.specType)
- typeToBuffer(tp.specType, tp.ident, buf, hgs);
+ typeToBuffer(tp.specType, tp.ident, *buf, hgs);
else
buf.writestring(tp.ident.toString());
if (tp.specAlias)
{
buf.writestring(" : ");
- objectToBuffer(tp.specAlias, buf, hgs);
+ objectToBuffer(tp.specAlias, *buf, hgs);
}
if (tp.defaultAlias)
{
buf.writestring(" = ");
- objectToBuffer(tp.defaultAlias, buf, hgs);
+ objectToBuffer(tp.defaultAlias, *buf, hgs);
}
}
override void visit(TemplateValueParameter tp)
{
- typeToBuffer(tp.valType, tp.ident, buf, hgs);
+ typeToBuffer(tp.valType, tp.ident, *buf, hgs);
if (tp.specValue)
{
buf.writestring(" : ");
- tp.specValue.expressionToBuffer(buf, hgs);
+ tp.specValue.expressionToBuffer(*buf, hgs);
}
if (tp.defaultValue)
{
buf.writestring(" = ");
- tp.defaultValue.expressionToBuffer(buf, hgs);
+ tp.defaultValue.expressionToBuffer(*buf, hgs);
}
}
@@ -2843,9 +2938,9 @@ public:
}
}
-private void conditionToBuffer(Condition c, OutBuffer* buf, HdrGenState* hgs)
+private void conditionToBuffer(Condition c, ref OutBuffer buf, HdrGenState* hgs)
{
- scope v = new ConditionPrettyPrintVisitor(buf, hgs);
+ scope v = new ConditionPrettyPrintVisitor(&buf, hgs);
c.accept(v);
}
@@ -2885,42 +2980,37 @@ public:
override void visit(StaticIfCondition c)
{
buf.writestring("static if (");
- c.exp.expressionToBuffer(buf, hgs);
+ c.exp.expressionToBuffer(*buf, hgs);
buf.writeByte(')');
}
}
-void toCBuffer(const Statement s, OutBuffer* buf, HdrGenState* hgs)
+void toCBuffer(const Statement s, ref OutBuffer buf, ref HdrGenState hgs)
{
- (cast()s).statementToBuffer(buf, hgs);
+ (cast()s).statementToBuffer(buf, &hgs);
}
-void toCBuffer(const Type t, OutBuffer* buf, const Identifier ident, HdrGenState* hgs)
+void toCBuffer(const Type t, ref OutBuffer buf, const Identifier ident, ref HdrGenState hgs)
{
- typeToBuffer(cast() t, ident, buf, hgs);
-}
-
-void toCBuffer(Dsymbol s, OutBuffer* buf, HdrGenState* hgs)
-{
- scope v = new DsymbolPrettyPrintVisitor(buf, hgs);
- s.accept(v);
+ typeToBuffer(cast() t, ident, buf, &hgs);
}
// used from TemplateInstance::toChars() and TemplateMixin::toChars()
-void toCBufferInstance(const TemplateInstance ti, OutBuffer* buf, bool qualifyTypes = false)
+void toCBufferInstance(const TemplateInstance ti, ref OutBuffer buf, bool qualifyTypes = false)
{
HdrGenState hgs;
hgs.fullQual = qualifyTypes;
- scope v = new DsymbolPrettyPrintVisitor(buf, &hgs);
- v.visit(cast() ti);
+
+ buf.writestring(ti.name.toChars());
+ tiargsToBuffer(cast() ti, buf, &hgs);
}
-void toCBuffer(const Initializer iz, OutBuffer* buf, HdrGenState* hgs)
+void toCBuffer(const Initializer iz, ref OutBuffer buf, ref HdrGenState hgs)
{
- initializerToBuffer(cast() iz, buf, hgs);
+ initializerToBuffer(cast() iz, buf, &hgs);
}
-bool stcToBuffer(OutBuffer* buf, StorageClass stc) @safe
+bool stcToBuffer(ref OutBuffer buf, StorageClass stc) @safe
{
//printf("stc: %llx\n", stc);
bool result = false;
@@ -3039,7 +3129,7 @@ string stcToString(ref StorageClass stc) @safe
return null;
}
-private void linkageToBuffer(OutBuffer* buf, LINK linkage) @safe
+private void linkageToBuffer(ref OutBuffer buf, LINK linkage) @safe
{
const s = linkageToString(linkage);
if (s.length)
@@ -3058,26 +3148,21 @@ const(char)* linkageToChars(LINK linkage)
string linkageToString(LINK linkage) pure nothrow @safe
{
- final switch (linkage)
- {
- case LINK.default_:
- return null;
- case LINK.d:
- return "D";
- case LINK.c:
- return "C";
- case LINK.cpp:
- return "C++";
- case LINK.windows:
- return "Windows";
- case LINK.objc:
- return "Objective-C";
- case LINK.system:
- return "System";
+ with (LINK)
+ {
+ immutable string[7] a = [
+ default_ : null,
+ d : "D",
+ c : "C",
+ cpp : "C++",
+ windows : "Windows",
+ objc : "Objective-C",
+ system : "System" ];
+ return a[linkage];
}
}
-void visibilityToBuffer(OutBuffer* buf, Visibility vis)
+void visibilityToBuffer(ref OutBuffer buf, Visibility vis)
{
buf.writestring(visibilityToString(vis.kind));
if (vis.kind == Visibility.Kind.package_ && vis.pkg)
@@ -3101,48 +3186,42 @@ const(char)* visibilityToChars(Visibility.Kind kind)
/// Ditto
extern (D) string visibilityToString(Visibility.Kind kind) nothrow pure @safe
{
- final switch (kind)
- {
- case Visibility.Kind.undefined:
- return null;
- case Visibility.Kind.none:
- return "none";
- case Visibility.Kind.private_:
- return "private";
- case Visibility.Kind.package_:
- return "package";
- case Visibility.Kind.protected_:
- return "protected";
- case Visibility.Kind.public_:
- return "public";
- case Visibility.Kind.export_:
- return "export";
+ with (Visibility.Kind)
+ {
+ immutable string[7] a = [
+ none : "none",
+ private_ : "private",
+ package_ : "package",
+ protected_ : "protected",
+ public_ : "public",
+ export_ : "export" ];
+ return a[kind];
}
}
// Print the full function signature with correct ident, attributes and template args
-void functionToBufferFull(TypeFunction tf, OutBuffer* buf, const Identifier ident, HdrGenState* hgs, TemplateDeclaration td)
+void functionToBufferFull(TypeFunction tf, ref OutBuffer buf, const Identifier ident, HdrGenState* hgs, TemplateDeclaration td)
{
//printf("TypeFunction::toCBuffer() this = %p\n", this);
visitFuncIdentWithPrefix(tf, ident, td, buf, hgs);
}
// ident is inserted before the argument list and will be "function" or "delegate" for a type
-void functionToBufferWithIdent(TypeFunction tf, OutBuffer* buf, const(char)* ident, bool isStatic)
+void functionToBufferWithIdent(TypeFunction tf, ref OutBuffer buf, const(char)* ident, bool isStatic)
{
HdrGenState hgs;
visitFuncIdentWithPostfix(tf, ident.toDString(), buf, &hgs, isStatic);
}
-void toCBuffer(const Expression e, OutBuffer* buf, HdrGenState* hgs)
+void toCBuffer(const Expression e, ref OutBuffer buf, ref HdrGenState hgs)
{
- expressionPrettyPrint(cast()e, buf, hgs);
+ expressionPrettyPrint(cast()e, buf, &hgs);
}
/**************************************************
* Write out argument types to buf.
*/
-void argExpTypesToCBuffer(OutBuffer* buf, Expressions* arguments)
+void argExpTypesToCBuffer(ref OutBuffer buf, Expressions* arguments)
{
if (!arguments || !arguments.length)
return;
@@ -3155,13 +3234,13 @@ void argExpTypesToCBuffer(OutBuffer* buf, Expressions* arguments)
}
}
-void toCBuffer(const TemplateParameter tp, OutBuffer* buf, HdrGenState* hgs)
+void toCBuffer(const TemplateParameter tp, ref OutBuffer buf, ref HdrGenState hgs)
{
- scope v = new TemplateParameterPrettyPrintVisitor(buf, hgs);
+ scope v = new TemplateParameterPrettyPrintVisitor(&buf, &hgs);
(cast() tp).accept(v);
}
-void arrayObjectsToBuffer(OutBuffer* buf, Objects* objects)
+void arrayObjectsToBuffer(ref OutBuffer buf, Objects* objects)
{
if (!objects || !objects.length)
return;
@@ -3184,7 +3263,7 @@ extern (C++) const(char)* parametersTypeToChars(ParameterList pl)
{
OutBuffer buf;
HdrGenState hgs;
- parametersToBuffer(pl, &buf, &hgs);
+ parametersToBuffer(pl, buf, &hgs);
return buf.extractChars();
}
@@ -3202,7 +3281,7 @@ const(char)* parameterToChars(Parameter parameter, TypeFunction tf, bool fullQua
HdrGenState hgs;
hgs.fullQual = fullQual;
- parameterToBuffer(parameter, &buf, &hgs);
+ parameterToBuffer(parameter, buf, &hgs);
if (tf.parameterList.varargs == VarArg.typesafe && parameter == tf.parameterList[tf.parameterList.parameters.length - 1])
{
@@ -3220,7 +3299,7 @@ const(char)* parameterToChars(Parameter parameter, TypeFunction tf, bool fullQua
* hgs = context
*/
-private void parametersToBuffer(ParameterList pl, OutBuffer* buf, HdrGenState* hgs)
+private void parametersToBuffer(ParameterList pl, ref OutBuffer buf, HdrGenState* hgs)
{
buf.writeByte('(');
foreach (i; 0 .. pl.length)
@@ -3258,7 +3337,7 @@ private void parametersToBuffer(ParameterList pl, OutBuffer* buf, HdrGenState* h
* buf = buffer to write it to
* hgs = context
*/
-private void parameterToBuffer(Parameter p, OutBuffer* buf, HdrGenState* hgs)
+private void parameterToBuffer(Parameter p, ref OutBuffer buf, HdrGenState* hgs)
{
if (p.userAttribDecl)
{
@@ -3330,7 +3409,7 @@ private void parameterToBuffer(Parameter p, OutBuffer* buf, HdrGenState* hgs)
* basis = replace `null`s in argument list with this expression (for sparse array literals)
* names = if non-null, use these as the names for the arguments
*/
-private void argsToBuffer(Expressions* expressions, OutBuffer* buf, HdrGenState* hgs, Expression basis = null, Identifiers* names = null)
+private void argsToBuffer(Expressions* expressions, ref OutBuffer buf, HdrGenState* hgs, Expression basis = null, Identifiers* names = null)
{
if (!expressions || !expressions.length)
return;
@@ -3381,7 +3460,7 @@ private void argsToBuffer(Expressions* expressions, OutBuffer* buf, HdrGenState*
}
}
-private void sizeToBuffer(Expression e, OutBuffer* buf, HdrGenState* hgs)
+private void sizeToBuffer(Expression e, ref OutBuffer buf, HdrGenState* hgs)
{
if (e.type == Type.tsize_t)
{
@@ -3409,7 +3488,7 @@ private void sizeToBuffer(Expression e, OutBuffer* buf, HdrGenState* hgs)
expToBuffer(e, PREC.assign, buf, hgs);
}
-private void expressionToBuffer(Expression e, OutBuffer* buf, HdrGenState* hgs)
+private void expressionToBuffer(Expression e, ref OutBuffer buf, HdrGenState* hgs)
{
expressionPrettyPrint(e, buf, hgs);
}
@@ -3418,7 +3497,7 @@ private void expressionToBuffer(Expression e, OutBuffer* buf, HdrGenState* hgs)
* Write expression out to buf, but wrap it
* in ( ) if its precedence is less than pr.
*/
-private void expToBuffer(Expression e, PREC pr, OutBuffer* buf, HdrGenState* hgs)
+private void expToBuffer(Expression e, PREC pr, ref OutBuffer buf, HdrGenState* hgs)
{
debug
{
@@ -3452,7 +3531,7 @@ private void expToBuffer(Expression e, PREC pr, OutBuffer* buf, HdrGenState* hgs
/**************************************************
* An entry point to pretty-print type.
*/
-private void typeToBuffer(Type t, const Identifier ident, OutBuffer* buf, HdrGenState* hgs,
+private void typeToBuffer(Type t, const Identifier ident, ref OutBuffer buf, HdrGenState* hgs,
ubyte modMask = 0)
{
if (auto tf = t.isTypeFunction())
@@ -3468,7 +3547,7 @@ private void typeToBuffer(Type t, const Identifier ident, OutBuffer* buf, HdrGen
}
}
-private void visitWithMask(Type t, ubyte modMask, OutBuffer* buf, HdrGenState* hgs)
+private void visitWithMask(Type t, ubyte modMask, ref OutBuffer buf, HdrGenState* hgs)
{
// Tuples and functions don't use the type constructor syntax
if (modMask == t.mod || t.ty == Tfunction || t.ty == Ttuple)
@@ -3504,7 +3583,7 @@ private void visitWithMask(Type t, ubyte modMask, OutBuffer* buf, HdrGenState* h
}
-private void dumpTemplateInstance(TemplateInstance ti, OutBuffer* buf, HdrGenState* hgs)
+private void dumpTemplateInstance(TemplateInstance ti, ref OutBuffer buf, HdrGenState* hgs)
{
buf.writeByte('{');
buf.writenl();
@@ -3527,7 +3606,7 @@ private void dumpTemplateInstance(TemplateInstance ti, OutBuffer* buf, HdrGenSta
}
-private void tiargsToBuffer(TemplateInstance ti, OutBuffer* buf, HdrGenState* hgs)
+private void tiargsToBuffer(TemplateInstance ti, ref OutBuffer buf, HdrGenState* hgs)
{
buf.writeByte('!');
if (ti.nest)
@@ -3576,7 +3655,7 @@ private void tiargsToBuffer(TemplateInstance ti, OutBuffer* buf, HdrGenState* hg
* This makes a 'pretty' version of the template arguments.
* It's analogous to genIdent() which makes a mangled version.
*/
-private void objectToBuffer(RootObject oarg, OutBuffer* buf, HdrGenState* hgs)
+private void objectToBuffer(RootObject oarg, ref OutBuffer buf, HdrGenState* hgs)
{
//printf("objectToBuffer()\n");
/* The logic of this should match what genIdent() does. The _dynamic_cast()
@@ -3629,7 +3708,7 @@ private void objectToBuffer(RootObject oarg, OutBuffer* buf, HdrGenState* hgs)
}
-private void visitFuncIdentWithPostfix(TypeFunction t, const char[] ident, OutBuffer* buf, HdrGenState* hgs, bool isStatic)
+private void visitFuncIdentWithPostfix(TypeFunction t, const char[] ident, ref OutBuffer buf, HdrGenState* hgs, bool isStatic)
{
if (t.inuse)
{
@@ -3674,7 +3753,7 @@ private void visitFuncIdentWithPostfix(TypeFunction t, const char[] ident, OutBu
}
private void visitFuncIdentWithPrefix(TypeFunction t, const Identifier ident, TemplateDeclaration td,
- OutBuffer* buf, HdrGenState* hgs)
+ ref OutBuffer buf, HdrGenState* hgs)
{
if (t.inuse)
{
@@ -3743,7 +3822,7 @@ private void visitFuncIdentWithPrefix(TypeFunction t, const Identifier ident, Te
}
-private void initializerToBuffer(Initializer inx, OutBuffer* buf, HdrGenState* hgs)
+private void initializerToBuffer(Initializer inx, ref OutBuffer buf, HdrGenState* hgs)
{
void visitError(ErrorInitializer iz)
{
@@ -3811,7 +3890,7 @@ private void initializerToBuffer(Initializer inx, OutBuffer* buf, HdrGenState* h
if (d.exp)
{
buf.writeByte('[');
- toCBuffer(d.exp, buf, hgs);
+ toCBuffer(d.exp, buf, *hgs);
buf.writeByte(']');
}
else
@@ -3832,7 +3911,7 @@ private void initializerToBuffer(Initializer inx, OutBuffer* buf, HdrGenState* h
}
-private void typeToBufferx(Type t, OutBuffer* buf, HdrGenState* hgs)
+private void typeToBufferx(Type t, ref OutBuffer buf, HdrGenState* hgs)
{
void visitType(Type t)
{
@@ -4020,7 +4099,7 @@ private void typeToBufferx(Type t, OutBuffer* buf, HdrGenState* hgs)
buf.writeByte(' ');
if (t.id)
buf.writestring(t.id.toChars());
- if (t.tok == TOK.enum_ && t.base.ty != TY.Tint32)
+ if (t.tok == TOK.enum_ && t.base && t.base.ty != TY.Tint32)
{
buf.writestring(" : ");
visitWithMask(t.base, t.mod, buf, hgs);
diff --git a/gcc/d/dmd/hdrgen.h b/gcc/d/dmd/hdrgen.h
index 43fea34..e43a355 100644
--- a/gcc/d/dmd/hdrgen.h
+++ b/gcc/d/dmd/hdrgen.h
@@ -15,7 +15,7 @@
class Module;
-void genhdrfile(Module *m);
+void genhdrfile(Module *m, OutBuffer &buf);
void genCppHdrFiles(Modules &ms);
-void moduleToBuffer(OutBuffer *buf, Module *m);
+void moduleToBuffer(OutBuffer& buf, Module *m);
const char *parametersTypeToChars(ParameterList pl);
diff --git a/gcc/d/dmd/iasm.d b/gcc/d/dmd/iasm.d
index 1fdfe40..c58224f 100644
--- a/gcc/d/dmd/iasm.d
+++ b/gcc/d/dmd/iasm.d
@@ -64,6 +64,7 @@ extern(C++) Statement asmSemantic(AsmStatement s, Scope *sc)
return statementSemantic(se, sc);
}
auto ias = new InlineAsmStatement(s.loc, s.tokens);
+ ias.caseSensitive = s.caseSensitive;
return inlineAsmSemantic(ias, sc);
}
else version (IN_GCC)
diff --git a/gcc/d/dmd/iasmgcc.d b/gcc/d/dmd/iasmgcc.d
index 1d4dea4..5494fec 100644
--- a/gcc/d/dmd/iasmgcc.d
+++ b/gcc/d/dmd/iasmgcc.d
@@ -330,7 +330,7 @@ extern (C++) public Statement gccAsmSemantic(GccAsmStatement s, Scope *sc)
s.insn = semanticString(sc, s.insn, "asm instruction template");
if (s.labels && s.outputargs)
- s.error("extended asm statements with labels cannot have output constraints");
+ error(s.loc, "extended asm statements with labels cannot have output constraints");
// Analyse all input and output operands.
if (s.args)
diff --git a/gcc/d/dmd/id.d b/gcc/d/dmd/id.d
index 43b2e5f..b506e6f 100644
--- a/gcc/d/dmd/id.d
+++ b/gcc/d/dmd/id.d
@@ -61,6 +61,8 @@ immutable Msgtable[] msgtable =
{ "IUnknown" },
{ "Object" },
{ "object" },
+ { "_size_t", "size_t" },
+ { "_ptrdiff_t", "ptrdiff_t" },
{ "string" },
{ "wstring" },
{ "dstring" },
@@ -114,6 +116,7 @@ immutable Msgtable[] msgtable =
{ "returnLabel", "__returnLabel" },
{ "line" },
{ "empty", "" },
+ { "dotdotdot", "..." }, // use for error messages
{ "p" },
{ "__vptr" },
{ "__monitor" },
@@ -305,6 +308,7 @@ immutable Msgtable[] msgtable =
{ "aaKeys", "_aaKeys" },
{ "aaValues", "_aaValues" },
{ "aaRehash", "_aaRehash" },
+ { "_aaAsStruct" },
{ "monitorenter", "_d_monitorenter" },
{ "monitorexit", "_d_monitorexit" },
{ "criticalenter", "_d_criticalenter2" },
diff --git a/gcc/d/dmd/importc.d b/gcc/d/dmd/importc.d
index fe0aa17..98ac903 100644
--- a/gcc/d/dmd/importc.d
+++ b/gcc/d/dmd/importc.d
@@ -20,6 +20,7 @@ import dmd.dcast;
import dmd.declaration;
import dmd.dscope;
import dmd.dsymbol;
+import dmd.errors;
import dmd.expression;
import dmd.expressionsem;
import dmd.identifier;
@@ -126,14 +127,14 @@ Expression fieldLookup(Expression e, Scope* sc, Identifier id, bool arrow)
t = t.isTypePointer().next;
auto pe = e.toChars();
if (!arrow)
- e.error("since `%s` is a pointer, use `%s->%s` instead of `%s.%s`", pe, pe, id.toChars(), pe, id.toChars());
+ error(e.loc, "since `%s` is a pointer, use `%s->%s` instead of `%s.%s`", pe, pe, id.toChars(), pe, id.toChars());
e = new PtrExp(e.loc, e);
}
if (auto ts = t.isTypeStruct())
s = ts.sym.search(e.loc, id, 0);
if (!s)
{
- e.error("`%s` is not a member of `%s`", id.toChars(), t.toChars());
+ error(e.loc, "`%s` is not a member of `%s`", id.toChars(), t.toChars());
return ErrorExp.get();
}
Expression ef = new DotVarExp(e.loc, e, s.isDeclaration());
diff --git a/gcc/d/dmd/init.d b/gcc/d/dmd/init.d
index e7cf905..ecca552 100644
--- a/gcc/d/dmd/init.d
+++ b/gcc/d/dmd/init.d
@@ -57,14 +57,6 @@ extern (C++) class Initializer : ASTNode
this.kind = kind;
}
- override final const(char)* toChars() const
- {
- OutBuffer buf;
- HdrGenState hgs;
- .toCBuffer(this, &buf, &hgs);
- return buf.extractChars();
- }
-
final inout(ErrorInitializer) isErrorInitializer() inout @nogc nothrow pure
{
// Use void* cast to skip dynamic casting call
diff --git a/gcc/d/dmd/init.h b/gcc/d/dmd/init.h
index 9a6a56b..4ab5848 100644
--- a/gcc/d/dmd/init.h
+++ b/gcc/d/dmd/init.h
@@ -35,8 +35,6 @@ public:
DYNCAST dyncast() const override { return DYNCAST_INITIALIZER; }
- const char *toChars() const override final;
-
ErrorInitializer *isErrorInitializer();
VoidInitializer *isVoidInitializer();
StructInitializer *isStructInitializer();
diff --git a/gcc/d/dmd/initsem.d b/gcc/d/dmd/initsem.d
index c60b431..28c7c2b 100644
--- a/gcc/d/dmd/initsem.d
+++ b/gcc/d/dmd/initsem.d
@@ -29,6 +29,7 @@ import dmd.expression;
import dmd.expressionsem;
import dmd.func;
import dmd.globals;
+import dmd.hdrgen;
import dmd.id;
import dmd.identifier;
import dmd.importc;
@@ -53,31 +54,38 @@ import dmd.typesem;
*/
Expression toAssocArrayLiteral(ArrayInitializer ai)
{
- Expression e;
- //printf("ArrayInitializer::toAssocArrayInitializer()\n");
+ //printf("ArrayInitializer::toAssocArrayInitializer(%s)\n", ai.toChars());
//static int i; if (++i == 2) assert(0);
const dim = ai.value.length;
+ if (!dim)
+ {
+ error(ai.loc, "invalid associative array initializer `%s`, use `null` instead",
+ toChars(ai));
+ return ErrorExp.get();
+ }
+ auto no(const char* format, Initializer i)
+ {
+ error(i.loc, format, toChars(i));
+ return ErrorExp.get();
+ }
+ Expression e;
auto keys = new Expressions(dim);
auto values = new Expressions(dim);
for (size_t i = 0; i < dim; i++)
{
- e = ai.index[i];
- if (!e)
- goto Lno;
- (*keys)[i] = e;
Initializer iz = ai.value[i];
- if (!iz)
- goto Lno;
+ assert(iz);
e = iz.initializerToExpression();
if (!e)
- goto Lno;
+ return no("invalid value `%s` in initializer", iz);
(*values)[i] = e;
+ e = ai.index[i];
+ if (!e)
+ return no("missing key for value `%s` in initializer", iz);
+ (*keys)[i] = e;
}
e = new AssocArrayLiteralExp(ai.loc, keys, values);
return e;
-Lno:
- error(ai.loc, "not an associative array initializer");
- return ErrorExp.get();
}
/******************************************
@@ -392,13 +400,13 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ
}
if (i.exp.op == EXP.type)
{
- i.exp.error("initializer must be an expression, not `%s`", i.exp.toChars());
+ error(i.exp.loc, "initializer must be an expression, not `%s`", i.exp.toChars());
return err();
}
// Make sure all pointers are constants
if (needInterpret && hasNonConstPointers(i.exp))
{
- i.exp.error("cannot use non-constant CTFE pointer in an initializer `%s`", currExp.toChars());
+ error(i.exp.loc, "cannot use non-constant CTFE pointer in an initializer `%s`", currExp.toChars());
return err();
}
Type ti = i.exp.type.toBasetype();
@@ -556,7 +564,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ
}
if (dim1 != dim2)
{
- i.exp.error("mismatched array lengths, %d and %d", cast(int)dim1, cast(int)dim2);
+ error(i.exp.loc, "mismatched array lengths, %d and %d", cast(int)dim1, cast(int)dim2);
i.exp = ErrorExp.get();
}
}
@@ -564,7 +572,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ
const errors = global.startGagging();
i.exp = i.exp.implicitCastTo(sc, t);
if (global.endGagging(errors))
- currExp.error("cannot implicitly convert expression `%s` of type `%s` to `%s`", currExp.toChars(), et.toChars(), t.toChars());
+ error(currExp.loc, "cannot implicitly convert expression `%s` of type `%s` to `%s`", currExp.toChars(), et.toChars(), t.toChars());
}
}
L1:
@@ -784,12 +792,12 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ
const length = (*dlist).length;
if (length == 0 || !(*dlist)[0].ident)
{
- error(ci.loc, "`.identifier` expected for C struct field initializer `%s`", ci.toChars());
+ error(ci.loc, "`.identifier` expected for C struct field initializer `%s`", toChars(ci));
return err();
}
if (length > 1)
{
- error(ci.loc, "only 1 designator currently allowed for C struct field initializer `%s`", ci.toChars());
+ error(ci.loc, "only 1 designator currently allowed for C struct field initializer `%s`", toChars(ci));
return err();
}
auto id = (*dlist)[0].ident;
@@ -905,12 +913,12 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ
const length = (*dlist).length;
if (length == 0 || !(*dlist)[0].exp)
{
- error(ci.loc, "`[ constant-expression ]` expected for C array element initializer `%s`", ci.toChars());
+ error(ci.loc, "`[ constant-expression ]` expected for C array element initializer `%s`", toChars(ci));
return err();
}
if (length > 1)
{
- error(ci.loc, "only 1 designator currently allowed for C array element initializer `%s`", ci.toChars());
+ error(ci.loc, "only 1 designator currently allowed for C array element initializer `%s`", toChars(ci));
return err();
}
//printf("tn: %s, di.initializer: %s\n", tn.toChars(), di.initializer.toChars());
@@ -981,7 +989,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ
}
else
{
- error(ci.loc, "unrecognized C initializer `%s`", ci.toChars());
+ error(ci.loc, "unrecognized C initializer `%s`", toChars(ci));
return err();
}
}
@@ -1096,9 +1104,9 @@ Initializer inferType(Initializer init, Scope* sc)
{
TemplateInstance ti = se.sds.isTemplateInstance();
if (ti && ti.semanticRun == PASS.semantic && !ti.aliasdecl)
- se.error("cannot infer type from %s `%s`, possible circular dependency", se.sds.kind(), se.toChars());
+ error(se.loc, "cannot infer type from %s `%s`, possible circular dependency", se.sds.kind(), se.toChars());
else
- se.error("cannot infer type from %s `%s`", se.sds.kind(), se.toChars());
+ error(se.loc, "cannot infer type from %s `%s`", se.sds.kind(), se.toChars());
return new ErrorInitializer();
}
@@ -1112,7 +1120,7 @@ Initializer inferType(Initializer init, Scope* sc)
}
if (hasOverloads && !f.isUnique())
{
- init.exp.error("cannot infer type from overloaded function symbol `%s`", init.exp.toChars());
+ error(init.exp.loc, "cannot infer type from overloaded function symbol `%s`", init.exp.toChars());
return new ErrorInitializer();
}
}
@@ -1120,7 +1128,7 @@ Initializer inferType(Initializer init, Scope* sc)
{
if (ae.e1.op == EXP.overloadSet)
{
- init.exp.error("cannot infer type from overloaded function symbol `%s`", init.exp.toChars());
+ error(init.exp.loc, "cannot infer type from overloaded function symbol `%s`", init.exp.toChars());
return new ErrorInitializer();
}
}
diff --git a/gcc/d/dmd/json.d b/gcc/d/dmd/json.d
index 9689986..f1999fd 100644
--- a/gcc/d/dmd/json.d
+++ b/gcc/d/dmd/json.d
@@ -788,7 +788,7 @@ public:
objectStart();
jsonProperties(d);
if (d._init)
- property("init", d._init.toString());
+ property("init", toString(d._init));
if (d.isField())
property("offset", d.offset);
if (!d.alignment.isUnknown() && !d.alignment.isDefault())
@@ -810,17 +810,14 @@ public:
Params:
modules = array of the "root modules"
*/
- private void generateModules(Modules* modules)
+ private void generateModules(ref Modules modules)
{
arrayStart();
- if (modules)
+ foreach (m; modules)
{
- foreach (m; *modules)
- {
- if (global.params.verbose)
- message("json gen %s", m.toChars());
- m.accept(this);
- }
+ if (global.params.v.verbose)
+ message("json gen %s", m.toChars());
+ m.accept(this);
}
arrayEnd();
}
@@ -981,9 +978,15 @@ public:
}
}
-extern (C++) void json_generate(OutBuffer* buf, Modules* modules)
+/***********************************
+ * Generate json for the modules.
+ * Params:
+ * modules = array of Modules
+ * buf = write json output to buf
+ */
+extern (C++) void json_generate(ref Modules modules, ref OutBuffer buf)
{
- scope ToJsonVisitor json = new ToJsonVisitor(buf);
+ scope ToJsonVisitor json = new ToJsonVisitor(&buf);
// write trailing newline
scope(exit) buf.writeByte('\n');
diff --git a/gcc/d/dmd/json.h b/gcc/d/dmd/json.h
index 7a23897..09fdecd 100644
--- a/gcc/d/dmd/json.h
+++ b/gcc/d/dmd/json.h
@@ -15,5 +15,5 @@
struct OutBuffer;
-void json_generate(OutBuffer *, Modules *);
+void json_generate(Modules &, OutBuffer &);
JsonFieldFlags tryParseJsonField(const char *fieldName);
diff --git a/gcc/d/dmd/lexer.d b/gcc/d/dmd/lexer.d
index c28fe5c..882f2ea 100644
--- a/gcc/d/dmd/lexer.d
+++ b/gcc/d/dmd/lexer.d
@@ -51,7 +51,7 @@ struct CompileEnv
bool previewIn; /// `in` means `[ref] scope const`, accepts rvalues
bool ddocOutput; /// collect embedded documentation comments
bool shortenedMethods = true; /// allow => in normal function declarations
- bool obsolete; /// warn on use of legacy code
+ bool masm; /// use MASM inline asm syntax
}
/***********************************************************
@@ -484,6 +484,12 @@ class Lexer
goto default;
wysiwygStringConstant(t);
return;
+ case 'x':
+ if (p[1] != '"')
+ goto case_ident;
+ p++;
+ t.value = hexStringConstant(t);
+ return;
case 'q':
if (Ccompile)
goto case_ident;
@@ -526,7 +532,7 @@ class Lexer
//case 'u':
case 'v':
case 'w':
- case 'x':
+ /*case 'x':*/
case 'y':
case 'z':
case 'A':
@@ -1476,6 +1482,85 @@ class Lexer
}
}
+ /**************************************
+ * Lex hex strings:
+ * x"0A ae 34FE BD"
+ */
+ final TOK hexStringConstant(Token* t)
+ {
+ Loc start = loc();
+ uint n = 0;
+ uint v = ~0; // dead assignment, needed to suppress warning
+ p++;
+ stringbuffer.setsize(0);
+ while (1)
+ {
+ dchar c = *p++;
+ switch (c)
+ {
+ case ' ':
+ case '\t':
+ case '\v':
+ case '\f':
+ continue; // skip white space
+ case '\r':
+ if (*p == '\n')
+ continue; // ignore '\r' if followed by '\n'
+ // Treat isolated '\r' as if it were a '\n'
+ goto case '\n';
+ case '\n':
+ endOfLine();
+ continue;
+ case 0:
+ case 0x1A:
+ error("unterminated string constant starting at %s", start.toChars());
+ t.setString();
+ // decrement `p`, because it needs to point to the next token (the 0 or 0x1A character is the TOK.endOfFile token).
+ p--;
+ return TOK.hexadecimalString;
+ case '"':
+ if (n & 1)
+ {
+ error("odd number (%d) of hex characters in hex string", n);
+ stringbuffer.writeByte(v);
+ }
+ t.setString(stringbuffer);
+ t.postfix = 'h';
+ stringPostfix(t);
+ return TOK.hexadecimalString;
+ default:
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'f')
+ c -= 'a' - 10;
+ else if (c >= 'A' && c <= 'F')
+ c -= 'A' - 10;
+ else if (c & 0x80)
+ {
+ p--;
+ const u = decodeUTF();
+ p++;
+ if (u == PS || u == LS)
+ endOfLine();
+ else
+ error("non-hex character \\u%04x in hex string", u);
+ }
+ else
+ error("non-hex character '%c' in hex string", c);
+ if (n & 1)
+ {
+ v = (v << 4) | c;
+ stringbuffer.writeByte(v);
+ }
+ else
+ v = c;
+ n++;
+ break;
+ }
+ }
+ assert(0); // see bug 15731
+ }
+
/**
Lex a delimited string. Some examples of delimited strings are:
---
@@ -2122,9 +2207,14 @@ class Lexer
if (base == 2)
goto Ldone; // if ".identifier" or ".unicode"
goto Lreal; // otherwise as part of a floating point literal
+
+ case 'i':
+ if (Ccompile)
+ goto Ldone;
+ goto Lreal;
+
case 'p':
case 'P':
- case 'i':
Lreal:
p = start;
return inreal(t);
@@ -2317,7 +2407,13 @@ class Lexer
decimal = 2, // decimal
unsigned = 4, // u or U suffix
long_ = 8, // l or L suffix
- llong = 0x10 // ll or LL
+ llong = 0x10, // ll or LL
+
+ // Microsoft extensions
+ i8 = 0x20,
+ i16 = 0x40,
+ i32 = 0x80,
+ i64 = 0x100,
}
FLAGS flags = (base == 10) ? FLAGS.decimal : FLAGS.octalhex;
bool err;
@@ -2343,6 +2439,37 @@ class Lexer
}
break;
+ case 'i':
+ case 'I':
+ if (p[1] == '8')
+ {
+ f = FLAGS.i8;
+ ++p;
+ }
+ else if (p[1] == '1' && p[2] == '6')
+ {
+ f = FLAGS.i16;
+ p += 2;
+ }
+ else if (p[1] == '3' && p[2] == '2')
+ {
+ f = FLAGS.i32;
+ p += 2;
+ }
+ else if (p[1] == '6' && p[2] == '4')
+ {
+ f = FLAGS.i64;
+ p += 2;
+ }
+ else
+ break Lsuffixes;
+ if (p[1] >= '0' && p[1] <= '9' && !err)
+ {
+ error("invalid integer suffix");
+ err = true;
+ }
+ break;
+
default:
break Lsuffixes;
}
@@ -2475,6 +2602,34 @@ class Lexer
result = TOK.uns64Literal;
break;
+ case FLAGS.octalhex | FLAGS.i8:
+ case FLAGS.octalhex | FLAGS.i16:
+ case FLAGS.octalhex | FLAGS.i32:
+ case FLAGS.octalhex | FLAGS.unsigned | FLAGS.i8:
+ case FLAGS.octalhex | FLAGS.unsigned | FLAGS.i16:
+ case FLAGS.octalhex | FLAGS.unsigned | FLAGS.i32:
+ case FLAGS.decimal | FLAGS.unsigned | FLAGS.i8:
+ case FLAGS.decimal | FLAGS.unsigned | FLAGS.i16:
+ case FLAGS.decimal | FLAGS.unsigned | FLAGS.i32:
+ result = TOK.uns32Literal;
+ break;
+
+ case FLAGS.decimal | FLAGS.i8:
+ case FLAGS.decimal | FLAGS.i16:
+ case FLAGS.decimal | FLAGS.i32:
+ result = TOK.int32Literal;
+ break;
+
+ case FLAGS.octalhex | FLAGS.i64:
+ case FLAGS.octalhex | FLAGS.unsigned | FLAGS.i64:
+ case FLAGS.decimal | FLAGS.unsigned | FLAGS.i64:
+ result = TOK.uns64Literal;
+ break;
+
+ case FLAGS.decimal | FLAGS.i64:
+ result = TOK.int64Literal;
+ break;
+
default:
debug printf("%x\n",flags);
assert(0);
diff --git a/gcc/d/dmd/location.d b/gcc/d/dmd/location.d
index 0f3b9a7..ef2bd0a 100644
--- a/gcc/d/dmd/location.d
+++ b/gcc/d/dmd/location.d
@@ -38,8 +38,8 @@ debug info etc.
struct Loc
{
private uint _linnum;
- private ushort _charnum;
- private ushort fileIndex; // index into filenames[], starting from 1 (0 means no filename)
+ private uint _charnum;
+ private uint fileIndex; // index into filenames[], starting from 1 (0 means no filename)
version (LocOffset)
uint fileOffset; /// utf8 code unit index relative to start of file, starting from 0
@@ -67,7 +67,7 @@ nothrow:
extern (D) this(const(char)* filename, uint linnum, uint charnum) @safe
{
this._linnum = linnum;
- this._charnum = cast(ushort) charnum;
+ this._charnum = charnum;
this.filename = filename;
}
@@ -80,7 +80,7 @@ nothrow:
/// ditto
extern (C++) uint charnum(uint num) @nogc @safe
{
- return _charnum = cast(ushort) num;
+ return _charnum = num;
}
/// line number, starting from 1
@@ -114,8 +114,16 @@ nothrow:
{
//printf("setting %s\n", name);
filenames.push(name);
- fileIndex = cast(ushort)filenames.length;
- assert(fileIndex); // no overflow
+ fileIndex = cast(uint)filenames.length;
+ if (!fileIndex)
+ {
+ import dmd.globals : global;
+ import dmd.errors : error, fatal;
+
+ global.gag = 0; // ensure error message gets printed
+ error(Loc.initial, "internal compiler error: file name index overflow!");
+ fatal();
+ }
}
else
fileIndex = 0;
diff --git a/gcc/d/dmd/module.h b/gcc/d/dmd/module.h
index 8b48110..ce51266 100644
--- a/gcc/d/dmd/module.h
+++ b/gcc/d/dmd/module.h
@@ -142,7 +142,7 @@ public:
int doppelganger; // sub-module
Symbol *cov; // private uint[] __coverage;
- unsigned *covb; // bit array of valid code line numbers
+ DArray<unsigned> covb; // bit array of valid code line numbers
Symbol *sictor; // module order independent constructor
Symbol *sctor; // module constructor
diff --git a/gcc/d/dmd/mtype.d b/gcc/d/dmd/mtype.d
index 9d83db1..01f94a7 100644
--- a/gcc/d/dmd/mtype.d
+++ b/gcc/d/dmd/mtype.d
@@ -157,7 +157,7 @@ MOD MODmerge(MOD mod1, MOD mod2) pure nothrow @nogc @safe
/*********************************
* Store modifier name into buf.
*/
-void MODtoBuffer(OutBuffer* buf, MOD mod) nothrow @safe
+void MODtoBuffer(ref OutBuffer buf, MOD mod) nothrow @safe
{
buf.writestring(MODtoString(mod));
}
@@ -787,7 +787,7 @@ extern (C++) abstract class Type : ASTNode
HdrGenState hgs;
hgs.fullQual = (ty == Tclass && !mod);
- .toCBuffer(this, &buf, null, &hgs);
+ toCBuffer(this, buf, null, hgs);
return buf.extractChars();
}
@@ -799,7 +799,7 @@ extern (C++) abstract class Type : ASTNode
HdrGenState hgs;
hgs.fullQual = QualifyTypes;
- .toCBuffer(this, &buf, null, &hgs);
+ toCBuffer(this, buf, null, hgs);
return buf.extractChars();
}
@@ -973,7 +973,7 @@ extern (C++) abstract class Type : ASTNode
/*********************************
* Store this type's modifier name into buf.
*/
- final void modToBuffer(OutBuffer* buf) nothrow const
+ final void modToBuffer(ref OutBuffer buf) nothrow const
{
if (mod)
{
@@ -989,7 +989,7 @@ extern (C++) abstract class Type : ASTNode
{
OutBuffer buf;
buf.reserve(16);
- modToBuffer(&buf);
+ modToBuffer(buf);
return buf.extractChars();
}
@@ -4563,7 +4563,7 @@ extern (C++) final class TypeFunction : TypeNext
continue;
if (params == parameterList.parameters)
params = parameterList.parameters.copy();
- (*params)[i] = new Parameter(p.storageClass, t, null, null, null);
+ (*params)[i] = new Parameter(p.loc, p.storageClass, t, null, null, null);
}
if (next == tret && params == parameterList.parameters)
return this;
@@ -4592,7 +4592,7 @@ extern (C++) final class TypeFunction : TypeNext
// arguments get specially formatted
private const(char)* getParamError(Expression arg, Parameter par)
{
- if (global.gag && !global.params.showGaggedErrors)
+ if (global.gag && !global.params.v.showGaggedErrors)
return null;
// show qualification when toChars() is the same but types are different
// https://issues.dlang.org/show_bug.cgi?id=19948
@@ -4611,7 +4611,7 @@ extern (C++) final class TypeFunction : TypeNext
private extern(D) const(char)* getMatchError(A...)(const(char)* format, A args)
{
- if (global.gag && !global.params.showGaggedErrors)
+ if (global.gag && !global.params.v.showGaggedErrors)
return null;
OutBuffer buf;
buf.printf(format, args);
@@ -6217,8 +6217,8 @@ extern (C++) final class TypeTuple : Type
{
Expression e = (*exps)[i];
if (e.type.ty == Ttuple)
- e.error("cannot form sequence of sequences");
- auto arg = new Parameter(STC.undefined_, e.type, null, null, null);
+ error(e.loc, "cannot form sequence of sequences");
+ auto arg = new Parameter(e.loc, STC.undefined_, e.type, null, null, null);
(*arguments)[i] = arg;
}
}
@@ -6244,15 +6244,15 @@ extern (C++) final class TypeTuple : Type
{
super(Ttuple);
arguments = new Parameters();
- arguments.push(new Parameter(0, t1, null, null, null));
+ arguments.push(new Parameter(Loc.initial, 0, t1, null, null, null));
}
extern (D) this(Type t1, Type t2)
{
super(Ttuple);
arguments = new Parameters();
- arguments.push(new Parameter(0, t1, null, null, null));
- arguments.push(new Parameter(0, t2, null, null, null));
+ arguments.push(new Parameter(Loc.initial, 0, t1, null, null, null));
+ arguments.push(new Parameter(Loc.initial, 0, t2, null, null, null));
}
static TypeTuple create() @safe
@@ -6661,14 +6661,16 @@ extern (C++) final class Parameter : ASTNode
{
import dmd.attrib : UserAttributeDeclaration;
+ Loc loc;
StorageClass storageClass;
Type type;
Identifier ident;
Expression defaultArg;
UserAttributeDeclaration userAttribDecl; // user defined attributes
- extern (D) this(StorageClass storageClass, Type type, Identifier ident, Expression defaultArg, UserAttributeDeclaration userAttribDecl) @safe
+ extern (D) this(const ref Loc loc, StorageClass storageClass, Type type, Identifier ident, Expression defaultArg, UserAttributeDeclaration userAttribDecl) @safe
{
+ this.loc = loc;
this.type = type;
this.ident = ident;
this.storageClass = storageClass;
@@ -6676,14 +6678,14 @@ extern (C++) final class Parameter : ASTNode
this.userAttribDecl = userAttribDecl;
}
- static Parameter create(StorageClass storageClass, Type type, Identifier ident, Expression defaultArg, UserAttributeDeclaration userAttribDecl) @safe
+ static Parameter create(const ref Loc loc, StorageClass storageClass, Type type, Identifier ident, Expression defaultArg, UserAttributeDeclaration userAttribDecl) @safe
{
- return new Parameter(storageClass, type, ident, defaultArg, userAttribDecl);
+ return new Parameter(loc, storageClass, type, ident, defaultArg, userAttribDecl);
}
Parameter syntaxCopy()
{
- return new Parameter(storageClass, type ? type.syntaxCopy() : null, ident, defaultArg ? defaultArg.syntaxCopy() : null, userAttribDecl ? userAttribDecl.syntaxCopy(null) : null);
+ return new Parameter(loc, storageClass, type ? type.syntaxCopy() : null, ident, defaultArg ? defaultArg.syntaxCopy() : null, userAttribDecl ? userAttribDecl.syntaxCopy(null) : null);
}
/****************************************************
@@ -7688,3 +7690,28 @@ pure string visitTYCase(string handler) @safe
}
assert(0);
}
+
+
+/**
+ * Returns:
+ * `TypeIdentifier` corresponding to `object.Throwable`
+ */
+TypeIdentifier getThrowable()
+{
+ auto tid = new TypeIdentifier(Loc.initial, Id.empty);
+ tid.addIdent(Id.object);
+ tid.addIdent(Id.Throwable);
+ return tid;
+}
+
+/**
+ * Returns:
+ * TypeIdentifier corresponding to `object.Exception`
+ */
+TypeIdentifier getException()
+{
+ auto tid = new TypeIdentifier(Loc.initial, Id.empty);
+ tid.addIdent(Id.object);
+ tid.addIdent(Id.Exception);
+ return tid;
+}
diff --git a/gcc/d/dmd/mtype.h b/gcc/d/dmd/mtype.h
index 457b91f..aeeee8c 100644
--- a/gcc/d/dmd/mtype.h
+++ b/gcc/d/dmd/mtype.h
@@ -237,7 +237,7 @@ public:
virtual unsigned alignsize();
Type *trySemantic(const Loc &loc, Scope *sc);
Type *merge2();
- void modToBuffer(OutBuffer *buf) const;
+ void modToBuffer(OutBuffer& buf) const;
char *modToChars() const;
virtual bool isintegral();
@@ -563,13 +563,14 @@ enum class PURE : unsigned char
class Parameter final : public ASTNode
{
public:
+ Loc loc;
StorageClass storageClass;
Type *type;
Identifier *ident;
Expression *defaultArg;
UserAttributeDeclaration *userAttribDecl; // user defined attributes
- static Parameter *create(StorageClass storageClass, Type *type, Identifier *ident,
+ static Parameter *create(const Loc &loc, StorageClass storageClass, Type *type, Identifier *ident,
Expression *defaultArg, UserAttributeDeclaration *userAttribDecl);
Parameter *syntaxCopy();
Type *isLazyArray();
diff --git a/gcc/d/dmd/mustuse.d b/gcc/d/dmd/mustuse.d
index 844f719..1d831bb 100644
--- a/gcc/d/dmd/mustuse.d
+++ b/gcc/d/dmd/mustuse.d
@@ -12,6 +12,7 @@ module dmd.mustuse;
import dmd.dscope;
import dmd.dsymbol;
+import dmd.errors;
import dmd.expression;
import dmd.globals;
import dmd.identifier;
@@ -49,7 +50,7 @@ bool checkMustUse(Expression e, Scope* sc)
// isStructDeclaration returns non-null for both structs and unions
if (sd && hasMustUseAttribute(sd, sc) && !isAssignment(e) && !isIncrementOrDecrement(e))
{
- e.error("ignored value of `@%s` type `%s`; prepend a `cast(void)` if intentional",
+ error(e.loc, "ignored value of `@%s` type `%s`; prepend a `cast(void)` if intentional",
Id.udaMustUse.toChars(), e.type.toPrettyChars(true));
return true;
}
diff --git a/gcc/d/dmd/nogc.d b/gcc/d/dmd/nogc.d
index 01a6832..5606061 100644
--- a/gcc/d/dmd/nogc.d
+++ b/gcc/d/dmd/nogc.d
@@ -86,7 +86,7 @@ public:
}
if (f.setGC(e.loc, format))
{
- e.error(format, f.kind(), f.toPrettyChars());
+ error(e.loc, format, f.kind(), f.toPrettyChars());
err = true;
return true;
}
@@ -225,7 +225,7 @@ Expression checkGC(Scope* sc, Expression e)
if (e && e.op != EXP.error && f && sc.intypeof != 1 &&
(!(sc.flags & SCOPE.ctfe) || betterC) &&
(f.type.ty == Tfunction &&
- (cast(TypeFunction)f.type).isnogc || f.nogcInprocess || global.params.vgc) &&
+ (cast(TypeFunction)f.type).isnogc || f.nogcInprocess || global.params.v.gc) &&
!(sc.flags & SCOPE.debug_))
{
scope NOGCVisitor gcv = new NOGCVisitor(f);
diff --git a/gcc/d/dmd/nspace.d b/gcc/d/dmd/nspace.d
index 551db5b..2d3367a 100644
--- a/gcc/d/dmd/nspace.d
+++ b/gcc/d/dmd/nspace.d
@@ -52,6 +52,7 @@ import dmd.astenums;
import dmd.dscope;
import dmd.dsymbol;
import dmd.dsymbolsem;
+import dmd.errors;
import dmd.expression;
import dmd.globals;
import dmd.identifier;
@@ -134,7 +135,7 @@ extern (C++) final class Nspace : ScopeDsymbol
if (!members || !symtab) // opaque or semantic() is not yet called
{
if (!(flags & IgnoreErrors))
- error("is forward referenced when looking for `%s`", ident.toChars());
+ .error(loc, "%s `%s` is forward referenced when looking for `%s`", kind, toPrettyChars, ident.toChars());
return null;
}
diff --git a/gcc/d/dmd/ob.d b/gcc/d/dmd/ob.d
index 4774d1f..8b30681 100644
--- a/gcc/d/dmd/ob.d
+++ b/gcc/d/dmd/ob.d
@@ -1970,7 +1970,7 @@ void checkObErrors(ref ObState obstate)
else
{
if (pvs.state == PtrState.Owner && v.type.hasPointersToMutableFields())
- v.error(e.loc, "assigning to Owner without disposing of owned value");
+ .error(e.loc, "%s `%s` assigning to Owner without disposing of owned value", v.kind, v.toPrettyChars);
pvs.state = PtrState.Owner;
}
@@ -1993,12 +1993,12 @@ void checkObErrors(ref ObState obstate)
if (pvsr.state == Undefined)
{
- v.error(e.loc, "is reading from `%s` which is Undefined", r.toChars());
+ .error(e.loc, "%s `%s` is reading from `%s` which is Undefined", v.kind, v.toPrettyChars, r.toChars());
}
else if (isBorrowedPtr(v)) // v is going to borrow from r
{
if (pvsr.state == Readonly)
- v.error(e.loc, "is borrowing from `%s` which is Readonly", r.toChars());
+ .error(e.loc, "%s `%s` is borrowing from `%s` which is Readonly", v.kind, v.toPrettyChars, r.toChars());
pvs.state = Borrowed;
}
@@ -2039,7 +2039,7 @@ void checkObErrors(ref ObState obstate)
assert(vi != size_t.max);
auto pvs = &gen[vi];
if (pvs.state == PtrState.Undefined)
- v.error(loc, "has undefined state and cannot be read");
+ .error(loc, "%s `%s` has undefined state and cannot be read", v.kind, v.toPrettyChars);
readVar(ob, vi, mutable, gen);
}
@@ -2187,7 +2187,7 @@ void checkObErrors(ref ObState obstate)
{
// move (i.e. consume arg)
if (pvs.state != PtrState.Owner)
- v.error(arg.loc, "is not Owner, cannot consume its value");
+ .error(arg.loc, "%s `%s` is not Owner, cannot consume its value", v.kind, v.toPrettyChars);
makeUndefined(vi, cpvs);
}
}
@@ -2226,7 +2226,7 @@ void checkObErrors(ref ObState obstate)
{
// move (i.e. consume arg)
if (pvs.state != PtrState.Owner)
- v.error(arg.loc, "is not Owner, cannot consume its value");
+ .error(arg.loc, "%s `%s` is not Owner, cannot consume its value", v.kind, v.toPrettyChars);
makeUndefined(vi, cpvs);
}
}
@@ -2261,7 +2261,7 @@ void checkObErrors(ref ObState obstate)
{
if (obstate.mutableStack[vi] || obstate.mutableStack[vk])
{
- v.error(ce.loc, "is passed as Owner more than once");
+ .error(ce.loc, "%s `%s` is passed as Owner more than once", v.kind, v.toPrettyChars);
break; // no need to continue
}
}
@@ -2490,7 +2490,7 @@ void checkObErrors(ref ObState obstate)
if (s1 != s2 && (s1 == PtrState.Owner || s2 == PtrState.Owner))
{
auto v = obstate.vars[i];
- v.error(ob.exp ? ob.exp.loc : v.loc, "is both %s and %s", s1.toChars(), s2.toChars());
+ .error(ob.exp ? ob.exp.loc : v.loc, "%s `%s` is both %s and %s", v.kind, v.toPrettyChars, s1.toChars(), s2.toChars());
}
pvs1.combine(*pvs2, i, ob.gen);
}
@@ -2536,7 +2536,7 @@ void checkObErrors(ref ObState obstate)
switch (pvsr.state)
{
case Undefined:
- r.error(ob.exp.loc, "is returned but is Undefined");
+ .error(ob.exp.loc, "%s `%s` is returned but is Undefined", r.kind, r.toPrettyChars);
break;
case Owner:
@@ -2568,7 +2568,7 @@ void checkObErrors(ref ObState obstate)
{
auto v = obstate.vars[i];
if (v.type.hasPointers())
- v.error(v.loc, "is not disposed of before return");
+ .error(v.loc, "%s `%s` is not disposed of before return", v.kind, v.toPrettyChars);
}
}
}
diff --git a/gcc/d/dmd/objc.d b/gcc/d/dmd/objc.d
index 623a362..359474c 100644
--- a/gcc/d/dmd/objc.d
+++ b/gcc/d/dmd/objc.d
@@ -410,12 +410,12 @@ extern(C++) private final class Unsupported : Objc
override void setObjc(ClassDeclaration cd)
{
- cd.error("Objective-C classes not supported");
+ .error(cd.loc, "%s `%s` Objective-C classes not supported", cd.kind, cd.toPrettyChars);
}
override void setObjc(InterfaceDeclaration id)
{
- id.error("Objective-C interfaces not supported");
+ .error(id.loc, "%s `%s` Objective-C interfaces not supported", id.kind, id.toPrettyChars);
}
override const(char)* toPrettyChars(ClassDeclaration, bool qualifyTypes) const
@@ -552,7 +552,7 @@ extern(C++) private final class Supported : Objc
if (fd.objc.selector)
{
- fd.error("can only have one Objective-C selector per method");
+ .error(fd.loc, "%s `%s` can only have one Objective-C selector per method", fd.kind, fd.toPrettyChars);
return 1;
}
@@ -572,15 +572,15 @@ extern(C++) private final class Supported : Objc
return;
TypeFunction tf = cast(TypeFunction)fd.type;
if (fd.objc.selector.paramCount != tf.parameterList.parameters.length)
- fd.error("number of colons in Objective-C selector must match number of parameters");
+ .error(fd.loc, "%s `%s` number of colons in Objective-C selector must match number of parameters", fd.kind, fd.toPrettyChars);
if (fd.parent && fd.parent.isTemplateInstance())
- fd.error("template cannot have an Objective-C selector attached");
+ .error(fd.loc, "%s `%s` template cannot have an Objective-C selector attached", fd.kind, fd.toPrettyChars);
}
override void checkLinkage(FuncDeclaration fd)
{
if (fd._linkage != LINK.objc && fd.objc.selector)
- fd.error("must have Objective-C linkage to attach a selector");
+ .error(fd.loc, "%s `%s` must have Objective-C linkage to attach a selector", fd.kind, fd.toPrettyChars);
}
override bool isVirtual(const FuncDeclaration fd) const
@@ -608,7 +608,7 @@ extern(C++) private final class Supported : Objc
fd.objc.isOptional = count > 0;
if (count > 1)
- fd.error("can only declare a function as optional once");
+ .error(fd.loc, "%s `%s` can only declare a function as optional once", fd.kind, fd.toPrettyChars);
}
/// Returns: the number of times `fd` has been declared as optional.
@@ -643,7 +643,7 @@ extern(C++) private final class Supported : Objc
if (fd._linkage != LINK.objc)
{
- fd.error("only functions with Objective-C linkage can be declared as optional");
+ .error(fd.loc, "%s `%s` only functions with Objective-C linkage can be declared as optional", fd.kind, fd.toPrettyChars);
const linkage = linkageToString(fd._linkage);
@@ -655,14 +655,14 @@ extern(C++) private final class Supported : Objc
if (parent && parent.isTemplateInstance())
{
- fd.error("template cannot be optional");
+ .error(fd.loc, "%s `%s` template cannot be optional", fd.kind, fd.toPrettyChars);
parent = parent.parent;
assert(parent);
}
if (parent && !parent.isInterfaceDeclaration())
{
- fd.error("only functions declared inside interfaces can be optional");
+ .error(fd.loc, "%s `%s` only functions declared inside interfaces can be optional", fd.kind, fd.toPrettyChars);
errorSupplemental(fd.loc, "function is declared inside %s", fd.parent.kind);
}
}
@@ -805,9 +805,9 @@ extern(C++) private final class Supported : Objc
enum supplementalMessage = "`offsetof` is not available for members " ~
"of Objective-C classes. Please use the Objective-C runtime instead";
- expression.error(errorMessage, expression.toChars(),
+ error(expression.loc, errorMessage, expression.toChars(),
expression.type.toChars());
- expression.errorSupplemental(supplementalMessage);
+ errorSupplemental(expression.loc, supplementalMessage);
}
override void checkTupleof(Expression expression, TypeClass type) const
@@ -815,8 +815,8 @@ extern(C++) private final class Supported : Objc
if (type.sym.classKind != ClassKind.objc)
return;
- expression.error("no property `tupleof` for type `%s`", type.toChars());
- expression.errorSupplemental("`tupleof` is not available for members " ~
+ error(expression.loc, "no property `tupleof` for type `%s`", type.toChars());
+ errorSupplemental(expression.loc, "`tupleof` is not available for members " ~
"of Objective-C classes. Please use the Objective-C runtime instead");
}
}
@@ -866,8 +866,8 @@ if (is(T == ClassDeclaration) || is(T == InterfaceDeclaration))
}
else
{
- error("base " ~ errorType ~ " for an Objective-C " ~
- errorType ~ " must be `extern (Objective-C)`");
+ .error(classDeclaration.loc, "%s `%s` base " ~ errorType ~ " for an Objective-C " ~
+ errorType ~ " must be `extern (Objective-C)`", classDeclaration.kind, classDeclaration.toPrettyChars);
}
}
diff --git a/gcc/d/dmd/opover.d b/gcc/d/dmd/opover.d
index 457e8b6..addcd01 100644
--- a/gcc/d/dmd/opover.d
+++ b/gcc/d/dmd/opover.d
@@ -405,7 +405,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
{
// @@@DEPRECATED_2.110@@@.
// Deprecated in 2.088, made an error in 2.100
- e.error("`%s` is obsolete. Use `opUnary(string op)() if (op == \"%s\")` instead.", id.toChars(), EXPtoString(e.op).ptr);
+ error(e.loc, "`%s` is obsolete. Use `opUnary(string op)() if (op == \"%s\")` instead.", id.toChars(), EXPtoString(e.op).ptr);
return ErrorExp.get();
}
}
@@ -644,7 +644,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
s = search_function(ad1, Id.opBinary);
if (s && !s.isTemplateDeclaration())
{
- e.e1.error("`%s.opBinary` isn't a template", e.e1.toChars());
+ error(e.e1.loc, "`%s.opBinary` isn't a template", e.e1.toChars());
return ErrorExp.get();
}
}
@@ -653,7 +653,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
s_r = search_function(ad2, Id.opBinaryRight);
if (s_r && !s_r.isTemplateDeclaration())
{
- e.e2.error("`%s.opBinaryRight` isn't a template", e.e2.toChars());
+ error(e.e2.loc, "`%s.opBinaryRight` isn't a template", e.e2.toChars());
return ErrorExp.get();
}
if (s_r && s_r == s) // https://issues.dlang.org/show_bug.cgi?id=12778
@@ -678,9 +678,9 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
// @@@DEPRECATED_2.110@@@.
// Deprecated in 2.088, made an error in 2.100
if (id == Id.postinc || id == Id.postdec)
- e.error("`%s` is obsolete. Use `opUnary(string op)() if (op == \"%s\")` instead.", id.toChars(), EXPtoString(e.op).ptr);
+ error(e.loc, "`%s` is obsolete. Use `opUnary(string op)() if (op == \"%s\")` instead.", id.toChars(), EXPtoString(e.op).ptr);
else
- e.error("`%s` is obsolete. Use `opBinary(string op)(...) if (op == \"%s\")` instead.", id.toChars(), EXPtoString(e.op).ptr);
+ error(e.loc, "`%s` is obsolete. Use `opBinary(string op)(...) if (op == \"%s\")` instead.", id.toChars(), EXPtoString(e.op).ptr);
return ErrorExp.get();
}
}
@@ -696,7 +696,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
{
// @@@DEPRECATED_2.110@@@.
// Deprecated in 2.088, made an error in 2.100
- e.error("`%s` is obsolete. Use `opBinaryRight(string op)(...) if (op == \"%s\")` instead.", id_r.toChars(), EXPtoString(e.op).ptr);
+ error(e.loc, "`%s` is obsolete. Use `opBinaryRight(string op)(...) if (op == \"%s\")` instead.", id_r.toChars(), EXPtoString(e.op).ptr);
return ErrorExp.get();
}
}
@@ -738,7 +738,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
if (m.count > 1)
{
// Error, ambiguous
- e.error("overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
+ error(e.loc, "overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
}
else if (m.last == MATCH.nomatch)
{
@@ -820,7 +820,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
if (m.count > 1)
{
// Error, ambiguous
- e.error("overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
+ error(e.loc, "overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
}
else if (m.last == MATCH.nomatch)
{
@@ -890,7 +890,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
}
if (rewrittenLhs)
{
- e.error("cannot use `alias this` to partially initialize variable `%s` of type `%s`. Use `%s`",
+ error(e.loc, "cannot use `alias this` to partially initialize variable `%s` of type `%s`. Use `%s`",
e.e1.toChars(), ad1.toChars(), rewrittenLhs.toChars());
return ErrorExp.get();
}
@@ -918,7 +918,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
if (t1.ty == Tclass && e.e2.op == EXP.null_ ||
t2.ty == Tclass && e.e1.op == EXP.null_)
{
- e.error("use `%s` instead of `%s` when comparing with `null`",
+ error(e.loc, "use `%s` instead of `%s` when comparing with `null`",
EXPtoString(e.op == EXP.equal ? EXP.identity : EXP.notIdentity).ptr,
EXPtoString(e.op).ptr);
return ErrorExp.get();
@@ -943,7 +943,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
*/
if (!ClassDeclaration.object)
{
- e.error("cannot compare classes for equality because `object.Object` was not declared");
+ error(e.loc, "cannot compare classes for equality because `object.Object` was not declared");
return null;
}
@@ -1045,7 +1045,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
size_t dim = tup1.exps.length;
if (dim != tup2.exps.length)
{
- e.error("mismatched sequence lengths, `%d` and `%d`",
+ error(e.loc, "mismatched sequence lengths, `%d` and `%d`",
cast(int)dim, cast(int)tup2.exps.length);
return ErrorExp.get();
}
@@ -1210,7 +1210,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
s = search_function(ad1, Id.opOpAssign);
if (s && !s.isTemplateDeclaration())
{
- e.error("`%s.opOpAssign` isn't a template", e.e1.toChars());
+ error(e.loc, "`%s.opOpAssign` isn't a template", e.e1.toChars());
return ErrorExp.get();
}
}
@@ -1231,7 +1231,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
// Deprecated in 2.088, made an error in 2.100
scope char[] op = EXPtoString(e.op).dup;
op[$-1] = '\0'; // remove trailing `=`
- e.error("`%s` is obsolete. Use `opOpAssign(string op)(...) if (op == \"%s\")` instead.", id.toChars(), op.ptr);
+ error(e.loc, "`%s` is obsolete. Use `opOpAssign(string op)(...) if (op == \"%s\")` instead.", id.toChars(), op.ptr);
return ErrorExp.get();
}
}
@@ -1253,7 +1253,7 @@ Expression op_overload(Expression e, Scope* sc, EXP* pop = null)
if (m.count > 1)
{
// Error, ambiguous
- e.error("overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
+ error(e.loc, "overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
}
else if (m.last == MATCH.nomatch)
{
@@ -1366,7 +1366,7 @@ private Expression compare_overload(BinExp e, Scope* sc, Identifier id, EXP* pop
if (!(m.lastf == lastf && m.count == 2 && count == 1))
{
// Error, ambiguous
- e.error("overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
+ error(e.loc, "overloads `%s` and `%s` both match argument list for `%s`", m.lastf.type.toChars(), m.nextf.type.toChars(), m.lastf.toChars());
}
}
else if (m.last == MATCH.nomatch)
@@ -1767,10 +1767,10 @@ private FuncDeclaration findBestOpApplyMatch(Expression ethis, FuncDeclaration f
if (fd_ambig)
{
- .error(ethis.loc, "`%s.%s` matches more than one declaration:\n`%s`: `%s`\nand:\n`%s`: `%s`",
- ethis.toChars(), fstart.ident.toChars(),
- fd_best.loc.toChars(), fd_best.type.toChars(),
- fd_ambig.loc.toChars(), fd_ambig.type.toChars());
+ .error(ethis.loc, "`%s.%s` matches more than one declaration:",
+ ethis.toChars(), fstart.ident.toChars());
+ .errorSupplemental(fd_best.loc, "`%s`\nand:", fd_best.type.toChars());
+ .errorSupplemental(fd_ambig.loc, "`%s`", fd_ambig.type.toChars());
return null;
}
diff --git a/gcc/d/dmd/optimize.d b/gcc/d/dmd/optimize.d
index f98e7c7..d108cff 100644
--- a/gcc/d/dmd/optimize.d
+++ b/gcc/d/dmd/optimize.d
@@ -24,6 +24,7 @@ import dmd.errors;
import dmd.expression;
import dmd.expressionsem;
import dmd.globals;
+import dmd.hdrgen;
import dmd.init;
import dmd.location;
import dmd.mtype;
@@ -90,7 +91,7 @@ Expression expandVar(int result, VarDeclaration v)
{
if (v.storage_class & STC.manifest)
{
- v.error("recursive initialization of constant");
+ .error(v.loc, "%s `%s` recursive initialization of constant", v.kind, v.toPrettyChars);
return errorReturn();
}
return nullReturn();
@@ -100,7 +101,7 @@ Expression expandVar(int result, VarDeclaration v)
{
if (v.storage_class & STC.manifest)
{
- v.error("enum cannot be initialized with `%s`", v._init.toChars());
+ .error(v.loc, "%s `%s` enum cannot be initialized with `%s`", v.kind, v.toPrettyChars, dmd.hdrgen.toChars(v._init));
return errorReturn();
}
return nullReturn();
@@ -275,7 +276,7 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
//printf("Expression_optimize() e: %s result: %d keepLvalue %d\n", e.toChars(), result, keepLvalue);
Expression ret = e;
- void error()
+ void errorReturn()
{
ret = ErrorExp.get();
}
@@ -571,8 +572,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
if (index < 0 || index > dim)
{
- e.error("array index %lld is out of bounds `[0..%lld]`", index, dim);
- return error();
+ error(e.loc, "array index %lld is out of bounds `[0..%lld]`", index, dim);
+ return errorReturn();
}
import core.checkedint : mulu;
@@ -580,8 +581,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
const offset = mulu(index, ts.nextOf().size(e.loc), overflow); // offset = index*size
if (overflow)
{
- e.error("array offset overflow");
- return error();
+ error(e.loc, "array offset overflow");
+ return errorReturn();
}
Expression ex = new AddrExp(ae1.loc, ae1); // &a[i]
@@ -610,8 +611,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
*/
if (!((dim == 0 || dim == index) && ve.var.isCsymbol()))
{
- e.error("array index %lld is out of bounds `[0..%lld]`", index, dim);
- return error();
+ error(e.loc, "array index %lld is out of bounds `[0..%lld]`", index, dim);
+ return errorReturn();
}
}
@@ -620,8 +621,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
const offset = mulu(index, ts.nextOf().size(e.loc), overflow);
if (overflow)
{
- e.error("array offset overflow");
- return error();
+ error(e.loc, "array offset overflow");
+ return errorReturn();
}
ret = new SymOffExp(e.loc, ve.var, offset);
@@ -645,8 +646,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
*/
if (!((dim == 0 || dim == index) && ve.var.isCsymbol()))
{
- e.error("array index %lld is out of bounds `[0..%lld]`", index, dim);
- return error();
+ error(e.loc, "array index %lld is out of bounds `[0..%lld]`", index, dim);
+ return errorReturn();
}
}
@@ -655,8 +656,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
const offset = mulu(index, ts.nextOf().size(e.loc), overflow); // index*elementsize
if (overflow)
{
- e.error("array offset overflow");
- return error();
+ error(e.loc, "array offset overflow");
+ return errorReturn();
}
auto pe = new AddrExp(e.loc, ve);
@@ -808,7 +809,7 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
const esz = e.type.nextOf().size(e.loc);
const e1sz = e.e1.type.toBasetype().nextOf().size(e.e1.loc);
if (esz == SIZE_INVALID || e1sz == SIZE_INVALID)
- return error();
+ return errorReturn();
if (e1sz == esz)
{
@@ -855,13 +856,13 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
ClassDeclaration cdfrom = e.e1.type.isClassHandle();
ClassDeclaration cdto = e.type.isClassHandle();
if (cdfrom.errors || cdto.errors)
- return error();
+ return errorReturn();
if (cdto == ClassDeclaration.object && !cdfrom.isInterfaceDeclaration())
return returnE_e1(); // can always convert a class to Object
// Need to determine correct offset before optimizing away the cast.
// https://issues.dlang.org/show_bug.cgi?id=16980
if (cdfrom.size(e.loc) == SIZE_INVALID)
- return error();
+ return errorReturn();
assert(cdfrom.sizeok == Sizeok.done);
assert(cdto.sizeok == Sizeok.done || !cdto.isBaseOf(cdfrom, null));
int offset;
@@ -886,7 +887,7 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
const e1sz = e.e1.type.size(e.e1.loc);
if (esz == SIZE_INVALID ||
e1sz == SIZE_INVALID)
- return error();
+ return errorReturn();
if (esz == e1sz)
return returnE_e1();
@@ -919,8 +920,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
sz *= 8;
if (i2 < 0 || i2 >= sz)
{
- e.error("shift assign by %lld is outside the range `0..%llu`", i2, cast(ulong)sz - 1);
- return error();
+ error(e.loc, "shift assign by %lld is outside the range `0..%llu`", i2, cast(ulong)sz - 1);
+ return errorReturn();
}
}
}
@@ -1005,8 +1006,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
sz *= 8;
if (i2 < 0 || i2 >= sz)
{
- e.error("shift by %lld is outside the range `0..%llu`", i2, cast(ulong)sz - 1);
- return error();
+ error(e.loc, "shift by %lld is outside the range `0..%llu`", i2, cast(ulong)sz - 1);
+ return errorReturn();
}
if (e.e1.isConst() == 1)
ret = (*shift)(e.loc, e.type, e.e1, e.e2).copy();
@@ -1062,8 +1063,8 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
// All negative integral powers are illegal.
if (e.e1.type.isintegral() && (e.e2.op == EXP.int64) && cast(sinteger_t)e.e2.toInteger() < 0)
{
- e.error("cannot raise `%s` to a negative integer power. Did you mean `(cast(real)%s)^^%s` ?", e.e1.type.toBasetype().toChars(), e.e1.toChars(), e.e2.toChars());
- return error();
+ error(e.loc, "cannot raise `%s` to a negative integer power. Did you mean `(cast(real)%s)^^%s` ?", e.e1.type.toBasetype().toChars(), e.e1.toChars(), e.e2.toChars());
+ return errorReturn();
}
// If e2 *could* have been an integer, make it one.
if (e.e2.op == EXP.float64 && e.e2.toReal() == real_t(cast(sinteger_t)e.e2.toReal()))
@@ -1280,19 +1281,25 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
//printf("CatExp::optimize(%d) %s\n", result, e.toChars());
if (binOptimize(e, result))
return;
- if (auto ce1 = e.e1.isCatExp())
- {
- // https://issues.dlang.org/show_bug.cgi?id=12798
- // optimize ((expr ~ str1) ~ str2)
- scope CatExp cex = new CatExp(e.loc, ce1.e2, e.e2);
- cex.type = e.type;
- Expression ex = Expression_optimize(cex, result, false);
- if (ex != cex)
+
+ if (e.type == Type.tstring)
+ if (auto ce1 = e.e1.isCatExp())
{
- e.e1 = ce1.e1;
- e.e2 = ex;
+ // https://issues.dlang.org/show_bug.cgi?id=12798
+ // optimize ((expr ~ str1) ~ str2)
+ // https://issues.dlang.org/show_bug.cgi?id=24078
+ // This optimization is only valid if `expr` is a string.
+ // Otherwise it leads to:
+ // `["c"] ~ "a" ~ "b"` becoming `["c"] ~ "ab"`
+ scope CatExp cex = new CatExp(e.loc, ce1.e2, e.e2);
+ cex.type = e.type;
+ Expression ex = Expression_optimize(cex, result, false);
+ if (ex != cex)
+ {
+ e.e1 = ce1.e1;
+ e.e2 = ex;
+ }
}
- }
// optimize "str"[] -> "str"
if (auto se1 = e.e1.isSliceExp())
{
@@ -1331,7 +1338,7 @@ Expression Expression_optimize(Expression e, int result, bool keepLvalue)
{
if (b++ == global.recursionLimit)
{
- e.error("infinite loop while optimizing expression");
+ error(e.loc, "infinite loop while optimizing expression");
fatal();
}
diff --git a/gcc/d/dmd/parse.d b/gcc/d/dmd/parse.d
index 13bba4f..3821f94 100644
--- a/gcc/d/dmd/parse.d
+++ b/gcc/d/dmd/parse.d
@@ -1204,7 +1204,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (orig & added)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, added);
+ AST.stcToBuffer(buf, added);
error("redundant attribute `%s`", buf.peekChars());
return orig | added;
}
@@ -2007,6 +2007,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
case TOK.wcharLiteral:
case TOK.dcharLiteral:
case TOK.string_:
+ case TOK.hexadecimalString:
case TOK.file:
case TOK.fileFullPath:
case TOK.line:
@@ -2545,7 +2546,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
else if (StorageClass modStc = stc & STC.TYPECTOR)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, modStc);
+ AST.stcToBuffer(buf, modStc);
error(loc, "static constructor cannot be `%s`", buf.peekChars());
}
stc &= ~(STC.static_ | STC.TYPECTOR);
@@ -2580,7 +2581,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
else if (StorageClass modStc = stc & STC.TYPECTOR)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, modStc);
+ AST.stcToBuffer(buf, modStc);
error(loc, "static destructor cannot be `%s`", buf.peekChars());
}
stc &= ~(STC.static_ | STC.TYPECTOR);
@@ -2619,7 +2620,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
else if (StorageClass modStc = stc & STC.TYPECTOR)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, modStc);
+ AST.stcToBuffer(buf, modStc);
error(loc, "shared static constructor cannot be `%s`", buf.peekChars());
}
stc &= ~(STC.static_ | STC.TYPECTOR);
@@ -2653,7 +2654,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
else if (StorageClass modStc = stc & STC.TYPECTOR)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, modStc);
+ AST.stcToBuffer(buf, modStc);
error(loc, "shared static destructor cannot be `%s`", buf.peekChars());
}
stc &= ~(STC.static_ | STC.TYPECTOR);
@@ -2837,7 +2838,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (varargsStc & ~VarArgsStc)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, varargsStc & ~VarArgsStc);
+ AST.stcToBuffer(buf, varargsStc & ~VarArgsStc);
error("variadic parameter cannot have attributes `%s`", buf.peekChars());
varargsStc &= VarArgsStc;
}
@@ -2935,11 +2936,12 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
//error("scope cannot be ref or out");
const tv = peekNext();
+ Loc loc;
if (tpl && token.value == TOK.identifier &&
(tv == TOK.comma || tv == TOK.rightParenthesis || tv == TOK.dotDotDot))
{
Identifier id = Identifier.generateId("__T");
- const loc = token.loc;
+ loc = token.loc;
at = new AST.TypeIdentifier(loc, id);
if (!*tpl)
*tpl = new AST.TemplateParameters();
@@ -2951,7 +2953,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
}
else
{
- at = parseType(&ai);
+ at = parseType(&ai, null, &loc);
}
ae = null;
if (token.value == TOK.assign) // = defaultArg
@@ -2959,7 +2961,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
nextToken();
ae = parseDefaultInitExp();
}
- auto param = new AST.Parameter(storageClass | STC.parameter, at, ai, ae, null);
+ auto param = new AST.Parameter(loc, storageClass | STC.parameter, at, ai, ae, null);
if (udas)
{
auto a = new AST.Dsymbols();
@@ -3083,7 +3085,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
else
{
OutBuffer buf;
- AST.stcToBuffer(&buf, _stc);
+ AST.stcToBuffer(buf, _stc);
error(attributeErrorMessage, buf.peekChars());
}
nextToken();
@@ -3284,8 +3286,14 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (token.value != TOK.rightCurly)
{
/* { */
- error(token.loc, "`}` expected following members in `%s` declaration at %s",
- Token.toChars(tok), loc.toChars());
+ error(token.loc, "`}` expected following members in `%s` declaration",
+ Token.toChars(tok));
+ if (id)
+ eSink.errorSupplemental(loc, "%s `%s` starts here",
+ Token.toChars(tok), id.toChars());
+ else
+ eSink.errorSupplemental(loc, "%s starts here",
+ Token.toChars(tok));
}
nextToken();
}
@@ -3481,8 +3489,9 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
* Params:
* pident = set to Identifier if there is one, null if not
* ptpl = if !null, then set to TemplateParameterList
+ * pdeclLoc = if !null, then set to location of the declarator
*/
- AST.Type parseType(Identifier* pident = null, AST.TemplateParameters** ptpl = null)
+ AST.Type parseType(Identifier* pident = null, AST.TemplateParameters** ptpl = null, Loc* pdeclLoc = null)
{
/* Take care of the storage class prefixes that
* serve as type attributes:
@@ -3539,6 +3548,8 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
AST.Type t;
t = parseBasicType();
+ if (pdeclLoc)
+ *pdeclLoc = token.loc;
int alt = 0;
t = parseDeclarator(t, alt, pident, ptpl);
checkCstyleTypeSyntax(typeLoc, t, alt, pident ? *pident : null);
@@ -4575,8 +4586,10 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
}
else if (t.ty == Tfunction)
{
+ /* @@@DEPRECATED_2.115@@@
+ * change to error, deprecated in 2.105.1 */
if (storage_class & STC.manifest)
- error("function cannot have enum storage class");
+ deprecation("function cannot have enum storage class");
AST.Expression constraint = null;
//printf("%s funcdecl t = %s, storage_class = x%lx\n", loc.toChars(), t.toChars(), storage_class);
@@ -4953,7 +4966,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (remStc)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, remStc);
+ AST.stcToBuffer(buf, remStc);
// @@@DEPRECATED_2.103@@@
// Deprecated in 2020-07, can be made an error in 2.103
eSink.deprecation(token.loc, "storage class `%s` has no effect in type aliases", buf.peekChars());
@@ -5108,7 +5121,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (save == TOK.function_)
{
OutBuffer buf;
- AST.stcToBuffer(&buf, modStc);
+ AST.stcToBuffer(buf, modStc);
error("function literal cannot be `%s`", buf.peekChars());
}
else
@@ -5126,7 +5139,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
parameterList.parameters = new AST.Parameters();
Identifier id = Identifier.generateId("__T");
AST.Type t = new AST.TypeIdentifier(loc, id);
- parameterList.parameters.push(new AST.Parameter(STC.parameter, t, token.ident, null, null));
+ parameterList.parameters.push(new AST.Parameter(loc, STC.parameter, t, token.ident, null, null));
tpl = new AST.TemplateParameters();
AST.TemplateParameter tp = new AST.TemplateTypeParameter(loc, id, null, null);
@@ -5443,6 +5456,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
{
Identifier ai = null;
AST.Type at;
+ Loc aloc;
StorageClass storageClass = 0;
StorageClass stc = 0;
@@ -5524,6 +5538,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
lastai = token.ident;
ai = token.ident;
at = null; // infer argument type
+ aloc = token.loc;
nextToken();
goto Larg;
}
@@ -5532,7 +5547,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (!ai)
noIdentifierForDeclarator(at);
Larg:
- auto p = new AST.Parameter(storageClass, at, ai, null, null);
+ auto p = new AST.Parameter(aloc, storageClass, at, ai, null, null);
parameters.push(p);
if (token.value == TOK.comma)
{
@@ -5684,16 +5699,18 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
{
Identifier ai = token.ident;
AST.Type at = null; // infer parameter type
+ const aloc = token.loc;
nextToken();
check(TOK.assign);
- param = new AST.Parameter(storageClass, at, ai, null, null);
+ param = new AST.Parameter(aloc, storageClass, at, ai, null, null);
}
else if (isDeclaration(&token, NeedDeclaratorId.must, TOK.assign, null))
{
Identifier ai;
+ const aloc = token.loc;
AST.Type at = parseType(&ai);
check(TOK.assign);
- param = new AST.Parameter(storageClass, at, ai, null, null);
+ param = new AST.Parameter(aloc, storageClass, at, ai, null, null);
}
else if (storageClass != 0)
error("found `%s` while expecting `=` or identifier", n.toChars());
@@ -5789,6 +5806,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
case TOK.true_:
case TOK.false_:
case TOK.string_:
+ case TOK.hexadecimalString:
case TOK.leftParenthesis:
case TOK.cast_:
case TOK.mul:
@@ -5816,7 +5834,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
AST.Expression exp = parseExpression();
/* https://issues.dlang.org/show_bug.cgi?id=15103
* Improve declaration / initialization syntax error message
- * Error: found 'foo' when expecting ';' following statement
+ * Error: found 'foo' when expecting ';' following expression
* becomes Error: found `(` when expecting `;` or `=`, did you mean `Foo foo = 42`?
*/
if (token.value == TOK.identifier && exp.op == EXP.identifier)
@@ -5841,11 +5859,14 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
* otherwise we fall back on the old path (advancing the token).
*/
if (token.value != TOK.semicolon && peek(&token).value == TOK.semicolon)
- error("found `%s` when expecting `;` following statement", token.toChars());
+ error("found `%s` when expecting `;` following expression", token.toChars());
else
{
if (token.value != TOK.semicolon)
- error("found `%s` when expecting `;` following statement `%s` on line %s", token.toChars(), exp.toChars(), exp.loc.toChars());
+ {
+ error("found `%s` when expecting `;` following expression", token.toChars());
+ eSink.errorSupplemental(exp.loc, "expression: `%s`", exp.toChars());
+ }
nextToken();
}
}
@@ -6226,9 +6247,9 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (auto ds = parseDebugSpecification())
{
if (ds.ident)
- ds.error("declaration must be at module level");
+ eSink.error(ds.loc, "%s `%s` declaration must be at module level", ds.kind, ds.toPrettyChars);
else
- ds.error("level declaration must be at module level");
+ eSink.error(ds.loc, "%s `%s` level declaration must be at module level", ds.kind, ds.toPrettyChars);
}
break;
}
@@ -6242,9 +6263,9 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (auto vs = parseVersionSpecification())
{
if (vs.ident)
- vs.error("declaration must be at module level");
+ eSink.error(vs.loc, "%s `%s` declaration must be at module level", vs.kind, vs.toPrettyChars);
else
- vs.error("level declaration must be at module level");
+ eSink.error(vs.loc, "%s `%s` level declaration must be at module level", vs.kind, vs.toPrettyChars);
}
break;
}
@@ -6308,10 +6329,11 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
{
nextToken();
check(TOK.leftParenthesis);
+ auto param = parseAssignCondition();
AST.Expression condition = parseExpression();
closeCondition("switch", null, condition);
AST.Statement _body = parseStatement(ParseStatementFlags.scope_);
- s = new AST.SwitchStatement(loc, condition, _body, isfinal);
+ s = new AST.SwitchStatement(loc, param, condition, _body, isfinal, token.loc);
break;
}
case TOK.case_:
@@ -6580,7 +6602,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
}
case TOK.asm_:
- s = parseAsm();
+ s = parseAsm(false);
break;
case TOK.import_:
@@ -6951,10 +6973,12 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
* AsmInstruction ;
* AsmInstruction ; AsmInstruction
*
+ * Params:
+ * endOfLine = true if EOL means end of asm statement
* Returns:
* inline assembler block as a Statement
*/
- AST.Statement parseAsm()
+ AST.Statement parseAsm(bool endOfLine)
{
// Parse the asm block into a sequence of AsmStatements,
// each AsmStatement is one instruction.
@@ -6977,6 +7001,8 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
size_t nestlevel = 0;
while (1)
{
+ if (endOfLine)
+ nextDefineLine();
switch (token.value)
{
case TOK.identifier:
@@ -7011,6 +7037,10 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
}
break;
+ case TOK.endOfLine:
+ nextDefineLine();
+ goto case;
+
case TOK.semicolon:
if (nestlevel != 0)
error("mismatched number of curly brackets");
@@ -7018,7 +7048,9 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
if (toklist || label)
{
// Create AsmStatement from list of tokens we've saved
- AST.Statement s = new AST.AsmStatement(token.loc, toklist);
+ AST.AsmStatement as = new AST.AsmStatement(token.loc, toklist);
+ as.caseSensitive = !endOfLine;
+ AST.Statement s = as;
toklist = null;
ptoklist = &toklist;
if (label)
@@ -7062,6 +7094,8 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
break;
}
nextToken();
+ if (token.value == TOK.endOfLine)
+ nextToken();
auto s = new AST.CompoundAsmStatement(loc, statements, stc);
return s;
}
@@ -7291,6 +7325,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
case TOK.wcharLiteral:
case TOK.dcharLiteral:
case TOK.string_:
+ case TOK.hexadecimalString:
case TOK.file:
case TOK.fileFullPath:
case TOK.line:
@@ -7562,7 +7597,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
}
continue;
- // Valid tokens that follow a declaration
+ // Valid tokens that follow the start of a declaration
case TOK.rightParenthesis:
case TOK.rightBracket:
case TOK.assign:
@@ -7581,6 +7616,23 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
}
return false;
+ // To recognize the shortened function declaration syntax
+ case TOK.goesTo:
+ /*
+ 1. https://issues.dlang.org/show_bug.cgi?id=24088
+
+ 2. We need to make sure the would-be
+ declarator has an identifier otherwise function literals
+ are handled incorrectly. Some special treatment is required
+ here, it turns out that a lot of code in the compiler relies
+ on this mess (in the parser), i.e. having isDeclarator be more
+ precise the parsing of other things go kaboom, so we do it in a
+ separate case.
+ */
+ if (*haveId)
+ goto case TOK.do_;
+ goto default;
+
case TOK.identifier:
if (t.ident == Id._body)
{
@@ -8148,6 +8200,8 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
break;
case TOK.string_:
+ case TOK.hexadecimalString:
+ const bool hexString = token.value == TOK.hexadecimalString;
{
// cat adjacent strings
auto s = token.ustring;
@@ -8157,7 +8211,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
{
const prev = token;
nextToken();
- if (token.value == TOK.string_)
+ if (token.value == TOK.string_ || token.value == TOK.hexadecimalString)
{
if (token.postfix)
{
@@ -8183,6 +8237,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
break;
}
e = new AST.StringExp(loc, s[0 .. len], len, 1, postfix);
+ e.isStringExp().hexString = hexString;
break;
}
case TOK.void_:
@@ -8847,7 +8902,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
{
e = parseUnaryExp();
e = new AST.CastExp(loc, e, t);
- error("C style cast illegal, use `%s`", e.toChars());
+ error(loc, "C style cast illegal, use `%s`", e.toChars());
}
return e;
}
@@ -9549,7 +9604,7 @@ class Parser(AST, Lexer = dmd.lexer.Lexer) : Lexer
void usageOfBodyKeyword()
{
- if (compileEnv.obsolete)
+ version (none) // disable obsolete warning
{
eSink.warning(token.loc, "usage of identifer `body` as a keyword is obsolete. Use `do` instead.");
}
diff --git a/gcc/d/dmd/printast.d b/gcc/d/dmd/printast.d
index e43ffad..e1deb2c 100644
--- a/gcc/d/dmd/printast.d
+++ b/gcc/d/dmd/printast.d
@@ -64,7 +64,7 @@ extern (C++) final class PrintASTVisitor : Visitor
import dmd.hdrgen : floatToBuffer;
import dmd.common.outbuffer : OutBuffer;
OutBuffer buf;
- floatToBuffer(e.type, e.value, &buf, false);
+ floatToBuffer(e.type, e.value, buf, false);
printf("Real %s %s\n", buf.peekChars(), e.type ? e.type.toChars() : "");
}
diff --git a/gcc/d/dmd/root/filename.d b/gcc/d/dmd/root/filename.d
index 987c793..631c08c 100644
--- a/gcc/d/dmd/root/filename.d
+++ b/gcc/d/dmd/root/filename.d
@@ -14,13 +14,14 @@ module dmd.root.filename;
import core.stdc.ctype;
import core.stdc.errno;
import core.stdc.string;
+
+import dmd.common.file;
+import dmd.common.outbuffer;
+
import dmd.root.array;
import dmd.root.file;
-import dmd.common.outbuffer;
-import dmd.common.file;
import dmd.root.port;
import dmd.root.rmem;
-import dmd.root.rootobject;
import dmd.root.string;
version (Posix)
diff --git a/gcc/d/dmd/root/rootobject.d b/gcc/d/dmd/root/rootobject.d
index 7138841..65c499d 100644
--- a/gcc/d/dmd/root/rootobject.d
+++ b/gcc/d/dmd/root/rootobject.d
@@ -1,5 +1,5 @@
/**
- * Provide the root object that classes in dmd inherit from.
+ * Provide the root object that AST classes in dmd inherit from.
*
* Copyright: Copyright (C) 1999-2023 by The D Language Foundation, All Rights Reserved
* Authors: Walter Bright, https://www.digitalmars.com
@@ -11,10 +11,6 @@
module dmd.root.rootobject;
-import core.stdc.stdio;
-
-import dmd.common.outbuffer;
-
/***********************************************************
*/
diff --git a/gcc/d/dmd/safe.d b/gcc/d/dmd/safe.d
index c3fa90d..bd531c0 100644
--- a/gcc/d/dmd/safe.d
+++ b/gcc/d/dmd/safe.d
@@ -51,7 +51,7 @@ bool checkUnsafeAccess(Scope* sc, Expression e, bool readonly, bool printmsg)
DotVarExp dve = cast(DotVarExp)e;
if (VarDeclaration v = dve.var.isVarDeclaration())
{
- if (sc.intypeof || !sc.func || !sc.func.isSafeBypassingInference())
+ if (!sc.func)
return false;
auto ad = v.isMember2();
if (!ad)
@@ -65,6 +65,11 @@ bool checkUnsafeAccess(Scope* sc, Expression e, bool readonly, bool printmsg)
return true;
}
+ // This branch shouldn't be here, but unfortunately calling `ad.determineSize`
+ // breaks code with circular reference errors. Specifically, test23589.d fails
+ if (ad.sizeok != Sizeok.done && !sc.func.isSafeBypassingInference())
+ return false;
+
// needed to set v.overlapped and v.overlapUnsafe
if (ad.sizeok != Sizeok.done)
ad.determineSize(ad.loc);
@@ -74,9 +79,23 @@ bool checkUnsafeAccess(Scope* sc, Expression e, bool readonly, bool printmsg)
{
if (v.overlapped)
{
- if (sc.setUnsafe(!printmsg, e.loc,
+ if (sc.func.isSafeBypassingInference() && sc.setUnsafe(!printmsg, e.loc,
"field `%s.%s` cannot access pointers in `@safe` code that overlap other fields", ad, v))
+ {
return true;
+ }
+ else
+ {
+ import dmd.globals : FeatureState;
+ // @@@DEPRECATED_2.116@@@
+ // https://issues.dlang.org/show_bug.cgi?id=20655
+ // Inferring `@system` because of union access breaks code,
+ // so make it a deprecation safety violation as of 2.106
+ // To turn into an error, remove `isSafeBypassingInference` check in the
+ // above if statement and remove the else branch
+ sc.setUnsafePreview(FeatureState.default_, !printmsg, e.loc,
+ "field `%s.%s` cannot access pointers in `@safe` code that overlap other fields", ad, v);
+ }
}
}
diff --git a/gcc/d/dmd/semantic2.d b/gcc/d/dmd/semantic2.d
index 53c8714..6a379517 100644
--- a/gcc/d/dmd/semantic2.d
+++ b/gcc/d/dmd/semantic2.d
@@ -195,7 +195,7 @@ private extern(C++) final class Semantic2Visitor : Visitor
if (!tempinst.errors)
{
if (!tempdecl.literal)
- tempinst.error(tempinst.loc, "error instantiating");
+ .error(tempinst.loc, "%s `%s` error instantiating", tempinst.kind, tempinst.toPrettyChars);
if (tempinst.tinst)
tempinst.tinst.printInstantiationTrace();
}
@@ -275,6 +275,7 @@ private extern(C++) final class Semantic2Visitor : Visitor
// https://issues.dlang.org/show_bug.cgi?id=20417
// Don't run CTFE for the temporary variables inside typeof or __traits(compiles)
vd._init = vd._init.initializerSemantic(sc, vd.type, sc.intypeof == 1 || sc.flags & SCOPE.compile ? INITnointerpret : INITinterpret);
+ lowerStaticAAs(vd, sc);
vd.inuse--;
}
if (vd._init && vd.storage_class & STC.manifest)
@@ -314,7 +315,7 @@ private extern(C++) final class Semantic2Visitor : Visitor
}
if (hasInvalidEnumInitializer(ei.exp))
- vd.error(": Unable to initialize enum with class or pointer to struct. Use static const variable instead.");
+ .error(vd.loc, "%s `%s` : Unable to initialize enum with class or pointer to struct. Use static const variable instead.", vd.kind, vd.toPrettyChars);
}
}
else if (vd._init && vd.isThreadlocal())
@@ -325,13 +326,13 @@ private extern(C++) final class Semantic2Visitor : Visitor
{
ExpInitializer ei = vd._init.isExpInitializer();
if (ei && ei.exp.op == EXP.classReference)
- vd.error("is a thread-local class and cannot have a static initializer. Use `static this()` to initialize instead.");
+ .error(vd.loc, "%s `%s` is a thread-local class and cannot have a static initializer. Use `static this()` to initialize instead.", vd.kind, vd.toPrettyChars);
}
else if (vd.type.ty == Tpointer && vd.type.nextOf().ty == Tstruct && vd.type.nextOf().isMutable() && !vd.type.nextOf().isShared())
{
ExpInitializer ei = vd._init.isExpInitializer();
if (ei && ei.exp.op == EXP.address && (cast(AddrExp)ei.exp).e1.op == EXP.structLiteral)
- vd.error("is a thread-local pointer to struct and cannot have a static initializer. Use `static this()` to initialize instead.");
+ .error(vd.loc, "%s `%s` is a thread-local pointer to struct and cannot have a static initializer. Use `static this()` to initialize instead.", vd.kind, vd.toPrettyChars);
}
}
vd.semanticRun = PASS.semantic2done;
@@ -453,7 +454,7 @@ private extern(C++) final class Semantic2Visitor : Visitor
(!sameAttr || !sameParams)
)
{
- f2.error("cannot overload `extern(%s)` function at %s",
+ .error(f2.loc, "%s `%s` cannot overload `extern(%s)` function at %s", f2.kind, f2.toPrettyChars,
linkageToChars(f1._linkage),
f1.loc.toChars());
return 0;
@@ -472,14 +473,14 @@ private extern(C++) final class Semantic2Visitor : Visitor
// this condition, as well as the error for extern(C) functions above.
if (sameAttr != tf1.attributesEqual(tf2))
{
- f2.deprecation("cannot overload `extern(%s)` function at %s",
+ .deprecation(f2.loc, "%s `%s` cannot overload `extern(%s)` function at %s", f2.kind, f2.toPrettyChars,
linkageToChars(f1._linkage),
f1.loc.toChars());
}
return 0;
}
- error(f2.loc, "%s `%s%s` conflicts with previous declaration at %s",
+ .error(f2.loc, "%s `%s%s` conflicts with previous declaration at %s",
f2.kind(),
f2.toPrettyChars(),
parametersTypeToChars(tf2.parameterList),
@@ -630,7 +631,7 @@ private extern(C++) final class Semantic2Visitor : Visitor
if (ad._scope)
{
- ad.error("has forward references");
+ .error(ad.loc, "%s `%s` has forward references", ad.kind, ad.toPrettyChars);
return;
}
@@ -684,20 +685,20 @@ private extern(C++) final class Semantic2Visitor : Visitor
//printf(" found\n");
// Check that calling conventions match
if (fd._linkage != ifd._linkage)
- fd.error("linkage doesn't match interface function");
+ .error(fd.loc, "%s `%s` linkage doesn't match interface function", fd.kind, fd.toPrettyChars);
// Check that it is current
//printf("newinstance = %d fd.toParent() = %s ifd.toParent() = %s\n",
//newinstance, fd.toParent().toChars(), ifd.toParent().toChars());
if (fd.toParent() != cd && ifd.toParent() == base.sym)
- cd.error("interface function `%s` is not implemented", ifd.toFullSignature());
+ .error(cd.loc, "%s `%s` interface function `%s` is not implemented", cd.kind, cd.toPrettyChars, ifd.toFullSignature());
}
else
{
//printf(" not found %p\n", fd);
// BUG: should mark this class as abstract?
if (!cd.isAbstract())
- cd.error("interface function `%s` is not implemented", ifd.toFullSignature());
+ .error(cd.loc, "%s `%s` interface function `%s` is not implemented", cd.kind, cd.toPrettyChars, ifd.toFullSignature());
}
}
}
@@ -747,7 +748,7 @@ private void doGNUABITagSemantic(ref Expression e, ref Expression* lastTag)
// When `@gnuAbiTag` is used, the type will be the UDA, not the struct literal
if (e.op == EXP.type)
{
- e.error("`@%s` at least one argument expected", Id.udaGNUAbiTag.toChars());
+ error(e.loc, "`@%s` at least one argument expected", Id.udaGNUAbiTag.toChars());
return;
}
@@ -765,7 +766,7 @@ private void doGNUABITagSemantic(ref Expression e, ref Expression* lastTag)
auto ale = (*sle.elements)[0].isArrayLiteralExp();
if (ale is null)
{
- e.error("`@%s` at least one argument expected", Id.udaGNUAbiTag.toChars());
+ error(e.loc, "`@%s` at least one argument expected", Id.udaGNUAbiTag.toChars());
return;
}
@@ -774,8 +775,8 @@ private void doGNUABITagSemantic(ref Expression e, ref Expression* lastTag)
{
const str1 = (*lastTag.isStructLiteralExp().elements)[0].toString();
const str2 = ale.toString();
- e.error("only one `@%s` allowed per symbol", Id.udaGNUAbiTag.toChars());
- e.errorSupplemental("instead of `@%s @%s`, use `@%s(%.*s, %.*s)`",
+ error(e.loc, "only one `@%s` allowed per symbol", Id.udaGNUAbiTag.toChars());
+ errorSupplemental(e.loc, "instead of `@%s @%s`, use `@%s(%.*s, %.*s)`",
lastTag.toChars(), e.toChars(), Id.udaGNUAbiTag.toChars(),
// Avoid [ ... ]
cast(int)str1.length - 2, str1.ptr + 1,
@@ -791,7 +792,7 @@ private void doGNUABITagSemantic(ref Expression e, ref Expression* lastTag)
const str = elem.toStringExp().peekString();
if (!str.length)
{
- e.error("argument `%d` to `@%s` cannot be %s", cast(int)(idx + 1),
+ error(e.loc, "argument `%d` to `@%s` cannot be %s", cast(int)(idx + 1),
Id.udaGNUAbiTag.toChars(),
elem.isNullExp() ? "`null`".ptr : "empty".ptr);
continue;
@@ -801,7 +802,7 @@ private void doGNUABITagSemantic(ref Expression e, ref Expression* lastTag)
{
if (!c.isValidMangling())
{
- e.error("`@%s` char `0x%02x` not allowed in mangling",
+ error(e.loc, "`@%s` char `0x%02x` not allowed in mangling",
Id.udaGNUAbiTag.toChars(), c);
break;
}
@@ -819,3 +820,55 @@ private void doGNUABITagSemantic(ref Expression e, ref Expression* lastTag)
}
ale.elements.sort!predicate;
}
+
+/**
+ * Try lower a variable's static Associative Array to a newaa struct.
+ * Params:
+ * vd = Variable to lower
+ * sc = Scope
+ */
+void lowerStaticAAs(VarDeclaration vd, Scope* sc)
+{
+ if (vd.storage_class & STC.manifest)
+ return;
+ if (auto ei = vd._init.isExpInitializer())
+ {
+ scope v = new StaticAAVisitor(sc);
+ v.vd = vd;
+ ei.exp.accept(v);
+ }
+}
+
+/// Visit Associative Array literals and lower them to structs for static initialization
+private extern(C++) final class StaticAAVisitor : SemanticTimeTransitiveVisitor
+{
+ alias visit = SemanticTimeTransitiveVisitor.visit;
+ Scope* sc;
+ VarDeclaration vd;
+
+ this(Scope* sc) scope @safe
+ {
+ this.sc = sc;
+ }
+
+ override void visit(AssocArrayLiteralExp aaExp)
+ {
+ if (!verifyHookExist(aaExp.loc, *sc, Id._aaAsStruct, "initializing static associative arrays", Id.object))
+ return;
+
+ Expression hookFunc = new IdentifierExp(aaExp.loc, Id.empty);
+ hookFunc = new DotIdExp(aaExp.loc, hookFunc, Id.object);
+ hookFunc = new DotIdExp(aaExp.loc, hookFunc, Id._aaAsStruct);
+ auto arguments = new Expressions();
+ arguments.push(aaExp.syntaxCopy());
+ Expression loweredExp = new CallExp(aaExp.loc, hookFunc, arguments);
+
+ sc = sc.startCTFE();
+ loweredExp = loweredExp.expressionSemantic(sc);
+ loweredExp = resolveProperties(sc, loweredExp);
+ sc = sc.endCTFE();
+ loweredExp = loweredExp.ctfeInterpret();
+
+ aaExp.lowering = loweredExp;
+ }
+}
diff --git a/gcc/d/dmd/semantic3.d b/gcc/d/dmd/semantic3.d
index bff89f8..e093140 100644
--- a/gcc/d/dmd/semantic3.d
+++ b/gcc/d/dmd/semantic3.d
@@ -141,7 +141,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
if (!tempinst.errors)
{
if (!tempdecl.literal)
- tempinst.error(tempinst.loc, "error instantiating");
+ .error(tempinst.loc, "%s `%s` error instantiating", tempinst.kind, tempinst.toPrettyChars);
if (tempinst.tinst)
tempinst.tinst.printInstantiationTrace();
}
@@ -303,7 +303,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
if (!funcdecl.fbody && funcdecl.inferRetType && !f.next)
{
- funcdecl.error("has no function body with return type inference");
+ .error(funcdecl.loc, "%s `%s` has no function body with return type inference", funcdecl.kind, funcdecl.toPrettyChars);
return;
}
@@ -371,7 +371,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
if (!sc.intypeof)
{
if (fld.tok == TOK.delegate_)
- funcdecl.error("cannot be %s members", ad.kind());
+ .error(funcdecl.loc, "%s `%s` cannot be %s members", funcdecl.kind, funcdecl.toPrettyChars, ad.kind());
else
fld.tok = TOK.function_;
}
@@ -395,7 +395,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
// functions to be reworked as a frontend-only feature.
if (funcdecl.hasDualContext())
{
- funcdecl.deprecation("function requires a dual-context, which is deprecated");
+ .deprecation(funcdecl.loc, "%s `%s` function requires a dual-context, which is deprecated", funcdecl.kind, funcdecl.toPrettyChars);
if (auto ti = sc2.parent ? sc2.parent.isInstantiated() : null)
ti.printInstantiationTrace(Classification.deprecation);
}
@@ -412,11 +412,11 @@ private extern(C++) final class Semantic3Visitor : Visitor
if (!global.params.useTypeInfo || !Type.dtypeinfo || !Type.typeinfotypelist)
{
if (!global.params.useTypeInfo)
- funcdecl.error("D-style variadic functions cannot be used with -betterC");
+ .error(funcdecl.loc, "%s `%s` D-style variadic functions cannot be used with -betterC", funcdecl.kind, funcdecl.toPrettyChars);
else if (!Type.typeinfotypelist)
- funcdecl.error("`object.TypeInfo_Tuple` could not be found, but is implicitly used in D-style variadic functions");
+ .error(funcdecl.loc, "%s `%s` `object.TypeInfo_Tuple` could not be found, but is implicitly used in D-style variadic functions", funcdecl.kind, funcdecl.toPrettyChars);
else
- funcdecl.error("`object.TypeInfo` could not be found, but is implicitly used in D-style variadic functions");
+ .error(funcdecl.loc, "%s `%s` `object.TypeInfo` could not be found, but is implicitly used in D-style variadic functions", funcdecl.kind, funcdecl.toPrettyChars);
fatal();
}
@@ -471,7 +471,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
stc |= STC.temp;
}
Type vtype = fparam.type;
- auto v = new VarDeclaration(funcdecl.loc, vtype, id, null);
+ auto v = new VarDeclaration(fparam.loc, vtype, id, null);
//printf("declaring parameter %s of type %s\n", v.toChars(), v.type.toChars());
stc |= STC.parameter;
if (f.parameterList.varargs == VarArg.typesafe && i + 1 == nparams)
@@ -484,7 +484,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
v.dsymbolSemantic(sc2);
if (!sc2.insert(v))
{
- funcdecl.error("parameter `%s.%s` is already defined", funcdecl.toChars(), v.toChars());
+ .error(funcdecl.loc, "%s `%s` parameter `%s.%s` is already defined", funcdecl.kind, funcdecl.toPrettyChars, funcdecl.toChars(), v.toChars());
funcdecl.errors = true;
}
else
@@ -523,7 +523,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
//printf("declaring tuple %s\n", v.toChars());
v.isexp = true;
if (!sc2.insert(v))
- funcdecl.error("parameter `%s.%s` is already defined", funcdecl.toChars(), v.toChars());
+ .error(funcdecl.loc, "%s `%s` parameter `%s.%s` is already defined", funcdecl.kind, funcdecl.toPrettyChars, funcdecl.toChars(), v.toChars());
funcdecl.localsymtab.insert(v);
v.parent = funcdecl;
}
@@ -687,7 +687,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
* as delegating calls to other constructors
*/
if (v.isCtorinit() && !v.type.isMutable() && cd)
- funcdecl.error("missing initializer for %s field `%s`", MODtoChars(v.type.mod), v.toChars());
+ .error(funcdecl.loc, "%s `%s` missing initializer for %s field `%s`", funcdecl.kind, funcdecl.toPrettyChars, MODtoChars(v.type.mod), v.toChars());
else if (v.storage_class & STC.nodefaultctor)
error(funcdecl.loc, "field `%s` must be initialized in constructor", v.toChars());
else if (v.type.needsNested())
@@ -698,7 +698,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
bool mustInit = (v.storage_class & STC.nodefaultctor || v.type.needsNested());
if (mustInit && !(sc2.ctorflow.fieldinit[i].csx & CSX.this_ctor))
{
- funcdecl.error("field `%s` must be initialized but skipped", v.toChars());
+ .error(funcdecl.loc, "%s `%s` field `%s` must be initialized but skipped", funcdecl.kind, funcdecl.toPrettyChars, v.toChars());
}
}
}
@@ -714,11 +714,11 @@ private extern(C++) final class Semantic3Visitor : Visitor
FuncDeclaration fd = resolveFuncCall(Loc.initial, sc2, cd.baseClass.ctor, null, tthis, ArgumentList(), FuncResolveFlag.quiet);
if (!fd)
{
- funcdecl.error("no match for implicit `super()` call in constructor");
+ .error(funcdecl.loc, "%s `%s` no match for implicit `super()` call in constructor", funcdecl.kind, funcdecl.toPrettyChars);
}
else if (fd.storage_class & STC.disable)
{
- funcdecl.error("cannot call `super()` implicitly because it is annotated with `@disable`");
+ .error(funcdecl.loc, "%s `%s` cannot call `super()` implicitly because it is annotated with `@disable`", funcdecl.kind, funcdecl.toPrettyChars);
}
else
{
@@ -749,7 +749,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
funcdecl.buildEnsureRequire();
// Check for errors related to 'nothrow'.
- const blockexit = funcdecl.fbody.blockExit(funcdecl, f.isnothrow);
+ const blockexit = funcdecl.fbody.blockExit(funcdecl, f.isnothrow ? global.errorSink : null);
if (f.isnothrow && blockexit & BE.throw_)
error(funcdecl.loc, "%s `%s` may throw but is marked as `nothrow`", funcdecl.kind(), funcdecl.toPrettyChars());
@@ -802,7 +802,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
// Fallthrough despite being declared as noreturn? return is already rejected when evaluating the ReturnStatement
if (blockexit & BE.fallthru)
{
- funcdecl.error("is typed as `%s` but does return", f.next.toChars());
+ .error(funcdecl.loc, "%s `%s` is typed as `%s` but does return", funcdecl.kind, funcdecl.toPrettyChars, f.next.toChars());
funcdecl.loc.errorSupplemental("`noreturn` functions must either throw, abort or loop indefinitely");
}
}
@@ -812,9 +812,9 @@ private extern(C++) final class Semantic3Visitor : Visitor
if ((blockexit & BE.fallthru) && f.next.ty != Tvoid && !inlineAsm && !(sc.flags & SCOPE.Cfile))
{
if (!funcdecl.hasReturnExp)
- funcdecl.error("has no `return` statement, but is expected to return a value of type `%s`", f.next.toChars());
+ .error(funcdecl.loc, "%s `%s` has no `return` statement, but is expected to return a value of type `%s`", funcdecl.kind, funcdecl.toPrettyChars, f.next.toChars());
else
- funcdecl.error("no `return exp;` or `assert(0);` at end of function");
+ .error(funcdecl.loc, "%s `%s` no `return exp;` or `assert(0);` at end of function", funcdecl.kind, funcdecl.toPrettyChars);
}
}
@@ -993,7 +993,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
freq = freq.statementSemantic(sc2);
// @@@DEPRECATED_2.111@@@ - pass `isnothrow` instead of `false` to print a more detailed error msg`
- const blockExit = freq.blockExit(funcdecl, false);
+ const blockExit = freq.blockExit(funcdecl, null);
if (blockExit & BE.throw_)
{
if (isnothrow)
@@ -1023,7 +1023,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
{
if (e.id)
{
- funcdecl.error(e.ensure.loc, "`void` functions have no result");
+ .error(e.ensure.loc, "%s `%s` `void` functions have no result", funcdecl.kind, funcdecl.toPrettyChars);
//fens = null;
}
}
@@ -1040,7 +1040,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
fens = fens.statementSemantic(sc2);
// @@@DEPRECATED_2.111@@@ - pass `isnothrow` instead of `false` to print a more detailed error msg`
- const blockExit = fens.blockExit(funcdecl, false);
+ const blockExit = fens.blockExit(funcdecl, null);
if (blockExit & BE.throw_)
{
if (isnothrow)
@@ -1075,7 +1075,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
{
if (!v._init)
{
- v.error("zero-length `out` parameters are not allowed.");
+ .error(v.loc, "%s `%s` zero-length `out` parameters are not allowed.", v.kind, v.toPrettyChars);
return;
}
ExpInitializer ie = v._init.isExpInitializer();
@@ -1177,7 +1177,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
s = s.statementSemantic(sc2);
- const blockexit = s.blockExit(funcdecl, isnothrow);
+ const blockexit = s.blockExit(funcdecl, isnothrow ? global.errorSink : null);
if (blockexit & BE.throw_)
{
funcdecl.hasNoEH = false;
@@ -1187,7 +1187,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
f.isnothrow = false;
}
- if (sbody.blockExit(funcdecl, f.isnothrow) == BE.fallthru)
+ if (sbody.blockExit(funcdecl, f.isnothrow ? global.errorSink : null) == BE.fallthru)
sbody = new CompoundStatement(Loc.initial, sbody, s);
else
sbody = new TryFinallyStatement(Loc.initial, sbody, s);
@@ -1229,7 +1229,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
}
else
{
- funcdecl.error("synchronized function `%s` must be a member of a class", funcdecl.toChars());
+ .error(funcdecl.loc, "%s `%s` synchronized function `%s` must be a member of a class", funcdecl.kind, funcdecl.toPrettyChars, funcdecl.toChars());
}
}
@@ -1246,7 +1246,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
LabelDsymbol label = cast(LabelDsymbol)keyValue.value;
if (!label.statement && (!label.deleted || label.iasm))
{
- funcdecl.error(label.loc, "label `%s` is undefined", label.toChars());
+ .error(label.loc, "%s `%s` label `%s` is undefined", funcdecl.kind, funcdecl.toPrettyChars, label.toChars());
}
}
@@ -1260,7 +1260,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
}
if (funcdecl.isNaked() && (funcdecl.fensures || funcdecl.frequires))
- funcdecl.error("naked assembly functions with contracts are not supported");
+ .error(funcdecl.loc, "%s `%s` naked assembly functions with contracts are not supported", funcdecl.kind, funcdecl.toPrettyChars);
sc2.ctorflow.callSuper = CSX.none;
sc2.pop();
@@ -1366,7 +1366,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
}
if (isCppNonMappableType(f.next.toBasetype()))
{
- funcdecl.error("cannot return type `%s` because its linkage is `extern(C++)`", f.next.toChars());
+ .error(funcdecl.loc, "%s `%s` cannot return type `%s` because its linkage is `extern(C++)`", funcdecl.kind, funcdecl.toPrettyChars, f.next.toChars());
if (f.next.isTypeDArray())
errorSupplemental(funcdecl.loc, "slices are specific to D and do not have a counterpart representation in C++", f.next.toChars());
funcdecl.errors = true;
@@ -1375,7 +1375,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
{
if (isCppNonMappableType(param.type.toBasetype(), param))
{
- funcdecl.error("cannot have parameter of type `%s` because its linkage is `extern(C++)`", param.type.toChars());
+ .error(funcdecl.loc, "%s `%s` cannot have parameter of type `%s` because its linkage is `extern(C++)`", funcdecl.kind, funcdecl.toPrettyChars, param.type.toChars());
if (param.type.toBasetype().isTypeSArray())
errorSupplemental(funcdecl.loc, "perhaps use a `%s*` type instead",
param.type.nextOf().mutableOf().unSharedOf().toChars());
@@ -1453,7 +1453,7 @@ private extern(C++) final class Semantic3Visitor : Visitor
{
// storage_class is apparently not set for dtor & ctor
OutBuffer ob;
- stcToBuffer(&ob,
+ stcToBuffer(ob,
(ngErr ? STC.nogc : 0) |
(puErr ? STC.pure_ : 0) |
(saErr ? STC.system : 0)
@@ -1620,7 +1620,7 @@ private struct FuncDeclSem3
FuncDeclaration fdv = funcdecl.foverrides[i];
if (fdv.fbody && !fdv.frequires)
{
- funcdecl.error("cannot have an in contract when overridden function `%s` does not have an in contract", fdv.toPrettyChars());
+ .error(funcdecl.loc, "%s `%s` cannot have an in contract when overridden function `%s` does not have an in contract", funcdecl.kind, funcdecl.toPrettyChars, fdv.toPrettyChars());
break;
}
}
diff --git a/gcc/d/dmd/sideeffect.d b/gcc/d/dmd/sideeffect.d
index 30921c6..de92b29 100644
--- a/gcc/d/dmd/sideeffect.d
+++ b/gcc/d/dmd/sideeffect.d
@@ -14,6 +14,7 @@ module dmd.sideeffect;
import dmd.astenums;
import dmd.declaration;
import dmd.dscope;
+import dmd.errors;
import dmd.expression;
import dmd.expressionsem;
import dmd.func;
@@ -299,7 +300,7 @@ bool discardValue(Expression e)
}
else
s = ce.e1.toChars();
- e.warning("calling `%s` without side effects discards return value of type `%s`; prepend a `cast(void)` if intentional", s, e.type.toChars());
+ warning(e.loc, "calling `%s` without side effects discards return value of type `%s`; prepend a `cast(void)` if intentional", s, e.type.toChars());
}
}
}
@@ -368,12 +369,12 @@ bool discardValue(Expression e)
BinExp tmp = e.isBinExp();
assert(tmp);
- e.error("the result of the equality expression `%s` is discarded", e.toChars());
+ error(e.loc, "the result of the equality expression `%s` is discarded", e.toChars());
bool seenSideEffect = false;
foreach(expr; [tmp.e1, tmp.e2])
{
if (hasSideEffect(expr)) {
- expr.errorSupplemental("note that `%s` may have a side effect", expr.toChars());
+ errorSupplemental(expr.loc, "note that `%s` may have a side effect", expr.toChars());
seenSideEffect |= true;
}
}
@@ -381,7 +382,7 @@ bool discardValue(Expression e)
default:
break;
}
- e.error("`%s` has no effect", e.toChars());
+ error(e.loc, "`%s` has no effect", e.toChars());
return true;
}
diff --git a/gcc/d/dmd/statement.d b/gcc/d/dmd/statement.d
index 607dd51..da26bc9 100644
--- a/gcc/d/dmd/statement.d
+++ b/gcc/d/dmd/statement.d
@@ -16,20 +16,14 @@ module dmd.statement;
import core.stdc.stdarg;
import core.stdc.stdio;
-import dmd.aggregate;
import dmd.arraytypes;
import dmd.astenums;
import dmd.ast_node;
+import dmd.errors;
import dmd.gluelayer;
import dmd.cond;
-import dmd.dclass;
import dmd.declaration;
-import dmd.denum;
-import dmd.dimport;
-import dmd.dscope;
import dmd.dsymbol;
-import dmd.dtemplate;
-import dmd.errors;
import dmd.expression;
import dmd.func;
import dmd.globals;
@@ -37,40 +31,14 @@ import dmd.hdrgen;
import dmd.id;
import dmd.identifier;
import dmd.location;
-import dmd.dinterpret;
import dmd.mtype;
import dmd.common.outbuffer;
import dmd.root.rootobject;
import dmd.sapply;
-import dmd.sideeffect;
import dmd.staticassert;
import dmd.tokens;
import dmd.visitor;
-/**
- * Returns:
- * `TypeIdentifier` corresponding to `object.Throwable`
- */
-TypeIdentifier getThrowable()
-{
- auto tid = new TypeIdentifier(Loc.initial, Id.empty);
- tid.addIdent(Id.object);
- tid.addIdent(Id.Throwable);
- return tid;
-}
-
-/**
- * Returns:
- * TypeIdentifier corresponding to `object.Exception`
- */
-TypeIdentifier getException()
-{
- auto tid = new TypeIdentifier(Loc.initial, Id.empty);
- tid.addIdent(Id.object);
- tid.addIdent(Id.Exception);
- return tid;
-}
-
/***********************************************************
* Specification: https://dlang.org/spec/statement.html
*/
@@ -114,68 +82,6 @@ extern (C++) abstract class Statement : ASTNode
return b;
}
- override final const(char)* toChars() const
- {
- HdrGenState hgs;
- OutBuffer buf;
- .toCBuffer(this, &buf, &hgs);
- buf.writeByte(0);
- return buf.extractSlice().ptr;
- }
-
- static if (__VERSION__ < 2092)
- {
- final void error(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.error);
- va_end(ap);
- }
-
- final void warning(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.warning);
- va_end(ap);
- }
-
- final void deprecation(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.deprecation);
- va_end(ap);
- }
- }
- else
- {
- pragma(printf) final void error(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.error);
- va_end(ap);
- }
-
- pragma(printf) final void warning(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.warning);
- va_end(ap);
- }
-
- pragma(printf) final void deprecation(const(char)* format, ...)
- {
- va_list ap;
- va_start(ap, format);
- .verrorReport(loc, format, ap, ErrorKind.deprecation);
- va_end(ap);
- }
- }
-
Statement getRelatedLabeled()
{
return this;
@@ -656,12 +562,7 @@ extern (C++) final class CompoundDeclarationStatement : CompoundStatement
override CompoundDeclarationStatement syntaxCopy()
{
- auto a = new Statements(statements.length);
- foreach (i, s; *statements)
- {
- (*a)[i] = s ? s.syntaxCopy() : null;
- }
- return new CompoundDeclarationStatement(loc, a);
+ return new CompoundDeclarationStatement(loc, Statement.arraySyntaxCopy(statements));
}
override void accept(Visitor v)
@@ -686,12 +587,7 @@ extern (C++) final class UnrolledLoopStatement : Statement
override UnrolledLoopStatement syntaxCopy()
{
- auto a = new Statements(statements.length);
- foreach (i, s; *statements)
- {
- (*a)[i] = s ? s.syntaxCopy() : null;
- }
- return new UnrolledLoopStatement(loc, a);
+ return new UnrolledLoopStatement(loc, Statement.arraySyntaxCopy(statements));
}
override bool hasBreak() const pure nothrow
@@ -1202,30 +1098,39 @@ extern (C++) final class StaticAssertStatement : Statement
*/
extern (C++) final class SwitchStatement : Statement
{
+ Parameter param;
Expression condition; /// switch(condition)
Statement _body; ///
bool isFinal; /// https://dlang.org/spec/statement.html#final-switch-statement
+ Loc endloc;
+ bool hasDefault; /// true if has default statement
+ bool hasVars; /// true if has variable case values
DefaultStatement sdefault; /// default:
Statement tryBody; /// set to TryCatchStatement or TryFinallyStatement if in _body portion
TryFinallyStatement tf; /// set if in the 'finally' block of a TryFinallyStatement
GotoCaseStatements gotoCases; /// array of unresolved GotoCaseStatement's
CaseStatements* cases; /// array of CaseStatement's
- int hasNoDefault; /// !=0 if no default statement
- int hasVars; /// !=0 if has variable case values
VarDeclaration lastVar; /// last observed variable declaration in this statement
- extern (D) this(const ref Loc loc, Expression condition, Statement _body, bool isFinal)
+ extern (D) this(const ref Loc loc, Parameter param, Expression condition, Statement _body, bool isFinal, Loc endloc)
{
super(loc, STMT.Switch);
+ this.param = param;
this.condition = condition;
this._body = _body;
this.isFinal = isFinal;
+ this.endloc = endloc;
}
override SwitchStatement syntaxCopy()
{
- return new SwitchStatement(loc, condition.syntaxCopy(), _body.syntaxCopy(), isFinal);
+ return new SwitchStatement(loc,
+ param ? param.syntaxCopy() : null,
+ condition.syntaxCopy(),
+ _body.syntaxCopy(),
+ isFinal,
+ endloc);
}
override bool hasBreak() const pure nothrow
@@ -1233,46 +1138,6 @@ extern (C++) final class SwitchStatement : Statement
return true;
}
- /************************************
- * Returns:
- * true if error
- */
- extern (D) bool checkLabel()
- {
- /*
- * Checks the scope of a label for existing variable declaration.
- * Params:
- * vd = last variable declared before this case/default label
- * Returns: `true` if the variables declared in this label would be skipped.
- */
- bool checkVar(VarDeclaration vd)
- {
- for (auto v = vd; v && v != lastVar; v = v.lastVar)
- {
- if (v.isDataseg() || (v.storage_class & (STC.manifest | STC.temp) && vd.ident != Id.withSym) || v._init.isVoidInitializer())
- continue;
- if (vd.ident == Id.withSym)
- error("`switch` skips declaration of `with` temporary at %s", v.loc.toChars());
- else
- error("`switch` skips declaration of variable `%s` at %s", v.toPrettyChars(), v.loc.toChars());
- return true;
- }
- return false;
- }
-
- enum error = true;
-
- if (sdefault && checkVar(sdefault.lastVar))
- return !error; // return error once fully deprecated
-
- foreach (scase; *cases)
- {
- if (scase && checkVar(scase.lastVar))
- return !error; // return error once fully deprecated
- }
- return !error;
- }
-
override void accept(Visitor v)
{
v.visit(this);
@@ -1796,87 +1661,6 @@ extern (C++) final class GotoStatement : Statement
return new GotoStatement(loc, ident);
}
- /**************
- * Returns: true for error
- */
- extern (D) bool checkLabel()
- {
- if (!label.statement)
- return true; // error should have been issued for this already
-
- if (label.statement.os != os)
- {
- if (os && os.tok == TOK.onScopeFailure && !label.statement.os)
- {
- // Jump out from scope(failure) block is allowed.
- }
- else
- {
- if (label.statement.os)
- error("cannot `goto` in to `%s` block", Token.toChars(label.statement.os.tok));
- else
- error("cannot `goto` out of `%s` block", Token.toChars(os.tok));
- return true;
- }
- }
-
- if (label.statement.tf != tf)
- {
- error("cannot `goto` in or out of `finally` block");
- return true;
- }
-
- if (label.statement.inCtfeBlock && !inCtfeBlock)
- {
- error("cannot `goto` into `if (__ctfe)` block");
- return true;
- }
-
- Statement stbnext;
- for (auto stb = tryBody; stb != label.statement.tryBody; stb = stbnext)
- {
- if (!stb)
- {
- error("cannot `goto` into `try` block");
- return true;
- }
- if (auto stf = stb.isTryFinallyStatement())
- stbnext = stf.tryBody;
- else if (auto stc = stb.isTryCatchStatement())
- stbnext = stc.tryBody;
- else
- assert(0);
- }
-
- VarDeclaration vd = label.statement.lastVar;
- if (!vd || vd.isDataseg() || (vd.storage_class & STC.manifest))
- return false;
-
- VarDeclaration last = lastVar;
- while (last && last != vd)
- last = last.lastVar;
- if (last == vd)
- {
- // All good, the label's scope has no variables
- }
- else if (vd.storage_class & STC.exptemp)
- {
- // Lifetime ends at end of expression, so no issue with skipping the statement
- }
- else if (vd.ident == Id.withSym)
- {
- error("`goto` skips declaration of `with` temporary at %s", vd.loc.toChars());
- return true;
- }
- else
- {
- error("`goto` skips declaration of variable `%s` at %s", vd.toPrettyChars(), vd.loc.toChars());
- return true;
- }
-
- return false;
- }
-
override void accept(Visitor v)
{
v.visit(this);
@@ -1959,6 +1743,7 @@ extern (C++) final class LabelDsymbol : Dsymbol
extern (C++) class AsmStatement : Statement
{
Token* tokens;
+ bool caseSensitive; // for register names
extern (D) this(const ref Loc loc, Token* tokens) @safe
{
@@ -2057,12 +1842,7 @@ extern (C++) final class CompoundAsmStatement : CompoundStatement
override CompoundAsmStatement syntaxCopy()
{
- auto a = new Statements(statements.length);
- foreach (i, s; *statements)
- {
- (*a)[i] = s ? s.syntaxCopy() : null;
- }
- return new CompoundAsmStatement(loc, a, stc);
+ return new CompoundAsmStatement(loc, Statement.arraySyntaxCopy(statements), stc);
}
override void accept(Visitor v)
diff --git a/gcc/d/dmd/statement.h b/gcc/d/dmd/statement.h
index eb4849d..fe899c6 100644
--- a/gcc/d/dmd/statement.h
+++ b/gcc/d/dmd/statement.h
@@ -113,10 +113,8 @@ public:
virtual Statement *syntaxCopy();
- const char *toChars() const override final;
-
void error(const char *format, ...);
- void warning(const char *format, ...);
+ void warning(unsigned flag, const char *format, ...);
void deprecation(const char *format, ...);
virtual Statement *getRelatedLabeled() { return this; }
virtual bool hasBreak() const;
@@ -431,17 +429,19 @@ public:
class SwitchStatement final : public Statement
{
public:
+ Parameter *param;
Expression *condition;
Statement *_body;
d_bool isFinal;
+ Loc endloc;
+ d_bool hasDefault; // true if default statement
+ d_bool hasVars; // true if has variable case values
DefaultStatement *sdefault;
Statement *tryBody; // set to TryCatchStatement or TryFinallyStatement if in _body portion
TryFinallyStatement *tf;
GotoCaseStatements gotoCases; // array of unresolved GotoCaseStatement's
CaseStatements *cases; // array of CaseStatement's
- int hasNoDefault; // !=0 if no default statement
- int hasVars; // !=0 if has variable case values
VarDeclaration *lastVar;
SwitchStatement *syntaxCopy() override;
@@ -712,6 +712,7 @@ class AsmStatement : public Statement
{
public:
Token *tokens;
+ bool caseSensitive; // for register names
AsmStatement *syntaxCopy() override;
void accept(Visitor *v) override { v->visit(this); }
diff --git a/gcc/d/dmd/statementsem.d b/gcc/d/dmd/statementsem.d
index 178cef5..962ef62 100644
--- a/gcc/d/dmd/statementsem.d
+++ b/gcc/d/dmd/statementsem.d
@@ -38,12 +38,14 @@ import dmd.dsymbol;
import dmd.dsymbolsem;
import dmd.dtemplate;
import dmd.errors;
+import dmd.errorsink;
import dmd.escape;
import dmd.expression;
import dmd.expressionsem;
import dmd.func;
import dmd.globals;
import dmd.gluelayer;
+import dmd.hdrgen;
import dmd.id;
import dmd.identifier;
import dmd.importc;
@@ -129,7 +131,7 @@ private Expression checkAssignmentAsCondition(Expression e, Scope* sc)
auto ec = lastComma(e);
if (ec.op == EXP.assign)
{
- ec.error("assignment cannot be used as a condition, perhaps `==` was meant?");
+ error(ec.loc, "assignment cannot be used as a condition, perhaps `==` was meant?");
return ErrorExp.get();
}
return e;
@@ -349,7 +351,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
Identifier id = Identifier.generateId("__o");
Statement handler = new PeelStatement(sexception);
- if (sexception.blockExit(sc.func, false) & BE.fallthru)
+ if (sexception.blockExit(sc.func, null) & BE.fallthru)
{
auto ts = new ThrowStatement(Loc.initial, new IdentifierExp(Loc.initial, id));
ts.internalThrow = true;
@@ -664,7 +666,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
const olderrors = global.startGagging();
discardValue(fs.increment);
if (global.endGagging(olderrors))
- fs.increment.deprecation("`%s` has no effect", fs.increment.toChars());
+ deprecation(fs.increment.loc, "`%s` has no effect", fs.increment.toChars());
if (checkNonAssignmentArrayOp(fs.increment))
fs.increment = ErrorExp.get();
fs.increment = fs.increment.optimize(WANTvalue);
@@ -704,7 +706,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (!p.type)
{
- fs.error("cannot infer type for `foreach` variable `%s`, perhaps set it explicitly", p.ident.toChars());
+ error(fs.loc, "cannot infer type for `foreach` variable `%s`, perhaps set it explicitly", p.ident.toChars());
p.type = Type.terror;
result = true;
}
@@ -753,7 +755,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
assert(oaggr.type);
- fs.error("invalid `%s` aggregate `%s` of type `%s`",
+ error(fs.loc, "invalid `%s` aggregate `%s` of type `%s`",
Token.toChars(fs.op), oaggr.toChars(), oaggr.type.toPrettyChars());
if (auto ad = isAggregate(fs.aggr.type))
@@ -815,11 +817,11 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (foundMismatch && dim != foreachParamCount)
{
const(char)* plural = foreachParamCount > 1 ? "s" : "";
- fs.error("cannot infer argument types, expected %llu argument%s, not %llu",
+ error(fs.loc, "cannot infer argument types, expected %llu argument%s, not %llu",
cast(ulong) foreachParamCount, plural, cast(ulong) dim);
}
else
- fs.error("cannot uniquely infer `foreach` argument types");
+ error(fs.loc, "cannot uniquely infer `foreach` argument types");
return setError();
}
@@ -845,11 +847,11 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (p.storageClass & STC.manifest)
{
- fs.error("cannot declare `enum` loop variables for non-unrolled foreach");
+ error(fs.loc, "cannot declare `enum` loop variables for non-unrolled foreach");
}
if (p.storageClass & STC.alias_)
{
- fs.error("cannot declare `alias` loop variables for non-unrolled foreach");
+ error(fs.loc, "cannot declare `alias` loop variables for non-unrolled foreach");
}
}
@@ -861,7 +863,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
void rangeError()
{
- fs.error("cannot infer argument types");
+ error(fs.loc, "cannot infer argument types");
return retError();
}
@@ -959,7 +961,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (dim < 1 || dim > 2)
{
- fs.error("only one or two arguments for array `foreach`");
+ error(fs.loc, "only one or two arguments for array `foreach`");
return retError();
}
@@ -978,7 +980,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
Type tindex = (*fs.parameters)[0].type;
if (!tindex.isintegral())
{
- fs.error("foreach: key cannot be of non-integral type `%s`", tindex.toChars());
+ error(fs.loc, "foreach: key cannot be of non-integral type `%s`", tindex.toChars());
return retError();
}
/* What cases to deprecate implicit conversions for:
@@ -990,7 +992,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
(tn.ty != tv.ty && tn.ty.isSomeChar && tv.ty.isSomeChar)) &&
!Type.tsize_t.implicitConvTo(tindex))
{
- fs.deprecation("foreach: loop index implicitly converted from `size_t` to `%s`",
+ deprecation(fs.loc, "foreach: loop index implicitly converted from `size_t` to `%s`",
tindex.toChars());
}
}
@@ -1007,7 +1009,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (p.storageClass & STC.ref_)
{
- fs.error("`foreach`: value of UTF conversion cannot be `ref`");
+ error(fs.loc, "`foreach`: value of UTF conversion cannot be `ref`");
return retError();
}
if (dim == 2)
@@ -1015,7 +1017,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
p = (*fs.parameters)[0];
if (p.storageClass & STC.ref_)
{
- fs.error("`foreach`: key cannot be `ref`");
+ error(fs.loc, "`foreach`: key cannot be `ref`");
return retError();
}
}
@@ -1036,7 +1038,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (fs.key.type.constConv(p.type) == MATCH.nomatch)
{
- fs.error("key type mismatch, `%s` to `ref %s`",
+ error(fs.loc, "key type mismatch, `%s` to `ref %s`",
fs.key.type.toChars(), p.type.toChars());
return retError();
}
@@ -1048,7 +1050,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
dimrange.imax = SignExtendedNumber(dimrange.imax.value-1);
if (!IntRange.fromType(fs.key.type).contains(dimrange))
{
- fs.error("index type `%s` cannot cover index range 0..%llu",
+ error(fs.loc, "index type `%s` cannot cover index range 0..%llu",
p.type.toChars(), ta.dim.toInteger());
return retError();
}
@@ -1071,7 +1073,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
Type t = tab.nextOf();
if (t.constConv(p.type) == MATCH.nomatch)
{
- fs.error("argument type mismatch, `%s` to `ref %s`",
+ error(fs.loc, "argument type mismatch, `%s` to `ref %s`",
t.toChars(), p.type.toChars());
return retError();
}
@@ -1197,13 +1199,13 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
case Taarray:
if (fs.op == TOK.foreach_reverse_)
- fs.warning("cannot use `foreach_reverse` with an associative array");
+ warning(fs.loc, "cannot use `foreach_reverse` with an associative array");
if (checkForArgTypes(fs))
return retError();
if (dim < 1 || dim > 2)
{
- fs.error("only one or two arguments for associative array `foreach`");
+ error(fs.loc, "only one or two arguments for associative array `foreach`");
return retError();
}
return retStmt(apply());
@@ -1308,7 +1310,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
if (tfront.ty == Tvoid)
{
- fs.error("`%s.front` is `void` and has no value", oaggr.toChars());
+ error(fs.loc, "`%s.front` is `void` and has no value", oaggr.toChars());
return retError();
}
@@ -1348,7 +1350,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (exps.length != dim)
{
const(char)* plural = exps.length > 1 ? "s" : "";
- fs.error("cannot infer argument types, expected %llu argument%s, not %llu",
+ error(fs.loc, "cannot infer argument types, expected %llu argument%s, not %llu",
cast(ulong) exps.length, plural, cast(ulong) dim);
return retError();
}
@@ -1371,7 +1373,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
p.type = p.type.addStorageClass(sc).typeSemantic(loc, sc2);
if (!exp.implicitConvTo(p.type))
{
- fs.error("cannot implicilty convert range element of type `%s` to variable `%s` of type `%s`",
+ error(fs.loc, "cannot implicilty convert range element of type `%s` to variable `%s` of type `%s`",
exp.type.toChars(), p.toChars(), p.type.toChars());
return retError();
}
@@ -1390,7 +1392,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
version (none)
{
- printf("init: %s\n", _init.toChars());
+ printf("init: %s\n", toChars(_init));
printf("condition: %s\n", condition.toChars());
printf("increment: %s\n", increment.toChars());
printf("body: %s\n", forbody.toChars());
@@ -1399,12 +1401,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
case Tdelegate:
if (fs.op == TOK.foreach_reverse_)
- fs.deprecation("cannot use `foreach_reverse` with a delegate");
+ deprecation(fs.loc, "cannot use `foreach_reverse` with a delegate");
return retStmt(apply());
case Terror:
return retError();
default:
- fs.error("`foreach`: `%s` is not an aggregate type", fs.aggr.type.toChars());
+ error(fs.loc, "`foreach`: `%s` is not an aggregate type", fs.aggr.type.toChars());
return retError();
}
}
@@ -1421,7 +1423,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
fs.lwr = fs.lwr.optimize(WANTvalue);
if (!fs.lwr.type)
{
- fs.error("invalid range lower bound `%s`", fs.lwr.toChars());
+ error(fs.loc, "invalid range lower bound `%s`", fs.lwr.toChars());
return setError();
}
@@ -1430,7 +1432,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
fs.upr = fs.upr.optimize(WANTvalue);
if (!fs.upr.type)
{
- fs.error("invalid range upper bound `%s`", fs.upr.toChars());
+ error(fs.loc, "invalid range upper bound `%s`", fs.upr.toChars());
return setError();
}
@@ -1584,7 +1586,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (fs.key.type.constConv(fs.prm.type) == MATCH.nomatch)
{
- fs.error("argument type mismatch, `%s` to `ref %s`", fs.key.type.toChars(), fs.prm.type.toChars());
+ error(fs.loc, "argument type mismatch, `%s` to `ref %s`", fs.key.type.toChars(), fs.prm.type.toChars());
return setError();
}
}
@@ -1762,14 +1764,14 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
/* Should this be allowed?
*/
- ps.error("`pragma(lib)` not allowed as statement");
+ error(ps.loc, "`pragma(lib)` not allowed as statement");
return setError();
}
else
{
if (!ps.args || ps.args.length != 1)
{
- ps.error("`string` expected for library name");
+ error(ps.loc, "`string` expected for library name");
return setError();
}
else
@@ -1778,7 +1780,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (!se)
return setError();
- if (global.params.verbose)
+ if (global.params.v.verbose)
{
message("library %.*s", cast(int)se.len, se.string);
}
@@ -1789,7 +1791,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
/* Should this be allowed?
*/
- ps.error("`pragma(linkerDirective)` not allowed as statement");
+ error(ps.loc, "`pragma(linkerDirective)` not allowed as statement");
return setError();
}
else if (ps.ident == Id.startaddress)
@@ -1805,13 +1807,32 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
else
{
- ps.error("`pragma(inline)` is not inside a function");
+ error(ps.loc, "`pragma(inline)` is not inside a function");
return setError();
}
}
+ else if (ps.ident == Id.mangle)
+ {
+ auto es = ps._body ? ps._body.isExpStatement() : null;
+ auto de = es ? es.exp.isDeclarationExp() : null;
+ if (!de)
+ {
+ error(ps.loc, "`pragma(mangle)` must be attached to a declaration");
+ return setError();
+ }
+ const se = ps.args && (*ps.args).length == 1 ? semanticString(sc, (*ps.args)[0], "pragma mangle argument") : null;
+ if (!se)
+ {
+ error(ps.loc, "`pragma(mangle)` takes a single argument that must be a string literal");
+ return setError();
+ }
+ const cnt = setMangleOverride(de.declaration, cast(const(char)[])se.peekData());
+ if (cnt != 1)
+ assert(0);
+ }
else if (!global.params.ignoreUnsupportedPragmas)
{
- ps.error("unrecognized `pragma(%s)`", ps.ident.toChars());
+ error(ps.loc, "unrecognized `pragma(%s)`", ps.ident.toChars());
return setError();
}
@@ -1819,7 +1840,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (ps.ident == Id.msg || ps.ident == Id.startaddress)
{
- ps.error("`pragma(%s)` is missing a terminating `;`", ps.ident.toChars());
+ error(ps.loc, "`pragma(%s)` is missing a terminating `;`", ps.ident.toChars());
return setError();
}
ps._body = ps._body.statementSemantic(sc);
@@ -1848,6 +1869,37 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
return;
}
+ if (ss.param)
+ {
+ /**
+ * If the switch statement is of form `switch(auto a = exp) { body }`,
+ * rewrite to the following inside it's own scope:
+ *
+ * auto a = exp
+ * switch(a)
+ * { body }
+ */
+ auto statements = new Statements();
+ auto vardecl = new VarDeclaration(ss.param.loc,
+ ss.param.type,
+ ss.param.ident,
+ new ExpInitializer(ss.condition.loc, ss.condition),
+ ss.param.storageClass);
+
+ statements.push(new ExpStatement(ss.param.loc, vardecl));
+
+ ss.condition = new VarExp(ss.param.loc, vardecl, false);
+ ss.param = null;
+
+ statements.push(ss);
+
+ Statement s = new CompoundStatement(ss.loc, statements);
+ s = new ScopeStatement(ss.loc, s, ss.endloc);
+ s = s.statementSemantic(sc);
+ result = s;
+ return;
+ }
+
bool conditionError = false;
ss.condition = ss.condition.expressionSemantic(sc);
ss.condition = resolveProperties(sc, ss.condition);
@@ -1885,7 +1937,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (!ss.condition.isErrorExp())
{
- ss.error("`%s` must be of integral or string type, it is a `%s`",
+ error(ss.loc, "`%s` must be of integral or string type, it is a `%s`",
ss.condition.toChars(), ss.condition.type.toChars());
conditionError = true;
break;
@@ -1924,7 +1976,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (!gcs.exp)
{
- gcs.error("no `case` statement following `goto case;`");
+ error(gcs.loc, "no `case` statement following `goto case;`");
sc.pop();
return setError();
}
@@ -1942,7 +1994,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
}
}
- gcs.error("`case %s` not found", gcs.exp.toChars());
+ error(gcs.loc, "`case %s` not found", gcs.exp.toChars());
sc.pop();
return setError();
}
@@ -1959,9 +2011,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (ed && ss.cases.length < ed.members.length)
{
int missingMembers = 0;
- const maxShown = !global.params.verbose ?
- (global.params.errorSupplementLimit ? global.params.errorSupplementLimit : int.max)
- : int.max;
+ const maxShown = global.params.v.errorSupplementCount();
Lmembers:
foreach (es; *ed.members)
{
@@ -1975,7 +2025,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
continue Lmembers;
}
if (missingMembers == 0)
- ss.error("missing cases for `enum` members in `final switch`:");
+ error(ss.loc, "missing cases for `enum` members in `final switch`:");
if (missingMembers < maxShown)
errorSupplemental(ss.loc, "`%s`", em.toChars());
@@ -1994,13 +2044,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
needswitcherror = true;
}
- if (!sc.sw.sdefault &&
- (!ss.isFinal || needswitcherror || global.params.useAssert == CHECKENABLE.on || sc.func.isSafe))
+ ss.hasDefault = sc.sw.sdefault ||
+ !(!ss.isFinal || needswitcherror || global.params.useAssert == CHECKENABLE.on || sc.func.isSafe);
+ if (!ss.hasDefault)
{
- ss.hasNoDefault = 1;
-
if (!ss.isFinal && (!ss._body || !ss._body.isErrorStatement()) && !(sc.flags & SCOPE.Cfile))
- ss.error("`switch` statement without a `default`; use `final switch` or add `default: assert(0);` or add `default: break;`");
+ error(ss.loc, "`switch` statement without a `default`; use `final switch` or add `default: assert(0);` or add `default: break;`");
// Generate runtime error if the default is hit
auto a = new Statements();
@@ -2051,7 +2100,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
a.reserve(2);
sc.sw.sdefault = new DefaultStatement(ss.loc, s);
a.push(ss._body);
- if (ss._body.blockExit(sc.func, false) & BE.fallthru)
+ if (ss._body.blockExit(sc.func, null) & BE.fallthru)
a.push(new BreakStatement(Loc.initial, null));
a.push(sc.sw.sdefault);
cs = new CompoundStatement(ss.loc, a);
@@ -2187,18 +2236,18 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
/* Flag that we need to do special code generation
* for this, i.e. generate a sequence of if-then-else
*/
- sw.hasVars = 1;
+ sw.hasVars = true;
/* TODO check if v can be uninitialized at that point.
*/
if (!v.isConst() && !v.isImmutable())
{
- cs.error("`case` variables have to be `const` or `immutable`");
+ error(cs.loc, "`case` variables have to be `const` or `immutable`");
}
if (sw.isFinal)
{
- cs.error("`case` variables not allowed in `final switch` statements");
+ error(cs.loc, "`case` variables not allowed in `final switch` statements");
errors = true;
}
@@ -2213,7 +2262,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (!scx.search(cs.exp.loc, v.ident, null))
{
- cs.error("`case` variable `%s` declared at %s cannot be declared in `switch` body",
+ error(cs.loc, "`case` variable `%s` declared at %s cannot be declared in `switch` body",
v.toChars(), v.loc.toChars());
errors = true;
}
@@ -2229,7 +2278,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
cs.exp = se;
else if (!cs.exp.isIntegerExp() && !cs.exp.isErrorExp())
{
- cs.error("`case` expression must be a compile-time `string` or an integral constant, not `%s`", cs.exp.toChars());
+ error(cs.loc, "`case` expression must be a compile-time `string` or an integral constant, not `%s`", cs.exp.toChars());
errors = true;
}
@@ -2242,7 +2291,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (cs2.exp.equals(cs.exp))
{
// https://issues.dlang.org/show_bug.cgi?id=15909
- cs.error("duplicate `case %s` in `switch` statement", initialExp.toChars());
+ error(cs.loc, "duplicate `case %s` in `switch` statement", initialExp.toChars());
errors = true;
break;
}
@@ -2265,18 +2314,18 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (sc.sw.tf != sc.tf)
{
- cs.error("`switch` and `case` are in different `finally` blocks");
+ error(cs.loc, "`switch` and `case` are in different `finally` blocks");
errors = true;
}
if (sc.sw.tryBody != sc.tryBody)
{
- cs.error("case cannot be in different `try` block level from `switch`");
+ error(cs.loc, "case cannot be in different `try` block level from `switch`");
errors = true;
}
}
else
{
- cs.error("`case` not in `switch` statement");
+ error(cs.loc, "`case` not in `switch` statement");
errors = true;
}
@@ -2299,7 +2348,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
SwitchStatement sw = sc.sw;
if (sw is null)
{
- crs.error("case range not in `switch` statement");
+ error(crs.loc, "case range not in `switch` statement");
return setError();
}
@@ -2307,7 +2356,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
bool errors = false;
if (sw.isFinal)
{
- crs.error("case ranges not allowed in `final switch`");
+ error(crs.loc, "case ranges not allowed in `final switch`");
errors = true;
}
@@ -2336,14 +2385,14 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
uinteger_t lval = crs.last.toInteger();
if ((crs.first.type.isunsigned() && fval > lval) || (!crs.first.type.isunsigned() && cast(sinteger_t)fval > cast(sinteger_t)lval))
{
- crs.error("first `case %s` is greater than last `case %s`", crs.first.toChars(), crs.last.toChars());
+ error(crs.loc, "first `case %s` is greater than last `case %s`", crs.first.toChars(), crs.last.toChars());
errors = true;
lval = fval;
}
if (lval - fval > 256)
{
- crs.error("had %llu cases which is more than 257 cases in case range", 1 + lval - fval);
+ error(crs.loc, "had %llu cases which is more than 257 cases in case range", 1 + lval - fval);
errors = true;
lval = fval + 256;
}
@@ -2385,30 +2434,30 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (sc.sw.sdefault)
{
- ds.error("`switch` statement already has a default");
+ error(ds.loc, "`switch` statement already has a default");
errors = true;
}
sc.sw.sdefault = ds;
if (sc.sw.tf != sc.tf)
{
- ds.error("`switch` and `default` are in different `finally` blocks");
+ error(ds.loc, "`switch` and `default` are in different `finally` blocks");
errors = true;
}
if (sc.sw.tryBody != sc.tryBody)
{
- ds.error("default cannot be in different `try` block level from `switch`");
+ error(ds.loc, "default cannot be in different `try` block level from `switch`");
errors = true;
}
if (sc.sw.isFinal)
{
- ds.error("`default` statement not allowed in `final switch` statement");
+ error(ds.loc, "`default` statement not allowed in `final switch` statement");
errors = true;
}
}
else
{
- ds.error("`default` not in `switch` statement");
+ error(ds.loc, "`default` not in `switch` statement");
errors = true;
}
@@ -2429,12 +2478,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
gds.sw = sc.sw;
if (!gds.sw)
{
- gds.error("`goto default` not in `switch` statement");
+ error(gds.loc, "`goto default` not in `switch` statement");
return setError();
}
if (gds.sw.isFinal)
{
- gds.error("`goto default` not allowed in `final switch` statement");
+ error(gds.loc, "`goto default` not allowed in `final switch` statement");
return setError();
}
result = gds;
@@ -2447,7 +2496,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (!sc.sw)
{
- gcs.error("`goto case` not in `switch` statement");
+ error(gcs.loc, "`goto case` not in `switch` statement");
return setError();
}
@@ -2512,7 +2561,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
bool errors = false;
if (sc.flags & SCOPE.contract)
{
- rs.error("`return` statements cannot be in contracts");
+ error(rs.loc, "`return` statements cannot be in contracts");
errors = true;
}
if (sc.os)
@@ -2521,18 +2570,18 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
// Deprecated in 2.100, transform into an error in 2.112
if (sc.os.tok == TOK.onScopeFailure)
{
- rs.deprecation("`return` statements cannot be in `scope(failure)` bodies.");
+ deprecation(rs.loc, "`return` statements cannot be in `scope(failure)` bodies.");
deprecationSupplemental(rs.loc, "Use try-catch blocks for this purpose");
}
else
{
- rs.error("`return` statements cannot be in `%s` bodies", Token.toChars(sc.os.tok));
+ error(rs.loc, "`return` statements cannot be in `%s` bodies", Token.toChars(sc.os.tok));
errors = true;
}
}
if (sc.tf)
{
- rs.error("`return` statements cannot be in `finally` bodies");
+ error(rs.loc, "`return` statements cannot be in `finally` bodies");
errors = true;
}
@@ -2540,7 +2589,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (rs.exp)
{
- rs.error("cannot return expression from constructor");
+ error(rs.loc, "cannot return expression from constructor");
errors = true;
}
@@ -2595,7 +2644,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (!convToVoid)
{
- rs.error("cannot return non-void from `void` function");
+ error(rs.loc, "cannot return non-void from `void` function");
errors = true;
rs.exp = new CastExp(rs.loc, rs.exp, Type.tvoid);
rs.exp = rs.exp.expressionSemantic(sc);
@@ -2613,7 +2662,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
//errors = true;
}
if (global.endGagging(olderrors))
- rs.exp.deprecation("`%s` has no effect", rs.exp.toChars());
+ deprecation(rs.exp.loc, "`%s` has no effect", rs.exp.toChars());
/* Replace:
* return exp;
@@ -2655,7 +2704,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
else if (!rs.exp.isErrorExp())
{
- rs.error("expected return type of `%s`, not `%s`:",
+ error(rs.loc, "expected return type of `%s`, not `%s`:",
tret.toChars(),
rs.exp.type.toChars());
errorSupplemental((fd.returns) ? (*fd.returns)[0].loc : fd.loc,
@@ -2688,7 +2737,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
// checking for `shared`, make sure we were right
if (global.params.noSharedAccess == FeatureState.enabled && rs.exp.type.isShared())
{
- fd.error("function returns `shared` but cannot be inferred `ref`");
+ .error(fd.loc, "%s `%s` function returns `shared` but cannot be inferred `ref`", fd.kind, fd.toPrettyChars);
supplemental();
}
}
@@ -2697,7 +2746,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
/* May return by ref
*/
- if (checkReturnEscapeRef(sc, rs.exp, true))
+ Scope* sc2 = sc.push();
+ sc2.eSink = global.errorSinkNull;
+ bool err = checkReturnEscapeRef(sc2, rs.exp, true);
+ sc2.pop();
+
+ if (err)
turnOffRef(() { checkReturnEscapeRef(sc, rs.exp, false); });
else if (!rs.exp.type.constConv(tf.next))
turnOffRef(
@@ -2736,7 +2790,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
if (tf.next.ty != Terror)
{
- rs.error("mismatched function return type inference of `void` and `%s`", tf.next.toChars());
+ error(rs.loc, "mismatched function return type inference of `void` and `%s`", tf.next.toChars());
}
errors = true;
tf.next = Type.terror;
@@ -2755,15 +2809,15 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (tbret.ty != Terror)
{
if (e0)
- rs.error("expected return type of `%s`, not `%s`", tret.toChars(), resType.toChars());
+ error(rs.loc, "expected return type of `%s`, not `%s`", tret.toChars(), resType.toChars());
else if (tbret.isTypeNoreturn())
{
- rs.error("cannot return from `noreturn` function");
+ error(rs.loc, "cannot return from `noreturn` function");
.errorSupplemental(rs.loc,
"Consider adding an endless loop, `assert(0)`, or another `noreturn` expression");
}
else
- rs.error("`return` expression expected");
+ error(rs.loc, "`return` expression expected");
}
errors = true;
}
@@ -2777,7 +2831,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
// If any branches have called a ctor, but this branch hasn't, it's an error
if (sc.ctorflow.callSuper & CSX.any_ctor && !(sc.ctorflow.callSuper & (CSX.this_ctor | CSX.super_ctor)))
{
- rs.error("`return` without calling constructor");
+ error(rs.loc, "`return` without calling constructor");
errors = true;
}
@@ -2790,7 +2844,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
bool mustInit = (v.storage_class & STC.nodefaultctor || v.type.needsNested());
if (mustInit && !(sc.ctorflow.fieldinit[i].csx & CSX.this_ctor))
{
- rs.error("an earlier `return` statement skips field `%s` initialization", v.toChars());
+ error(rs.loc, "an earlier `return` statement skips field `%s` initialization", v.toChars());
errors = true;
}
}
@@ -2904,9 +2958,9 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
Statement s = ls.statement;
if (!s || !s.hasBreak())
- bs.error("label `%s` has no `break`", bs.ident.toChars());
+ error(bs.loc, "label `%s` has no `break`", bs.ident.toChars());
else if (ls.tf != sc.tf)
- bs.error("cannot break out of `finally` block");
+ error(bs.loc, "cannot break out of `finally` block");
else
{
ls.breaks = true;
@@ -2916,14 +2970,14 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
return setError();
}
}
- bs.error("enclosing label `%s` for `break` not found", bs.ident.toChars());
+ error(bs.loc, "enclosing label `%s` for `break` not found", bs.ident.toChars());
return setError();
}
else if (!sc.sbreak)
{
if (sc.os && sc.os.tok != TOK.onScopeFailure)
{
- bs.error("`break` is not allowed inside `%s` bodies", Token.toChars(sc.os.tok));
+ error(bs.loc, "`break` is not allowed inside `%s` bodies", Token.toChars(sc.os.tok));
}
else if (sc.fes)
{
@@ -2932,12 +2986,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
return;
}
else
- bs.error("`break` is not inside a loop or `switch`");
+ error(bs.loc, "`break` is not inside a loop or `switch`");
return setError();
}
else if (sc.sbreak.isForwardingStatement())
{
- bs.error("must use labeled `break` within `static foreach`");
+ error(bs.loc, "must use labeled `break` within `static foreach`");
}
result = bs;
}
@@ -2992,9 +3046,9 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
{
Statement s = ls.statement;
if (!s || !s.hasContinue())
- cs.error("label `%s` has no `continue`", cs.ident.toChars());
+ error(cs.loc, "label `%s` has no `continue`", cs.ident.toChars());
else if (ls.tf != sc.tf)
- cs.error("cannot continue out of `finally` block");
+ error(cs.loc, "cannot continue out of `finally` block");
else
{
result = cs;
@@ -3003,14 +3057,14 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
return setError();
}
}
- cs.error("enclosing label `%s` for `continue` not found", cs.ident.toChars());
+ error(cs.loc, "enclosing label `%s` for `continue` not found", cs.ident.toChars());
return setError();
}
else if (!sc.scontinue)
{
if (sc.os && sc.os.tok != TOK.onScopeFailure)
{
- cs.error("`continue` is not allowed inside `%s` bodies", Token.toChars(sc.os.tok));
+ error(cs.loc, "`continue` is not allowed inside `%s` bodies", Token.toChars(sc.os.tok));
}
else if (sc.fes)
{
@@ -3019,12 +3073,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
return;
}
else
- cs.error("`continue` is not inside a loop");
+ error(cs.loc, "`continue` is not inside a loop");
return setError();
}
else if (sc.scontinue.isForwardingStatement())
{
- cs.error("must use labeled `continue` within `static foreach`");
+ error(cs.loc, "must use labeled `continue` within `static foreach`");
}
result = cs;
}
@@ -3050,7 +3104,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
ClassDeclaration cd = ss.exp.type.isClassHandle();
if (!cd)
{
- ss.error("can only `synchronize` on class objects, not `%s`", ss.exp.type.toChars());
+ error(ss.loc, "can only `synchronize` on class objects, not `%s`", ss.exp.type.toChars());
return setError();
}
else if (cd.isInterfaceDeclaration())
@@ -3060,7 +3114,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
*/
if (!ClassDeclaration.object)
{
- ss.error("missing or corrupt object.d");
+ error(ss.loc, "missing or corrupt object.d");
fatal();
}
@@ -3085,7 +3139,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
cs.push(new ExpStatement(ss.loc, tmp));
auto args = new Parameters();
- args.push(new Parameter(0, ClassDeclaration.object.type, null, null, null));
+ args.push(new Parameter(Loc.initial, 0, ClassDeclaration.object.type, null, null, null));
FuncDeclaration fdenter = FuncDeclaration.genCfunc(args, Type.tvoid, Id.monitorenter);
Expression e = new CallExp(ss.loc, fdenter, new VarExp(ss.loc, tmp));
@@ -3127,7 +3181,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
cs.push(new ExpStatement(ss.loc, v));
auto enterArgs = new Parameters();
- enterArgs.push(new Parameter(0, t.pointerTo(), null, null, null));
+ enterArgs.push(new Parameter(Loc.initial, 0, t.pointerTo(), null, null, null));
FuncDeclaration fdenter = FuncDeclaration.genCfunc(enterArgs, Type.tvoid, Id.criticalenter, STC.nothrow_);
Expression e = new AddrExp(ss.loc, tmpExp);
@@ -3137,7 +3191,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
cs.push(new ExpStatement(ss.loc, e));
auto exitArgs = new Parameters();
- exitArgs.push(new Parameter(0, t, null, null, null));
+ exitArgs.push(new Parameter(Loc.initial, 0, t, null, null, null));
FuncDeclaration fdexit = FuncDeclaration.genCfunc(exitArgs, Type.tvoid, Id.criticalexit, STC.nothrow_);
e = new CallExp(ss.loc, fdexit, tmpExp);
@@ -3177,7 +3231,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
Dsymbol s = (cast(TypeExp)ws.exp).type.toDsymbol(sc);
if (!s || !s.isScopeDsymbol())
{
- ws.error("`with` type `%s` has no members", ws.exp.toChars());
+ error(ws.loc, "`with` type `%s` has no members", ws.exp.toChars());
return setError();
}
sym = new WithScopeSymbol(ws);
@@ -3252,7 +3306,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
}
else
{
- ws.error("`with` expression types must be enums or aggregates or pointers to them, not `%s`", olde.type.toChars());
+ error(ws.loc, "`with` expression types must be enums or aggregates or pointers to them, not `%s`", olde.type.toChars());
return setError();
}
}
@@ -3281,13 +3335,13 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (!global.params.useExceptions)
{
- tcs.error("cannot use try-catch statements with -betterC");
+ error(tcs.loc, "cannot use try-catch statements with %s", global.params.betterC ? "-betterC".ptr : "-nothrow".ptr);
return setError();
}
if (!ClassDeclaration.throwable)
{
- tcs.error("cannot use try-catch statements because `object.Throwable` was not declared");
+ error(tcs.loc, "cannot use try-catch statements because `object.Throwable` was not declared");
return setError();
}
@@ -3320,7 +3374,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
const sj = cj.loc.toChars();
if (c.type.toBasetype().implicitConvTo(cj.type.toBasetype()))
{
- tcs.error("`catch` at %s hides `catch` at %s", sj, si);
+ error(tcs.loc, "`catch` at %s hides `catch` at %s", sj, si);
catchErrors = true;
}
}
@@ -3331,7 +3385,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
sc.func.hasCatches = true;
if (flags == (FLAGcpp | FLAGd))
{
- tcs.error("cannot mix catching D and C++ exceptions in the same try-catch");
+ error(tcs.loc, "cannot mix catching D and C++ exceptions in the same try-catch");
catchErrors = true;
}
}
@@ -3353,7 +3407,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
/* If the try body never throws, we can eliminate any catches
* of recoverable exceptions.
*/
- if (!(tcs._body.blockExit(sc.func, false) & BE.throw_) && ClassDeclaration.exception)
+ if (!(tcs._body.blockExit(sc.func, null) & BE.throw_) && ClassDeclaration.exception)
{
foreach_reverse (i; 0 .. tcs.catches.length)
{
@@ -3403,7 +3457,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
return;
}
- auto blockexit = tfs._body.blockExit(sc.func, false);
+ auto blockexit = tfs._body.blockExit(sc.func, null);
// if not worrying about exceptions
if (!(global.params.useExceptions && ClassDeclaration.throwable))
@@ -3429,7 +3483,7 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
// https://issues.dlang.org/show_bug.cgi?id=23159
if (!global.params.useExceptions)
{
- oss.error("`%s` cannot be used with -betterC", Token.toChars(oss.tok));
+ error(oss.loc, "`%s` cannot be used with -betterC", Token.toChars(oss.tok));
return setError();
}
@@ -3439,12 +3493,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (sc.os && sc.os.tok != TOK.onScopeFailure)
{
// If enclosing is scope(success) or scope(exit), this will be placed in finally block.
- oss.error("cannot put `%s` statement inside `%s`", Token.toChars(oss.tok), Token.toChars(sc.os.tok));
+ error(oss.loc, "cannot put `%s` statement inside `%s`", Token.toChars(oss.tok), Token.toChars(sc.os.tok));
return setError();
}
if (sc.tf)
{
- oss.error("cannot put `%s` statement inside `finally` block", Token.toChars(oss.tok));
+ error(oss.loc, "cannot put `%s` statement inside `finally` block", Token.toChars(oss.tok));
return setError();
}
}
@@ -3556,12 +3610,12 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
if (ls.loc == ls2.loc)
{
ls2.duplicated = true;
- ls.error("label `%s` is duplicated", ls2.toChars());
+ error(ls.loc, "label `%s` is duplicated", ls2.toChars());
.errorSupplemental(ls2.loc, "labels cannot be used in a static foreach with more than 1 iteration");
}
else
{
- ls.error("label `%s` is already defined", ls2.toChars());
+ error(ls.loc, "label `%s` is already defined", ls2.toChars());
.errorSupplemental(ls2.loc, "first definition is here");
}
return setError();
@@ -3620,14 +3674,14 @@ Statement statementSemanticVisit(Statement s, Scope* sc)
assert(sc.func);
if (!(cas.stc & STC.pure_) && sc.func.setImpure(cas.loc, "`asm` statement is assumed to be impure - mark it with `pure` if it is not"))
- cas.error("`asm` statement is assumed to be impure - mark it with `pure` if it is not");
+ error(cas.loc, "`asm` statement is assumed to be impure - mark it with `pure` if it is not");
if (!(cas.stc & STC.nogc) && sc.func.setGC(cas.loc, "`asm` statement in %s `%s` is assumed to use the GC - mark it with `@nogc` if it does not"))
- cas.error("`asm` statement is assumed to use the GC - mark it with `@nogc` if it does not");
+ error(cas.loc, "`asm` statement is assumed to use the GC - mark it with `@nogc` if it does not");
// @@@DEPRECATED_2.114@@@
// change deprecation() to error(), add `else` and remove `| STC.safe`
// to turn deprecation into an error when deprecation cycle is over
if (cas.stc & STC.safe)
- cas.deprecation("`asm` statement cannot be marked `@safe`, use `@system` or `@trusted` instead");
+ deprecation(cas.loc, "`asm` statement cannot be marked `@safe`, use `@system` or `@trusted` instead");
if (!(cas.stc & (STC.trusted | STC.safe)))
{
sc.setUnsafe(false, cas.loc, "`asm` statement is assumed to be `@system` - mark it with `@trusted` if it is not");
@@ -3696,7 +3750,7 @@ public bool throwSemantic(const ref Loc loc, ref Expression exp, Scope* sc)
{
if (!global.params.useExceptions)
{
- loc.error("cannot use `throw` statements with -betterC");
+ loc.error("cannot use `throw` statements with %s", global.params.betterC ? "-betterC".ptr : "-nothrow".ptr);
return false;
}
@@ -3767,7 +3821,7 @@ private extern(D) Expression applyOpApply(ForeachStatement fs, Expression flde,
return null;
if (ec.type != Type.tint32)
{
- fs.error("`opApply()` function for `%s` must return an `int`", tab.toChars());
+ error(fs.loc, "`opApply()` function for `%s` must return an `int`", tab.toChars());
return null;
}
return ec;
@@ -3792,7 +3846,7 @@ private extern(D) Expression applyDelegate(ForeachStatement fs, Expression flde,
return null;
if (ec.type != Type.tint32)
{
- fs.error("`opApply()` function for `%s` must return an `int`", tab.toChars());
+ error(fs.loc, "`opApply()` function for `%s` must return an `int`", tab.toChars());
return null;
}
return ec;
@@ -3841,13 +3895,13 @@ private extern(D) Expression applyArray(ForeachStatement fs, Expression flde,
FuncDeclaration fdapply;
TypeDelegate dgty;
auto params = new Parameters();
- params.push(new Parameter(STC.in_, tn.arrayOf(), null, null, null));
+ params.push(new Parameter(Loc.initial, STC.in_, tn.arrayOf(), null, null, null));
auto dgparams = new Parameters();
- dgparams.push(new Parameter(0, Type.tvoidptr, null, null, null));
+ dgparams.push(new Parameter(Loc.initial, 0, Type.tvoidptr, null, null, null));
if (dim == 2)
- dgparams.push(new Parameter(0, Type.tvoidptr, null, null, null));
+ dgparams.push(new Parameter(Loc.initial, 0, Type.tvoidptr, null, null, null));
dgty = new TypeDelegate(new TypeFunction(ParameterList(dgparams), Type.tint32, LINK.d));
- params.push(new Parameter(0, dgty, null, null, null));
+ params.push(new Parameter(Loc.initial, 0, dgty, null, null, null));
fdapply = FuncDeclaration.genCfunc(params, Type.tint32, fdname.ptr);
if (tab.isTypeSArray())
@@ -3879,7 +3933,7 @@ private extern(D) Expression applyAssocArray(ForeachStatement fs, Expression fld
Type ti = (isRef ? taa.index.addMod(MODFlags.const_) : taa.index);
if (isRef ? !ti.constConv(ta) : !ti.implicitConvTo(ta))
{
- fs.error("`foreach`: index must be type `%s`, not `%s`",
+ error(fs.loc, "`foreach`: index must be type `%s`, not `%s`",
ti.toChars(), ta.toChars());
return null;
}
@@ -3890,7 +3944,7 @@ private extern(D) Expression applyAssocArray(ForeachStatement fs, Expression fld
Type taav = taa.nextOf();
if (isRef ? !taav.constConv(ta) : !taav.implicitConvTo(ta))
{
- fs.error("`foreach`: value must be type `%s`, not `%s`",
+ error(fs.loc, "`foreach`: value must be type `%s`, not `%s`",
taav.toChars(), ta.toChars());
return null;
}
@@ -3908,14 +3962,14 @@ private extern(D) Expression applyAssocArray(ForeachStatement fs, Expression fld
if (!fdapply[i])
{
auto params = new Parameters();
- params.push(new Parameter(0, Type.tvoid.pointerTo(), null, null, null));
- params.push(new Parameter(STC.const_, Type.tsize_t, null, null, null));
+ params.push(new Parameter(Loc.initial, 0, Type.tvoid.pointerTo(), null, null, null));
+ params.push(new Parameter(Loc.initial, STC.const_, Type.tsize_t, null, null, null));
auto dgparams = new Parameters();
- dgparams.push(new Parameter(0, Type.tvoidptr, null, null, null));
+ dgparams.push(new Parameter(Loc.initial, 0, Type.tvoidptr, null, null, null));
if (dim == 2)
- dgparams.push(new Parameter(0, Type.tvoidptr, null, null, null));
+ dgparams.push(new Parameter(Loc.initial, 0, Type.tvoidptr, null, null, null));
fldeTy[i] = new TypeDelegate(new TypeFunction(ParameterList(dgparams), Type.tint32, LINK.d));
- params.push(new Parameter(0, fldeTy[i], null, null, null));
+ params.push(new Parameter(Loc.initial, 0, fldeTy[i], null, null, null));
fdapply[i] = FuncDeclaration.genCfunc(params, Type.tint32, i ? Id._aaApply2 : Id._aaApply);
}
@@ -3967,7 +4021,7 @@ private extern(D) Statement loopReturn(Expression e, Statements* cases, const re
}
s = new CompoundStatement(loc, a);
- return new SwitchStatement(loc, e, s, false);
+ return new SwitchStatement(loc, null, e, s, false, loc);
}
/*************************************
@@ -4000,7 +4054,7 @@ private FuncExp foreachBodyToFunction(Scope* sc, ForeachStatement fs, TypeFuncti
{
if (!(prm.storageClass & STC.ref_))
{
- fs.error("`foreach`: cannot make `%s` `ref`", p.ident.toChars());
+ error(fs.loc, "`foreach`: cannot make `%s` `ref`", p.ident.toChars());
return null;
}
goto LcopyArg;
@@ -4026,7 +4080,7 @@ private FuncExp foreachBodyToFunction(Scope* sc, ForeachStatement fs, TypeFuncti
Statement s = new ExpStatement(fs.loc, v);
fs._body = new CompoundStatement(fs.loc, s, fs._body);
}
- params.push(new Parameter(stc, p.type, id, null, null));
+ params.push(new Parameter(fs.loc, stc, p.type, id, null, null));
}
// https://issues.dlang.org/show_bug.cgi?id=13840
// Throwable nested function inside nothrow function is acceptable.
@@ -4328,7 +4382,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
const bool skipCheck = isStatic && needExpansion;
if (!skipCheck && (dim < 1 || dim > 2))
{
- fs.error("only one (value) or two (key,value) arguments allowed for sequence `foreach`");
+ error(fs.loc, "only one (value) or two (key,value) arguments allowed for sequence `foreach`");
return returnEarly();
}
@@ -4390,7 +4444,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
// Declare key
if (p.isReference() || p.isLazy())
{
- fs.error("no storage class for key `%s`", p.ident.toChars());
+ error(fs.loc, "no storage class for key `%s`", p.ident.toChars());
return returnEarly();
}
@@ -4405,7 +4459,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
if (!p.type.isintegral())
{
- fs.error("foreach: key cannot be of non-integral type `%s`",
+ error(fs.loc, "foreach: key cannot be of non-integral type `%s`",
p.type.toChars());
return returnEarly();
}
@@ -4416,7 +4470,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
dimrange.imax = SignExtendedNumber(dimrange.imax.value-1);
if (!IntRange.fromType(p.type).contains(dimrange))
{
- fs.error("index type `%s` cannot cover index range 0..%llu",
+ error(fs.loc, "index type `%s` cannot cover index range 0..%llu",
p.type.toChars(), cast(ulong)length);
return returnEarly();
}
@@ -4450,7 +4504,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
if (storageClass & (STC.out_ | STC.lazy_) ||
storageClass & STC.ref_ && !te)
{
- fs.error("no storage class for value `%s`", ident.toChars());
+ error(fs.loc, "no storage class for value `%s`", ident.toChars());
return false;
}
Declaration var;
@@ -4478,7 +4532,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
}
else if (storageClass & STC.alias_)
{
- fs.error("`foreach` loop variable cannot be both `enum` and `alias`");
+ error(fs.loc, "`foreach` loop variable cannot be both `enum` and `alias`");
return false;
}
@@ -4487,12 +4541,12 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
var = new AliasDeclaration(loc, ident, ds);
if (storageClass & STC.ref_)
{
- fs.error("symbol `%s` cannot be `ref`", ds.toChars());
+ error(fs.loc, "symbol `%s` cannot be `ref`", ds.toChars());
return false;
}
if (paramtype)
{
- fs.error("cannot specify element type for symbol `%s`", ds.toChars());
+ error(fs.loc, "cannot specify element type for symbol `%s`", ds.toChars());
return false;
}
}
@@ -4501,7 +4555,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
var = new AliasDeclaration(loc, ident, e.type);
if (paramtype)
{
- fs.error("cannot specify element type for type `%s`", e.type.toChars());
+ error(fs.loc, "cannot specify element type for type `%s`", e.type.toChars());
return false;
}
}
@@ -4522,17 +4576,17 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
{
if (!isStatic)
{
- fs.error("constant value `%s` cannot be `ref`", ie.toChars());
+ error(fs.loc, "constant value `%s` cannot be `ref`", toChars(ie));
}
else
{
if (!needExpansion)
{
- fs.error("constant value `%s` cannot be `ref`", ie.toChars());
+ error(fs.loc, "constant value `%s` cannot be `ref`", toChars(ie));
}
else
{
- fs.error("constant value `%s` cannot be `ref`", ident.toChars());
+ error(fs.loc, "constant value `%s` cannot be `ref`", ident.toChars());
}
}
return false;
@@ -4548,7 +4602,7 @@ public auto makeTupleForeach(Scope* sc, bool isStatic, bool isDecl, ForeachState
var = new AliasDeclaration(loc, ident, t);
if (paramtype)
{
- fs.error("cannot specify element type for symbol `%s`", fs.toChars());
+ error(fs.loc, "cannot specify element type for symbol `%s`", fs.toChars());
return false;
}
}
@@ -4833,7 +4887,7 @@ private Statements* flatten(Statement statement, Scope* sc)
const bool doUnittests = global.params.useUnitTests || global.params.ddoc.doOutput || global.params.dihdr.doOutput;
auto loc = adjustLocForMixin(str, cs.loc, global.params.mixinOut);
scope p = new Parser!ASTCodegen(loc, sc._module, str, false, global.errorSink, &global.compileEnv, doUnittests);
- p.transitionIn = global.params.vin;
+ p.transitionIn = global.params.v.vin;
p.nextToken();
auto a = new Statements();
@@ -5028,3 +5082,130 @@ bool pragmaStartAddressSemantic(Loc loc, Scope* sc, Expressions* args)
}
return true;
}
+
+/************************************
+ * Check for skipped variable declarations.
+ * Params:
+ * ss = statement to check
+ * Returns:
+ * true if error
+ */
+private bool checkLabel(SwitchStatement ss)
+{
+ /*
+ * Checks the scope of a label for existing variable declaration.
+ * Params:
+ * vd = last variable declared before this case/default label
+ * Returns: `true` if the variables declared in this label would be skipped.
+ */
+ bool checkVar(VarDeclaration vd)
+ {
+ for (auto v = vd; v && v != ss.lastVar; v = v.lastVar)
+ {
+ if (v.isDataseg() || (v.storage_class & (STC.manifest | STC.temp) && vd.ident != Id.withSym) || v._init.isVoidInitializer())
+ continue;
+ if (vd.ident == Id.withSym)
+ error(ss.loc, "`switch` skips declaration of `with` temporary");
+ else
+ error(ss.loc, "`switch` skips declaration of variable `%s`", v.toPrettyChars());
+ errorSupplemental(v.loc, "declared here");
+ return true;
+ }
+ return false;
+ }
+
+ enum error = true;
+
+ if (ss.sdefault && checkVar(ss.sdefault.lastVar))
+ return !error; // return error once fully deprecated
+
+ foreach (scase; *ss.cases)
+ {
+ if (scase && checkVar(scase.lastVar))
+ return !error; // return error once fully deprecated
+ }
+ return !error;
+}
+
+
+/**************
+ * Check for skipped variable declarations.
+ * Params:
+ * gs = statement to check
+ * Returns: true for error
+ */
+bool checkLabel(GotoStatement gs)
+{
+ if (!gs.label.statement)
+ return true; // error should have been issued for this already
+
+ if (gs.label.statement.os != gs.os)
+ {
+ if (gs.os && gs.os.tok == TOK.onScopeFailure && !gs.label.statement.os)
+ {
+ // Jump out from scope(failure) block is allowed.
+ }
+ else
+ {
+ if (gs.label.statement.os)
+ error(gs.loc, "cannot `goto` in to `%s` block", Token.toChars(gs.label.statement.os.tok));
+ else
+ error(gs.loc, "cannot `goto` out of `%s` block", Token.toChars(gs.os.tok));
+ return true;
+ }
+ }
+
+ if (gs.label.statement.tf != gs.tf)
+ {
+ error(gs.loc, "cannot `goto` in or out of `finally` block");
+ return true;
+ }
+
+ if (gs.label.statement.inCtfeBlock && !gs.inCtfeBlock)
+ {
+ error(gs.loc, "cannot `goto` into `if (__ctfe)` block");
+ return true;
+ }
+
+ Statement stbnext;
+ for (auto stb = gs.tryBody; stb != gs.label.statement.tryBody; stb = stbnext)
+ {
+ if (!stb)
+ {
+ error(gs.loc, "cannot `goto` into `try` block");
+ return true;
+ }
+ if (auto stf = stb.isTryFinallyStatement())
+ stbnext = stf.tryBody;
+ else if (auto stc = stb.isTryCatchStatement())
+ stbnext = stc.tryBody;
+ else
+ assert(0);
+ }
+
+ VarDeclaration vd = gs.label.statement.lastVar;
+ if (!vd || vd.isDataseg() || (vd.storage_class & STC.manifest))
+ return false;
+
+ VarDeclaration last = gs.lastVar;
+ while (last && last != vd)
+ last = last.lastVar;
+ if (last == vd)
+ {
+ // All good, the label's scope has no variables
+ }
+ else if (vd.storage_class & STC.exptemp)
+ {
+ // Lifetime ends at end of expression, so no issue with skipping the statement
+ }
+ else
+ {
+ if (vd.ident == Id.withSym)
+ error(gs.loc, "`goto` skips declaration of `with` temporary");
+ else
+ error(gs.loc, "`goto` skips declaration of variable `%s`", vd.toPrettyChars());
+ errorSupplemental(vd.loc, "declared here");
+ return true;
+ }
+ return false;
+}
diff --git a/gcc/d/dmd/staticcond.d b/gcc/d/dmd/staticcond.d
index aa6f37c..45e7773 100644
--- a/gcc/d/dmd/staticcond.d
+++ b/gcc/d/dmd/staticcond.d
@@ -111,7 +111,8 @@ bool evalStaticCondition(Scope* sc, Expression original, Expression e, out bool
const opt = e.toBool();
if (opt.isEmpty())
{
- e.error("expression `%s` is not constant", e.toChars());
+ if (!e.type.isTypeError())
+ error(e.loc, "expression `%s` is not constant", e.toChars());
errors = true;
return false;
}
diff --git a/gcc/d/dmd/tokens.d b/gcc/d/dmd/tokens.d
index 950c830..c76d549 100644
--- a/gcc/d/dmd/tokens.d
+++ b/gcc/d/dmd/tokens.d
@@ -124,6 +124,7 @@ enum TOK : ubyte
// Leaf operators
identifier,
string_,
+ hexadecimalString,
this_,
super_,
error,
@@ -855,6 +856,7 @@ extern (C++) struct Token
TOK.wcharLiteral: "wcharv",
TOK.dcharLiteral: "dcharv",
TOK.wchar_tLiteral: "wchar_tv",
+ TOK.hexadecimalString: "xstring",
TOK.endOfLine: "\\n",
TOK.whitespace: "whitespace",
@@ -898,7 +900,7 @@ extern (C++) struct Token
nothrow:
- int isKeyword() const @safe
+ int isKeyword() pure const @safe @nogc
{
foreach (kw; keywords)
{
@@ -1014,6 +1016,24 @@ nothrow:
p = buf.extractChars();
}
break;
+ case TOK.hexadecimalString:
+ {
+ OutBuffer buf;
+ buf.writeByte('x');
+ buf.writeByte('"');
+ foreach (size_t i; 0 .. len)
+ {
+ if (i)
+ buf.writeByte(' ');
+ buf.printf("%02x", ustring[i]);
+ }
+ buf.writeByte('"');
+ if (postfix)
+ buf.writeByte(postfix);
+ buf.writeByte(0);
+ p = buf.extractData();
+ break;
+ }
case TOK.identifier:
case TOK.enum_:
case TOK.struct_:
diff --git a/gcc/d/dmd/tokens.h b/gcc/d/dmd/tokens.h
index 6c1b979..b1f633f 100644
--- a/gcc/d/dmd/tokens.h
+++ b/gcc/d/dmd/tokens.h
@@ -133,6 +133,7 @@ enum class TOK : unsigned char
// Leaf operators
identifier,
string_,
+ hexadecimalString,
this_,
super_,
error,
diff --git a/gcc/d/dmd/traits.d b/gcc/d/dmd/traits.d
index 0d9c95f..bdc569c 100644
--- a/gcc/d/dmd/traits.d
+++ b/gcc/d/dmd/traits.d
@@ -337,7 +337,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Expression dimError(int expected)
{
- e.error("expected %d arguments for `%s` but had %d", expected, e.ident.toChars(), cast(int)dim);
+ error(e.loc, "expected %d arguments for `%s` but had %d", expected, e.ident.toChars(), cast(int)dim);
return ErrorExp.get();
}
@@ -500,7 +500,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto t = isType(o);
if (!t)
{
- e.error("type expected as second argument of __traits `%s` instead of `%s`",
+ error(e.loc, "type expected as second argument of __traits `%s` instead of `%s`",
e.ident.toChars(), o.toChars());
return ErrorExp.get();
}
@@ -521,7 +521,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto t = isType(o);
if (!t)
{
- e.error("type expected as second argument of __traits `%s` instead of `%s`",
+ error(e.loc, "type expected as second argument of __traits `%s` instead of `%s`",
e.ident.toChars(), o.toChars());
return ErrorExp.get();
}
@@ -543,7 +543,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto t = isType(o);
if (!t)
{
- e.error("type expected as second argument of __traits `%s` instead of `%s`",
+ error(e.loc, "type expected as second argument of __traits `%s` instead of `%s`",
e.ident.toChars(), o.toChars());
return ErrorExp.get();
}
@@ -577,7 +577,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
return fd.isNested() ? True() : False();
}
- e.error("aggregate or function expected instead of `%s`", o.toChars());
+ error(e.loc, "aggregate or function expected instead of `%s`", o.toChars());
return ErrorExp.get();
}
if (e.ident == Id.isDisabled)
@@ -598,7 +598,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
{
// @@@DEPRECATED2.121@@@
// Deprecated in 2.101 - Can be removed from 2.121
- e.deprecation("`traits(isVirtualFunction)` is deprecated. Use `traits(isVirtualMethod)` instead");
+ deprecation(e.loc, "`traits(isVirtualFunction)` is deprecated. Use `traits(isVirtualMethod)` instead");
if (dim != 1)
return dimError(1);
@@ -686,7 +686,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
{
if (!po.ident)
{
- e.error("argument `%s` has no identifier", po.type.toChars());
+ error(e.loc, "argument `%s` has no identifier", po.type.toChars());
return ErrorExp.get();
}
id = po.ident;
@@ -696,7 +696,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Dsymbol s = getDsymbolWithoutExpCtx(o);
if (!s || !s.ident)
{
- e.error("argument `%s` has no identifier", o.toChars());
+ error(e.loc, "argument `%s` has no identifier", o.toChars());
return ErrorExp.get();
}
id = s.ident;
@@ -733,7 +733,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
else
{
if (!isError(o))
- e.error("argument `%s` has no identifier", o.toChars());
+ error(e.loc, "argument `%s` has no identifier", o.toChars());
return ErrorExp.get();
}
assert(fqn);
@@ -758,7 +758,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
if (!s)
{
if (!isError(o))
- e.error("argument `%s` has no visibility", o.toChars());
+ error(e.loc, "argument `%s` has no visibility", o.toChars());
return ErrorExp.get();
}
if (s.semanticRun == PASS.initial)
@@ -809,7 +809,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
}
if (!s || s.isImport())
{
- e.error("argument `%s` has no parent", o.toChars());
+ error(e.loc, "argument `%s` has no parent", o.toChars());
return ErrorExp.get();
}
@@ -845,7 +845,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
ex = exp;
else
{
- e.error("symbol or expression expected as first argument of __traits `child` instead of `%s`", op.toChars());
+ error(e.loc, "symbol or expression expected as first argument of __traits `child` instead of `%s`", op.toChars());
return ErrorExp.get();
}
@@ -854,7 +854,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto symc = getDsymbol(oc);
if (!symc)
{
- e.error("symbol expected as second argument of __traits `child` instead of `%s`", oc.toChars());
+ error(e.loc, "symbol expected as second argument of __traits `child` instead of `%s`", oc.toChars());
return ErrorExp.get();
}
@@ -878,7 +878,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto ex = isExpression((*e.args)[0]);
if (!ex)
{
- e.error("expression expected as second argument of __traits `%s`", e.ident.toChars());
+ error(e.loc, "expression expected as second argument of __traits `%s`", e.ident.toChars());
return ErrorExp.get();
}
ex = ex.ctfeInterpret();
@@ -891,7 +891,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Type t = decoToType(se.toUTF8(sc).peekString());
if (!t)
{
- e.error("cannot determine `%s`", e.toChars());
+ error(e.loc, "cannot determine `%s`", e.toChars());
return ErrorExp.get();
}
return (new TypeExp(e.loc, t)).expressionSemantic(sc);
@@ -909,7 +909,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto ex = isExpression((*e.args)[1]);
if (!ex)
{
- e.error("expression expected as second argument of __traits `%s`", e.ident.toChars());
+ error(e.loc, "expression expected as second argument of __traits `%s`", e.ident.toChars());
return ErrorExp.get();
}
ex = ex.ctfeInterpret();
@@ -921,7 +921,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
b = b.ctfeInterpret();
if (!b.type.equals(Type.tbool))
{
- e.error("`bool` expected as third argument of `__traits(getOverloads)`, not `%s` of type `%s`", b.toChars(), b.type.toChars());
+ error(e.loc, "`bool` expected as third argument of `__traits(getOverloads)`, not `%s` of type `%s`", b.toChars(), b.type.toChars());
return ErrorExp.get();
}
includeTemplates = b.toBool().get();
@@ -930,14 +930,14 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
StringExp se = ex.toStringExp();
if (!se || se.len == 0)
{
- e.error("string expected as second argument of __traits `%s` instead of `%s`", e.ident.toChars(), ex.toChars());
+ error(e.loc, "string expected as second argument of __traits `%s` instead of `%s`", e.ident.toChars(), ex.toChars());
return ErrorExp.get();
}
se = se.toUTF8(sc);
if (se.sz != 1)
{
- e.error("string must be chars");
+ error(e.loc, "string must be chars");
return ErrorExp.get();
}
auto id = Identifier.idPool(se.peekString());
@@ -972,7 +972,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
ex = new DotIdExp(e.loc, ex2, id);
else
{
- e.error("invalid first argument");
+ error(e.loc, "invalid first argument");
return ErrorExp.get();
}
doSemantic:
@@ -1004,13 +1004,13 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Expression eorig = ex;
ex = ex.expressionSemantic(scx);
if (errors < global.errors)
- e.error("`%s` cannot be resolved", eorig.toChars());
+ error(e.loc, "`%s` cannot be resolved", eorig.toChars());
if (e.ident == Id.getVirtualFunctions)
{
// @@@DEPRECATED2.121@@@
// Deprecated in 2.101 - Can be removed from 2.121
- e.deprecation("`traits(getVirtualFunctions)` is deprecated. Use `traits(getVirtualMethods)` instead");
+ deprecation(e.loc, "`traits(getVirtualFunctions)` is deprecated. Use `traits(getVirtualMethods)` instead");
}
/* Create tuple of functions of ex
@@ -1165,7 +1165,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto cd = s ? s.isClassDeclaration() : null;
if (!cd)
{
- e.error("first argument is not a class");
+ error(e.loc, "first argument is not a class");
return ErrorExp.get();
}
if (cd.sizeok != Sizeok.done)
@@ -1174,7 +1174,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
}
if (cd.sizeok != Sizeok.done)
{
- e.error("%s `%s` is forward referenced", cd.kind(), cd.toChars());
+ error(e.loc, "%s `%s` is forward referenced", cd.kind(), cd.toChars());
return ErrorExp.get();
}
@@ -1260,7 +1260,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
if (t)
printf("t = %d %s\n", t.ty, t.toChars());
}
- e.error("first argument is not a symbol");
+ error(e.loc, "first argument is not a symbol");
return ErrorExp.get();
}
@@ -1281,7 +1281,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
if (!tf)
{
- e.error("first argument is not a function");
+ error(e.loc, "first argument is not a function");
return ErrorExp.get();
}
@@ -1324,7 +1324,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
if (!tf)
{
- e.error("argument to `__traits(isReturnOnStack, %s)` is not a function", o.toChars());
+ error(e.loc, "argument to `__traits(isReturnOnStack, %s)` is not a function", o.toChars());
return ErrorExp.get();
}
@@ -1360,7 +1360,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
{
if (!fd)
{
- e.error("argument to `__traits(getFunctionVariadicStyle, %s)` is not a function", o.toChars());
+ error(e.loc, "argument to `__traits(getFunctionVariadicStyle, %s)` is not a function", o.toChars());
return ErrorExp.get();
}
link = fd._linkage;
@@ -1411,7 +1411,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
fparams = fd.getParameterList();
else
{
- e.error("first argument to `__traits(getParameterStorageClasses, %s, %s)` is not a function or a function call",
+ error(e.loc, "first argument to `__traits(getParameterStorageClasses, %s, %s)` is not a function or a function call",
o.toChars(), o1.toChars());
return ErrorExp.get();
}
@@ -1427,7 +1427,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto ex = isExpression((*e.args)[1]);
if (!ex)
{
- e.error("expression expected as second argument of `__traits(getParameterStorageClasses, %s, %s)`",
+ error(e.loc, "expression expected as second argument of `__traits(getParameterStorageClasses, %s, %s)`",
o.toChars(), o1.toChars());
return ErrorExp.get();
}
@@ -1435,7 +1435,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto ii = ex.toUInteger();
if (ii >= fparams.length)
{
- e.error("parameter index must be in range 0..%u not %s", cast(uint)fparams.length, ex.toChars());
+ error(e.loc, "parameter index must be in range 0..%u not %s", cast(uint)fparams.length, ex.toChars());
return ErrorExp.get();
}
@@ -1507,7 +1507,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
AggregateDeclaration agg;
if (!s || ((d = s.isDeclaration()) is null && (agg = s.isAggregateDeclaration()) is null))
{
- e.error("argument to `__traits(getLinkage, %s)` is not a declaration", o.toChars());
+ error(e.loc, "argument to `__traits(getLinkage, %s)` is not a declaration", o.toChars());
return ErrorExp.get();
}
@@ -1521,7 +1521,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
agg.size(e.loc);
if (agg.sizeok != Sizeok.done)
{
- e.error("%s `%s` is forward referenced", agg.kind(), agg.toChars());
+ error(e.loc, "%s `%s` is forward referenced", agg.kind(), agg.toChars());
return ErrorExp.get();
}
}
@@ -1557,8 +1557,8 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto s = getDsymbol(o);
if (!s)
{
- e.error("in expression `%s` `%s` can't have members", e.toChars(), o.toChars());
- e.errorSupplemental("`%s` must evaluate to either a module, a struct, an union, a class, an interface or a template instantiation", o.toChars());
+ error(e.loc, "in expression `%s` `%s` can't have members", e.toChars(), o.toChars());
+ errorSupplemental(e.loc, "`%s` must evaluate to either a module, a struct, an union, a class, an interface or a template instantiation", o.toChars());
return ErrorExp.get();
}
@@ -1580,8 +1580,8 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto sds = s.isScopeDsymbol();
if (!sds || sds.isTemplateDeclaration())
{
- e.error("in expression `%s` %s `%s` has no members", e.toChars(), s.kind(), s.toChars());
- e.errorSupplemental("`%s` must evaluate to either a module, a struct, an union, a class, an interface or a template instantiation", s.toChars());
+ error(e.loc, "in expression `%s` %s `%s` has no members", e.toChars(), s.kind(), s.toChars());
+ errorSupplemental(e.loc, "`%s` must evaluate to either a module, a struct, an union, a class, an interface or a template instantiation", s.toChars());
return ErrorExp.get();
}
@@ -1739,7 +1739,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
if (sc2.func && sc2.func.type.ty == Tfunction)
{
const tf = cast(TypeFunction)sc2.func.type;
- err |= tf.isnothrow && canThrow(ex, sc2.func, false);
+ err |= tf.isnothrow && canThrow(ex, sc2.func, null);
}
ex = checkGC(sc2, ex);
if (ex.op == EXP.error)
@@ -1796,7 +1796,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto s = getDsymbolWithoutExpCtx(o);
if (!s)
{
- e.error("argument `%s` to __traits(getUnitTests) must be a module or aggregate",
+ error(e.loc, "argument `%s` to __traits(getUnitTests) must be a module or aggregate",
o.toChars());
return ErrorExp.get();
}
@@ -1806,7 +1806,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto sds = s.isScopeDsymbol();
if (!sds || sds.isTemplateDeclaration())
{
- e.error("argument `%s` to __traits(getUnitTests) must be a module or aggregate, not a %s",
+ error(e.loc, "argument `%s` to __traits(getUnitTests) must be a module or aggregate, not a %s",
s.toChars(), s.kind());
return ErrorExp.get();
}
@@ -1857,7 +1857,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
auto fd = s ? s.isFuncDeclaration() : null;
if (!fd)
{
- e.error("first argument to __traits(getVirtualIndex) must be a function");
+ error(e.loc, "first argument to __traits(getVirtualIndex) must be a function");
return ErrorExp.get();
}
@@ -1880,7 +1880,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
// Interfaces don't have an init symbol and hence cause linker errors
if (!ad || ad.isInterfaceDeclaration())
{
- e.error("struct / class type expected as argument to __traits(initSymbol) instead of `%s`", o.toChars());
+ error(e.loc, "struct / class type expected as argument to __traits(initSymbol) instead of `%s`", o.toChars());
return ErrorExp.get();
}
@@ -1898,7 +1898,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Type t = isType(o);
if (!t)
{
- e.error("type expected as second argument of __traits `%s` instead of `%s`",
+ error(e.loc, "type expected as second argument of __traits `%s` instead of `%s`",
e.ident.toChars(), o.toChars());
return ErrorExp.get();
}
@@ -1920,7 +1920,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
StringExp se = ex ? ex.ctfeInterpret().toStringExp() : null;
if (!ex || !se || se.len == 0)
{
- e.error("string expected as argument of __traits `%s` instead of `%s`", e.ident.toChars(), ex.toChars());
+ error(e.loc, "string expected as argument of __traits `%s` instead of `%s`", e.ident.toChars(), (*e.args)[0].toChars());
return ErrorExp.get();
}
se = se.toUTF8(sc);
@@ -1929,7 +1929,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Expression r = target.getTargetInfo(slice.ptr, e.loc); // BUG: reliance on terminating 0
if (!r)
{
- e.error("`getTargetInfo` key `\"%.*s\"` not supported by this implementation",
+ error(e.loc, "`getTargetInfo` key `\"%.*s\"` not supported by this implementation",
cast(int)slice.length, slice.ptr);
return ErrorExp.get();
}
@@ -1943,7 +1943,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
Dsymbol s = getDsymbolWithoutExpCtx(arg0);
if (!s || !s.loc.isValid())
{
- e.error("can only get the location of a symbol, not `%s`", arg0.toChars());
+ error(e.loc, "can only get the location of a symbol, not `%s`", arg0.toChars());
return ErrorExp.get();
}
@@ -1952,7 +1952,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
//const td = s.isTemplateDeclaration();
if ((fd && fd.overnext) /*|| (td && td.overnext)*/)
{
- e.error("cannot get location of an overload set, " ~
+ error(e.loc, "cannot get location of an overload set, " ~
"use `__traits(getOverloads, ..., \"%s\"%s)[N]` " ~
"to get the Nth overload",
arg0.toChars(), /*td ? ", true".ptr :*/ "".ptr);
@@ -1975,7 +1975,7 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
{
if (d.inuse)
{
- d.error("circular reference in `__traits(GetCppNamespaces,...)`");
+ .error(d.loc, "%s `%s` circular reference in `__traits(GetCppNamespaces,...)`", d.kind, d.toPrettyChars);
return ErrorExp.get();
}
d.inuse = 1;
@@ -2053,14 +2053,14 @@ Expression semanticTraits(TraitsExp e, Scope* sc)
char[] contents = cast(char[]) e.args.toString();
contents = contents[1..$];
contents[$-1] = '\0';
- e.error("`__traits(parameters)` cannot have arguments, but `%s` was supplied", contents.ptr);
+ error(e.loc, "`__traits(parameters)` cannot have arguments, but `%s` was supplied", contents.ptr);
return ErrorExp.get();
}
auto fd = sc.getEnclosingFunction();
if (!fd)
{
- e.error("`__traits(parameters)` may only be used inside a function");
+ error(e.loc, "`__traits(parameters)` may only be used inside a function");
return ErrorExp.get();
}
@@ -2316,7 +2316,7 @@ private void traitNotFound(TraitsExp e)
}
if (auto sub = speller!trait_search_fp(e.ident.toString()))
- e.error("unrecognized trait `%s`, did you mean `%.*s`?", e.ident.toChars(), cast(int) sub.length, sub.ptr);
+ error(e.loc, "unrecognized trait `%s`, did you mean `%.*s`?", e.ident.toChars(), cast(int) sub.length, sub.ptr);
else
- e.error("unrecognized trait `%s`", e.ident.toChars());
+ error(e.loc, "unrecognized trait `%s`", e.ident.toChars());
}
diff --git a/gcc/d/dmd/typesem.d b/gcc/d/dmd/typesem.d
index a80aa80..c69268a 100644
--- a/gcc/d/dmd/typesem.d
+++ b/gcc/d/dmd/typesem.d
@@ -1170,8 +1170,8 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc)
StorageClass stc2 = narg.storageClass & (STC.ref_ | STC.out_ | STC.lazy_);
if (stc1 && stc2 && stc1 != stc2)
{
- OutBuffer buf1; stcToBuffer(&buf1, stc1 | ((stc1 & STC.ref_) ? (fparam.storageClass & STC.auto_) : 0));
- OutBuffer buf2; stcToBuffer(&buf2, stc2);
+ OutBuffer buf1; stcToBuffer(buf1, stc1 | ((stc1 & STC.ref_) ? (fparam.storageClass & STC.auto_) : 0));
+ OutBuffer buf2; stcToBuffer(buf2, stc2);
.error(loc, "incompatible parameter storage classes `%s` and `%s`",
buf1.peekChars(), buf2.peekChars());
@@ -1179,7 +1179,7 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc)
stc = stc1 | (stc & ~(STC.ref_ | STC.out_ | STC.lazy_));
}
(*newparams)[j] = new Parameter(
- stc, narg.type, narg.ident, narg.defaultArg, narg.userAttribDecl);
+ loc, stc, narg.type, narg.ident, narg.defaultArg, narg.userAttribDecl);
}
fparam.type = new TypeTuple(newparams);
fparam.type = fparam.type.typeSemantic(loc, argsc);
@@ -2089,6 +2089,7 @@ Expression getProperty(Type t, Scope* scope_, const ref Loc loc, Identifier iden
{
e = new StringExp(loc, mt.deco.toDString());
Scope sc;
+ sc.eSink = global.errorSink;
e = e.expressionSemantic(&sc);
}
}
@@ -2097,6 +2098,7 @@ Expression getProperty(Type t, Scope* scope_, const ref Loc loc, Identifier iden
const s = mt.toChars();
e = new StringExp(loc, s.toDString());
Scope sc;
+ sc.eSink = global.errorSink;
e = e.expressionSemantic(&sc);
}
else if (flag && mt != Type.terror)
@@ -2127,7 +2129,9 @@ Expression getProperty(Type t, Scope* scope_, const ref Loc loc, Identifier iden
error(loc, "no property `%s` for `%s` of type `%s`", ident.toChars(), src.toChars(), mt.toPrettyChars(true));
else
error(loc, "no property `%s` for type `%s`", ident.toChars(), mt.toPrettyChars(true));
+
if (auto dsym = mt.toDsymbol(scope_))
+ {
if (auto sym = dsym.isAggregateDeclaration())
{
if (auto fd = search_function(sym, Id.opDispatch))
@@ -2135,6 +2139,9 @@ Expression getProperty(Type t, Scope* scope_, const ref Loc loc, Identifier iden
else if (!sym.members)
errorSupplemental(sym.loc, "`%s %s` is opaque and has no members.", sym.kind, mt.toPrettyChars(true));
}
+ errorSupplemental(dsym.loc, "%s `%s` defined here",
+ dsym.kind, dsym.toChars());
+ }
}
}
e = ErrorExp.get();
@@ -3387,7 +3394,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
{
if (e.op == EXP.type)
{
- e.error("`%s` is not an expression", e.toChars());
+ error(e.loc, "`%s` is not an expression", e.toChars());
return ErrorExp.get();
}
else if (mt.dim.toUInteger() < 1 && checkUnsafeDotExp(sc, e, ident, flag))
@@ -3402,7 +3409,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
{
if (e.isTypeExp())
{
- e.error("`.tupleof` cannot be used on type `%s`", mt.toChars);
+ error(e.loc, "`.tupleof` cannot be used on type `%s`", mt.toChars);
return ErrorExp.get();
}
else
@@ -3436,7 +3443,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
}
if (e.op == EXP.type && (ident == Id.length || ident == Id.ptr))
{
- e.error("`%s` is not an expression", e.toChars());
+ error(e.loc, "`%s` is not an expression", e.toChars());
return ErrorExp.get();
}
if (ident == Id.length)
@@ -3482,7 +3489,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
if (fd_aaLen is null)
{
auto fparams = new Parameters();
- fparams.push(new Parameter(STC.const_ | STC.scope_, mt, null, null, null));
+ fparams.push(new Parameter(Loc.initial, STC.const_ | STC.scope_, mt, null, null, null));
fd_aaLen = FuncDeclaration.genCfunc(fparams, Type.tsize_t, Id.aaLen);
TypeFunction tf = fd_aaLen.type.toTypeFunction();
tf.purity = PURE.const_;
@@ -3604,7 +3611,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
e = build_overload(e.loc, sc, e, null, fd);
// @@@DEPRECATED_2.110@@@.
// Deprecated in 2.082, made an error in 2.100.
- e.error("`opDot` is obsolete. Use `alias this`");
+ error(e.loc, "`opDot` is obsolete. Use `alias this`");
return ErrorExp.get();
}
@@ -3619,7 +3626,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
TemplateDeclaration td = fd.isTemplateDeclaration();
if (!td)
{
- fd.error("must be a template `opDispatch(string s)`, not a %s", fd.kind());
+ .error(fd.loc, "%s `%s` must be a template `opDispatch(string s)`, not a %s", fd.kind, fd.toPrettyChars, fd.kind());
return returnExp(ErrorExp.get());
}
auto se = new StringExp(e.loc, ident.toString());
@@ -3755,9 +3762,9 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
!v.type.deco && v.inuse)
{
if (v.inuse) // https://issues.dlang.org/show_bug.cgi?id=9494
- e.error("circular reference to %s `%s`", v.kind(), v.toPrettyChars());
+ error(e.loc, "circular reference to %s `%s`", v.kind(), v.toPrettyChars());
else
- e.error("forward reference to %s `%s`", v.kind(), v.toPrettyChars());
+ error(e.loc, "forward reference to %s `%s`", v.kind(), v.toPrettyChars());
return ErrorExp.get();
}
if (v.type.ty == Terror)
@@ -3769,7 +3776,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
{
if (v.inuse)
{
- e.error("circular initialization of %s `%s`", v.kind(), v.toPrettyChars());
+ error(e.loc, "circular initialization of %s `%s`", v.kind(), v.toPrettyChars());
return ErrorExp.get();
}
checkAccess(e.loc, sc, null, v);
@@ -3843,7 +3850,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
Declaration d = s.isDeclaration();
if (!d)
{
- e.error("`%s.%s` is not a declaration", e.toChars(), ident.toChars());
+ error(e.loc, "`%s.%s` is not a declaration", e.toChars(), ident.toChars());
return ErrorExp.get();
}
@@ -3932,12 +3939,14 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
if (!(flag & 1) && !res)
{
if (auto ns = mt.sym.search_correct(ident))
- e.error("no property `%s` for type `%s`. Did you mean `%s.%s` ?", ident.toChars(), mt.toChars(), mt.toChars(),
+ error(e.loc, "no property `%s` for type `%s`. Did you mean `%s.%s` ?", ident.toChars(), mt.toChars(), mt.toChars(),
ns.toChars());
else
- e.error("no property `%s` for type `%s`", ident.toChars(),
+ error(e.loc, "no property `%s` for type `%s`", ident.toChars(),
mt.toChars());
+ errorSupplemental(mt.sym.loc, "%s `%s` defined here",
+ mt.sym.kind, mt.toChars());
return ErrorExp.get();
}
return res;
@@ -4180,14 +4189,14 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
!v.type.deco && v.inuse)
{
if (v.inuse) // https://issues.dlang.org/show_bug.cgi?id=9494
- e.error("circular reference to %s `%s`", v.kind(), v.toPrettyChars());
+ error(e.loc, "circular reference to %s `%s`", v.kind(), v.toPrettyChars());
else
- e.error("forward reference to %s `%s`", v.kind(), v.toPrettyChars());
+ error(e.loc, "forward reference to %s `%s`", v.kind(), v.toPrettyChars());
return ErrorExp.get();
}
if (v.type.ty == Terror)
{
- e.error("type of variable `%s` has errors", v.toPrettyChars);
+ error(e.loc, "type of variable `%s` has errors", v.toPrettyChars);
return ErrorExp.get();
}
@@ -4195,7 +4204,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
{
if (v.inuse)
{
- e.error("circular initialization of %s `%s`", v.kind(), v.toPrettyChars());
+ error(e.loc, "circular initialization of %s `%s`", v.kind(), v.toPrettyChars());
return ErrorExp.get();
}
checkAccess(e.loc, sc, null, v);
@@ -4274,7 +4283,7 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, DotExpFlag
Declaration d = s.isDeclaration();
if (!d)
{
- e.error("`%s.%s` is not a declaration", e.toChars(), ident.toChars());
+ error(e.loc, "`%s.%s` is not a declaration", e.toChars(), ident.toChars());
return ErrorExp.get();
}
@@ -4791,7 +4800,7 @@ Type stripDefaultArgs(Type t)
{
Type t = stripDefaultArgs(p.type);
return (t != p.type || p.defaultArg || p.ident || p.userAttribDecl)
- ? new Parameter(p.storageClass, t, null, null, null)
+ ? new Parameter(p.loc, p.storageClass, t, null, null, null)
: null;
}
@@ -4901,7 +4910,7 @@ Expression getMaxMinValue(EnumDeclaration ed, const ref Loc loc, Identifier id)
if (ed.inuse)
{
- ed.error(loc, "recursive definition of `.%s` property", id.toChars());
+ .error(loc, "%s `%s` recursive definition of `.%s` property", ed.kind, ed.toPrettyChars, id.toChars());
return errorReturn();
}
if (*pval)
@@ -4913,12 +4922,12 @@ Expression getMaxMinValue(EnumDeclaration ed, const ref Loc loc, Identifier id)
return errorReturn();
if (!ed.members)
{
- ed.error(loc, "is opaque and has no `.%s`", id.toChars());
+ .error(loc, "%s `%s` is opaque and has no `.%s`", ed.kind, ed.toPrettyChars, id.toChars(), id.toChars());
return errorReturn();
}
if (!(ed.memtype && ed.memtype.isintegral()))
{
- ed.error(loc, "has no `.%s` property because base type `%s` is not an integral type",
+ .error(loc, "%s `%s` has no `.%s` property because base type `%s` is not an integral type", ed.kind, ed.toPrettyChars, id.toChars(),
id.toChars(), ed.memtype ? ed.memtype.toChars() : "");
return errorReturn();
}
@@ -4937,7 +4946,7 @@ Expression getMaxMinValue(EnumDeclaration ed, const ref Loc loc, Identifier id)
if (em.semanticRun < PASS.semanticdone)
{
- em.error("is forward referenced looking for `.%s`", id.toChars());
+ .error(em.loc, "%s `%s` is forward referenced looking for `.%s`", em.kind, em.toPrettyChars, id.toChars());
ed.errors = true;
continue;
}
@@ -5001,7 +5010,7 @@ RootObject compileTypeMixin(TypeMixin tm, ref const Loc loc, Scope* sc)
const bool doUnittests = global.params.useUnitTests || global.params.ddoc.doOutput || global.params.dihdr.doOutput;
auto locm = adjustLocForMixin(str, loc, global.params.mixinOut);
scope p = new Parser!ASTCodegen(locm, sc._module, str, false, global.errorSink, &global.compileEnv, doUnittests);
- p.transitionIn = global.params.vin;
+ p.transitionIn = global.params.v.vin;
p.nextToken();
//printf("p.loc.linnum = %d\n", p.loc.linnum);
diff --git a/gcc/d/dmd/typinf.d b/gcc/d/dmd/typinf.d
index 4f87d92..6e05695 100644
--- a/gcc/d/dmd/typinf.d
+++ b/gcc/d/dmd/typinf.d
@@ -52,6 +52,10 @@ extern (C++) void genTypeInfo(Expression e, const ref Loc loc, Type torig, Scope
.error(loc, "expression `%s` uses the GC and cannot be used with switch `-betterC`", e.toChars());
else
.error(loc, "`TypeInfo` cannot be used with -betterC");
+
+ if (sc && sc.tinst)
+ sc.tinst.printInstantiationTrace(Classification.error, uint.max);
+
fatal();
}
}
diff --git a/gcc/d/dmd/utils.d b/gcc/d/dmd/utils.d
index dfb4cb5..bb389b6 100644
--- a/gcc/d/dmd/utils.d
+++ b/gcc/d/dmd/utils.d
@@ -53,12 +53,6 @@ const(char)* toWinPath(const(char)* src)
* loc = The line number information from where the call originates
* filename = Path to file
*/
-Buffer readFile(Loc loc, const(char)* filename)
-{
- return readFile(loc, filename.toDString());
-}
-
-/// Ditto
Buffer readFile(Loc loc, const(char)[] filename)
{
auto result = File.read(filename);
@@ -78,15 +72,19 @@ Buffer readFile(Loc loc, const(char)[] filename)
* loc = The line number information from where the call originates
* filename = Path to file
* data = Full content of the file to be written
+ * Returns:
+ * false on error
*/
-extern (D) void writeFile(Loc loc, const(char)[] filename, const void[] data)
+extern (D) bool writeFile(Loc loc, const(char)[] filename, const void[] data)
{
- ensurePathToNameExists(Loc.initial, filename);
+ if (!ensurePathToNameExists(Loc.initial, filename))
+ return false;
if (!File.update(filename, data))
{
error(loc, "error writing file '%.*s'", cast(int) filename.length, filename.ptr);
- fatal();
+ return false;
}
+ return true;
}
@@ -97,8 +95,10 @@ extern (D) void writeFile(Loc loc, const(char)[] filename, const void[] data)
* Params:
* loc = The line number information from where the call originates
* name = a path to check (the name is stripped)
+ * Returns:
+ * false on error
*/
-void ensurePathToNameExists(Loc loc, const(char)[] name)
+bool ensurePathToNameExists(Loc loc, const(char)[] name)
{
const char[] pt = FileName.path(name);
if (pt.length)
@@ -106,10 +106,12 @@ void ensurePathToNameExists(Loc loc, const(char)[] name)
if (!FileName.ensurePathExists(pt))
{
error(loc, "cannot create directory %*.s", cast(int) pt.length, pt.ptr);
- fatal();
+ FileName.free(pt.ptr);
+ return false;
}
}
FileName.free(pt.ptr);
+ return true;
}
diff --git a/gcc/d/expr.cc b/gcc/d/expr.cc
index 7038655..52243e6 100644
--- a/gcc/d/expr.cc
+++ b/gcc/d/expr.cc
@@ -2050,6 +2050,9 @@ public:
tree result = get_decl_tree (e->var);
TREE_USED (result) = 1;
+ if (e->var->isFuncDeclaration ())
+ result = maybe_reject_intrinsic (result);
+
if (declaration_reference_p (e->var))
gcc_assert (POINTER_TYPE_P (TREE_TYPE (result)));
else
@@ -2535,13 +2538,13 @@ public:
{
/* Copy the string contents to a null terminated string. */
dinteger_t length = (e->len * e->sz);
- char *string = XALLOCAVEC (char, length + 1);
+ char *string = XALLOCAVEC (char, length + e->sz);
+ memset (string, 0, length + e->sz);
if (length > 0)
memcpy (string, e->string, length);
- string[length] = '\0';
/* String value and type includes the null terminator. */
- tree value = build_string (length, string);
+ tree value = build_string (length + e->sz, string);
TREE_TYPE (value) = make_array_type (tb->nextOf (), length + 1);
value = build_address (value);
@@ -2725,6 +2728,15 @@ public:
void visit (AssocArrayLiteralExp *e) final override
{
+ if (e->lowering != NULL)
+ {
+ /* When an associative array literal gets lowered, it's converted into a
+ struct literal suitable for static initialization. */
+ gcc_assert (this->constp_);
+ this->result_ = build_expr (e->lowering, this->constp_, true);
+ return ;
+ }
+
/* Want the mutable type for typeinfo reference. */
Type *tb = e->type->toBasetype ()->mutableOf ();
diff --git a/gcc/d/intrinsics.cc b/gcc/d/intrinsics.cc
index 583d5a9..1b03e9e 100644
--- a/gcc/d/intrinsics.cc
+++ b/gcc/d/intrinsics.cc
@@ -60,12 +60,15 @@ struct intrinsic_decl
/* True if the intrinsic is only handled in CTFE. */
bool ctfeonly;
+
+ /* True if the intrinsic has a library implementation. */
+ bool fallback;
};
static const intrinsic_decl intrinsic_decls[] =
{
-#define DEF_D_INTRINSIC(CODE, BUILTIN, NAME, MODULE, DECO, CTFE) \
- { CODE, BUILTIN, NAME, MODULE, DECO, CTFE },
+#define DEF_D_INTRINSIC(CODE, BUILTIN, NAME, MODULE, DECO, CTFE, FALLBACK) \
+ { CODE, BUILTIN, NAME, MODULE, DECO, CTFE, FALLBACK },
#include "intrinsics.def"
@@ -1436,3 +1439,43 @@ maybe_expand_intrinsic (tree callexp)
gcc_unreachable ();
}
}
+
+/* If FNDECL is an intrinsic, return the FUNCTION_DECL that has a library
+ fallback implementation of it, otherwise raise an error. */
+
+tree
+maybe_reject_intrinsic (tree fndecl)
+{
+ gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
+
+ intrinsic_code intrinsic = DECL_INTRINSIC_CODE (fndecl);
+
+ if (intrinsic == INTRINSIC_NONE)
+ {
+ /* Not an intrinsic, but it still might be a declaration from the
+ `gcc.builtins' module. */
+ if (fndecl_built_in_p (fndecl) && DECL_IS_UNDECLARED_BUILTIN (fndecl)
+ && !DECL_ASSEMBLER_NAME_SET_P (fndecl))
+ error ("built-in function %qE must be directly called", fndecl);
+
+ return fndecl;
+ }
+
+ /* Nothing to do if the intrinsic has a D library implementation. */
+ if (intrinsic_decls[intrinsic].fallback)
+ return fndecl;
+
+ /* Check the GCC built-in decl if the intrinsic maps to one. */
+ built_in_function code = intrinsic_decls[intrinsic].built_in;
+ if (code != BUILT_IN_NONE)
+ {
+ tree builtin = builtin_decl_explicit (code);
+ if (!DECL_IS_UNDECLARED_BUILTIN (builtin)
+ || DECL_ASSEMBLER_NAME_SET_P (builtin))
+ return builtin;
+ }
+
+ /* It's a D language intrinsic with no library implementation. */
+ error ("intrinsic function %qE must be directly called", fndecl);
+ return fndecl;
+}
diff --git a/gcc/d/intrinsics.def b/gcc/d/intrinsics.def
index 454bddf..e472cf7 100644
--- a/gcc/d/intrinsics.def
+++ b/gcc/d/intrinsics.def
@@ -15,7 +15,7 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* DEF_D_INTRINSIC (CODE, BUILTIN, NAME, MODULE, DECO, CTFE)
+/* DEF_D_INTRINSIC (CODE, BUILTIN, NAME, MODULE, DECO, CTFE, FALLBACK)
CODE The enum code used to refer to this intrinsic.
BUILTIN The enum code used to reference the function DECL_FUNCTION_CODE,
if the intrinsic can be mapped 1:1 to a GCC built-in.
@@ -24,40 +24,45 @@ along with GCC; see the file COPYING3. If not see
DECO The function signature decoration of the intrinsic.
CTFE True if the function is only handled as a built-in during CTFE,
otherwise the runtime implementation is used.
+ FALLBACK True if the function has a D runtime library implementation.
Used for declaring internally recognized functions that either map to a
GCC builtin, or are specially handled by the compiler. */
/* A D built-in that has no runtime implementation. */
#define DEF_D_BUILTIN(C, B, N, M, D) \
- DEF_D_INTRINSIC (C, B, N, M, D, false)
+ DEF_D_INTRINSIC (C, B, N, M, D, false, false)
+
+/* A D built-in that has a runtime implementation. */
+#define DEF_D_LIB_BUILTIN(C, B, N, M, D) \
+ DEF_D_INTRINSIC (C, B, N, M, D, false, true)
/* A D built-in that is specially recognized only during CTFE. */
#define DEF_CTFE_BUILTIN(C, B, N, M, D) \
- DEF_D_INTRINSIC (C, B, N, M, D, true)
+ DEF_D_INTRINSIC (C, B, N, M, D, true, true)
DEF_D_BUILTIN (INTRINSIC_NONE, BUILT_IN_NONE, 0, 0, 0)
/* core.bitop intrinsics. */
-DEF_D_BUILTIN (INTRINSIC_BSF, BUILT_IN_NONE, "bsf", "core.bitop",
- "FNaNbNiNfkZi")
-DEF_D_BUILTIN (INTRINSIC_BSR, BUILT_IN_NONE, "bsr", "core.bitop",
- "FNaNbNiNfkZi")
-DEF_D_BUILTIN (INTRINSIC_BT, BUILT_IN_NONE, "bt", "core.bitop",
- "FNaNbNiMxPkkZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_BSF, BUILT_IN_NONE, "bsf", "core.bitop",
+ "FNaNbNiNfkZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_BSR, BUILT_IN_NONE, "bsr", "core.bitop",
+ "FNaNbNiNfkZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_BT, BUILT_IN_NONE, "bt", "core.bitop",
+ "FNaNbNiMxPkkZi")
DEF_D_BUILTIN (INTRINSIC_BTC, BUILT_IN_NONE, "btc", "core.bitop",
"FNaNbNiPkkZi")
DEF_D_BUILTIN (INTRINSIC_BTR, BUILT_IN_NONE, "btr", "core.bitop",
"FNaNbNiPkkZi")
DEF_D_BUILTIN (INTRINSIC_BTS, BUILT_IN_NONE, "bts", "core.bitop",
"FNaNbNiPkkZi")
-DEF_D_BUILTIN (INTRINSIC_BSF64, BUILT_IN_NONE, "bsf", "core.bitop",
- "FNaNbNiNfmZi")
-DEF_D_BUILTIN (INTRINSIC_BSR64, BUILT_IN_NONE, "bsr", "core.bitop",
- "FNaNbNiNfmZi")
-DEF_D_BUILTIN (INTRINSIC_BT64, BUILT_IN_NONE, "bt", "core.bitop",
- "FNaNbNiMxPmmZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_BSF64, BUILT_IN_NONE, "bsf", "core.bitop",
+ "FNaNbNiNfmZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_BSR64, BUILT_IN_NONE, "bsr", "core.bitop",
+ "FNaNbNiNfmZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_BT64, BUILT_IN_NONE, "bt", "core.bitop",
+ "FNaNbNiMxPmmZi")
DEF_D_BUILTIN (INTRINSIC_BTC64, BUILT_IN_NONE, "btc", "core.bitop",
"FNaNbNiPmmZi")
DEF_D_BUILTIN (INTRINSIC_BTR64, BUILT_IN_NONE, "btr", "core.bitop",
@@ -72,17 +77,19 @@ DEF_D_BUILTIN (INTRINSIC_BSWAP32, BUILT_IN_BSWAP32, "bswap", "core.bitop",
DEF_D_BUILTIN (INTRINSIC_BSWAP64, BUILT_IN_BSWAP64, "bswap", "core.bitop",
"FNaNbNiNfmZm")
-DEF_D_BUILTIN (INTRINSIC_POPCNT32, BUILT_IN_NONE, "popcnt", "core.bitop",
- "FNaNbNiNfkZi")
-DEF_D_BUILTIN (INTRINSIC_POPCNT64, BUILT_IN_NONE, "popcnt", "core.bitop",
- "FNaNbNiNfmZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_POPCNT32, BUILT_IN_NONE, "popcnt", "core.bitop",
+ "FNaNbNiNfkZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_POPCNT64, BUILT_IN_NONE, "popcnt", "core.bitop",
+ "FNaNbNiNfmZi")
-DEF_D_BUILTIN (INTRINSIC_ROL, BUILT_IN_NONE, "rol", "core.bitop", "FNa@1TkZ@1T")
-DEF_D_BUILTIN (INTRINSIC_ROL_TIARG, BUILT_IN_NONE, "rol", "core.bitop",
- "FNa@1TZ@1T")
-DEF_D_BUILTIN (INTRINSIC_ROR, BUILT_IN_NONE, "ror", "core.bitop", "FNa@1TkZ@1T")
-DEF_D_BUILTIN (INTRINSIC_ROR_TIARG, BUILT_IN_NONE, "ror", "core.bitop",
- "FNa@1TZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_ROL, BUILT_IN_NONE, "rol", "core.bitop",
+ "FNa@1TkZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_ROL_TIARG, BUILT_IN_NONE, "rol", "core.bitop",
+ "FNa@1TZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_ROR, BUILT_IN_NONE, "ror", "core.bitop",
+ "FNa@1TkZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_ROR_TIARG, BUILT_IN_NONE, "ror", "core.bitop",
+ "FNa@1TZ@1T")
/* core.volatile intrinsics. */
@@ -105,36 +112,36 @@ DEF_D_BUILTIN (INTRINSIC_VSTORE64, BUILT_IN_NONE, "volatileStore",
/* core.checkedint intrinsics. */
-DEF_D_BUILTIN (INTRINSIC_ADDS, BUILT_IN_NONE, "adds", "core.checkedint",
- "FiiKbZi")
-DEF_D_BUILTIN (INTRINSIC_ADDSL, BUILT_IN_NONE, "adds", "core.checkedint",
- "FllKbZl")
-DEF_D_BUILTIN (INTRINSIC_ADDU, BUILT_IN_NONE, "addu", "core.checkedint",
- "FkkKbZk")
-DEF_D_BUILTIN (INTRINSIC_ADDUL, BUILT_IN_NONE, "addu", "core.checkedint",
- "FmmKbZm")
-DEF_D_BUILTIN (INTRINSIC_SUBS, BUILT_IN_NONE, "subs", "core.checkedint",
- "FiiKbZi")
-DEF_D_BUILTIN (INTRINSIC_SUBSL, BUILT_IN_NONE, "subs", "core.checkedint",
- "FllKbZl")
-DEF_D_BUILTIN (INTRINSIC_SUBU, BUILT_IN_NONE, "subu", "core.checkedint",
- "FkkKbZk")
-DEF_D_BUILTIN (INTRINSIC_SUBUL, BUILT_IN_NONE, "subu", "core.checkedint",
- "FmmKbZm")
-DEF_D_BUILTIN (INTRINSIC_MULS, BUILT_IN_NONE, "muls", "core.checkedint",
- "FiiKbZi")
-DEF_D_BUILTIN (INTRINSIC_MULSL, BUILT_IN_NONE, "muls", "core.checkedint",
- "FllKbZl")
-DEF_D_BUILTIN (INTRINSIC_MULU, BUILT_IN_NONE, "mulu", "core.checkedint",
- "FkkKbZk")
-DEF_D_BUILTIN (INTRINSIC_MULUI, BUILT_IN_NONE, "mulu", "core.checkedint",
- "FmkKbZm")
-DEF_D_BUILTIN (INTRINSIC_MULUL, BUILT_IN_NONE, "mulu", "core.checkedint",
- "FmmKbZm")
-DEF_D_BUILTIN (INTRINSIC_NEGS, BUILT_IN_NONE, "negs", "core.checkedint",
- "FiKbZi")
-DEF_D_BUILTIN (INTRINSIC_NEGSL, BUILT_IN_NONE, "negs", "core.checkedint",
- "FlKbZl")
+DEF_D_LIB_BUILTIN (INTRINSIC_ADDS, BUILT_IN_NONE, "adds", "core.checkedint",
+ "FiiKbZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_ADDSL, BUILT_IN_NONE, "adds", "core.checkedint",
+ "FllKbZl")
+DEF_D_LIB_BUILTIN (INTRINSIC_ADDU, BUILT_IN_NONE, "addu", "core.checkedint",
+ "FkkKbZk")
+DEF_D_LIB_BUILTIN (INTRINSIC_ADDUL, BUILT_IN_NONE, "addu", "core.checkedint",
+ "FmmKbZm")
+DEF_D_LIB_BUILTIN (INTRINSIC_SUBS, BUILT_IN_NONE, "subs", "core.checkedint",
+ "FiiKbZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_SUBSL, BUILT_IN_NONE, "subs", "core.checkedint",
+ "FllKbZl")
+DEF_D_LIB_BUILTIN (INTRINSIC_SUBU, BUILT_IN_NONE, "subu", "core.checkedint",
+ "FkkKbZk")
+DEF_D_LIB_BUILTIN (INTRINSIC_SUBUL, BUILT_IN_NONE, "subu", "core.checkedint",
+ "FmmKbZm")
+DEF_D_LIB_BUILTIN (INTRINSIC_MULS, BUILT_IN_NONE, "muls", "core.checkedint",
+ "FiiKbZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_MULSL, BUILT_IN_NONE, "muls", "core.checkedint",
+ "FllKbZl")
+DEF_D_LIB_BUILTIN (INTRINSIC_MULU, BUILT_IN_NONE, "mulu", "core.checkedint",
+ "FkkKbZk")
+DEF_D_LIB_BUILTIN (INTRINSIC_MULUI, BUILT_IN_NONE, "mulu", "core.checkedint",
+ "FmkKbZm")
+DEF_D_LIB_BUILTIN (INTRINSIC_MULUL, BUILT_IN_NONE, "mulu", "core.checkedint",
+ "FmmKbZm")
+DEF_D_LIB_BUILTIN (INTRINSIC_NEGS, BUILT_IN_NONE, "negs", "core.checkedint",
+ "FiKbZi")
+DEF_D_LIB_BUILTIN (INTRINSIC_NEGSL, BUILT_IN_NONE, "negs", "core.checkedint",
+ "FlKbZl")
/* core.math intrinsics. */
@@ -182,11 +189,12 @@ DEF_D_BUILTIN (INTRINSIC_SQRT, BUILT_IN_SQRT, "sqrt", "core.math",
"FNaNbNiNfdZd")
DEF_D_BUILTIN (INTRINSIC_SQRTL, BUILT_IN_SQRTL, "sqrt", "core.math",
"FNaNbNiNfeZe")
-DEF_D_BUILTIN (INTRINSIC_TOPRECF, BUILT_IN_NONE, "toPrec", "core.math",
- "FfZ@1T")
-DEF_D_BUILTIN (INTRINSIC_TOPREC, BUILT_IN_NONE, "toPrec", "core.math", "FdZ@1T")
-DEF_D_BUILTIN (INTRINSIC_TOPRECL, BUILT_IN_NONE, "toPrec", "core.math",
- "FeZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_TOPRECF, BUILT_IN_NONE, "toPrec", "core.math",
+ "FfZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_TOPREC, BUILT_IN_NONE, "toPrec", "core.math",
+ "FdZ@1T")
+DEF_D_LIB_BUILTIN (INTRINSIC_TOPRECL, BUILT_IN_NONE, "toPrec", "core.math",
+ "FeZ@1T")
/* std.math intrinsics. */
diff --git a/gcc/d/modules.cc b/gcc/d/modules.cc
index f2180d3..8d6c8f0 100644
--- a/gcc/d/modules.cc
+++ b/gcc/d/modules.cc
@@ -277,12 +277,13 @@ get_compiler_dso_type (void)
DECL_CHAIN (field) = fields;
fields = field;
- field = create_field_decl (build_pointer_type (get_moduleinfo_type ()),
- NULL, 1, 1);
+ tree moduleinfo_ptr_ptr_type =
+ build_pointer_type (build_pointer_type (get_moduleinfo_type ()));
+
+ field = create_field_decl (moduleinfo_ptr_ptr_type, NULL, 1, 1);
DECL_CHAIN (field) = fields;
fields = field;
- field = create_field_decl (build_pointer_type (get_moduleinfo_type ()),
- NULL, 1, 1);
+ field = create_field_decl (moduleinfo_ptr_ptr_type, NULL, 1, 1);
DECL_CHAIN (field) = fields;
fields = field;
diff --git a/gcc/data-streamer-in.cc b/gcc/data-streamer-in.cc
index 6e36adc..2334298 100644
--- a/gcc/data-streamer-in.cc
+++ b/gcc/data-streamer-in.cc
@@ -277,10 +277,12 @@ streamer_read_value_range (class lto_input_block *ib, data_in *data_in,
wide_int
streamer_read_wide_int (class lto_input_block *ib)
{
- HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+ HOST_WIDE_INT abuf[WIDE_INT_MAX_INL_ELTS], *a = abuf;
int i;
int prec = streamer_read_uhwi (ib);
int len = streamer_read_uhwi (ib);
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ a = XALLOCAVEC (HOST_WIDE_INT, len);
for (i = 0; i < len; i++)
a[i] = streamer_read_hwi (ib);
return wide_int::from_array (a, len, prec);
@@ -292,10 +294,12 @@ streamer_read_wide_int (class lto_input_block *ib)
widest_int
streamer_read_widest_int (class lto_input_block *ib)
{
- HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+ HOST_WIDE_INT abuf[WIDE_INT_MAX_INL_ELTS], *a = abuf;
int i;
int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib);
int len = streamer_read_uhwi (ib);
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ a = XALLOCAVEC (HOST_WIDE_INT, len);
for (i = 0; i < len; i++)
a[i] = streamer_read_hwi (ib);
return widest_int::from_array (a, len);
diff --git a/gcc/data-streamer.h b/gcc/data-streamer.h
index 7e69eb9..c2b9a8d 100644
--- a/gcc/data-streamer.h
+++ b/gcc/data-streamer.h
@@ -199,7 +199,7 @@ bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
inline poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t>
bp_unpack_poly_value (struct bitpack_d *bp, unsigned nbits)
{
- poly_int_pod<NUM_POLY_INT_COEFFS, bitpack_word_t> x;
+ poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t> x;
for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
x.coeffs[i] = bp_unpack_value (bp, nbits);
return x;
diff --git a/gcc/diagnostic-format-json.cc b/gcc/diagnostic-format-json.cc
index 539b98b..346abb3 100644
--- a/gcc/diagnostic-format-json.cc
+++ b/gcc/diagnostic-format-json.cc
@@ -28,18 +28,63 @@ along with GCC; see the file COPYING3. If not see
#include "json.h"
#include "selftest.h"
-/* The top-level JSON array of pending diagnostics. */
+/* Subclass of diagnostic_output_format for JSON output. */
-static json::array *toplevel_array;
+class json_output_format : public diagnostic_output_format
+{
+public:
+ void on_begin_group () final override
+ {
+ /* No-op. */
+ }
+ void on_end_group () final override
+ {
+ m_cur_group = nullptr;
+ m_cur_children_array = nullptr;
+ }
+ void
+ on_begin_diagnostic (diagnostic_info *) final override
+ {
+ /* No-op. */
+ }
+ void
+ on_end_diagnostic (diagnostic_info *diagnostic,
+ diagnostic_t orig_diag_kind) final override;
+ void on_diagram (const diagnostic_diagram &) final override
+ {
+ /* No-op. */
+ }
-/* The JSON object for the current diagnostic group. */
+protected:
+ json_output_format (diagnostic_context &context)
+ : diagnostic_output_format (context),
+ m_toplevel_array (new json::array ()),
+ m_cur_group (nullptr),
+ m_cur_children_array (nullptr)
+ {
+ }
-static json::object *cur_group;
+ /* Flush the top-level array to OUTF. */
+ void
+ flush_to_file (FILE *outf)
+ {
+ m_toplevel_array->dump (outf);
+ fprintf (outf, "\n");
+ delete m_toplevel_array;
+ m_toplevel_array = nullptr;
+ }
+
+private:
+ /* The top-level JSON array of pending diagnostics. */
+ json::array *m_toplevel_array;
-/* The JSON array for the "children" array within the current diagnostic
- group. */
+ /* The JSON object for the current diagnostic group. */
+ json::object *m_cur_group;
-static json::array *cur_children_array;
+ /* The JSON array for the "children" array within the current diagnostic
+ group. */
+ json::array *m_cur_children_array;
+};
/* Generate a JSON object for LOC. */
@@ -139,20 +184,13 @@ json_from_metadata (const diagnostic_metadata *metadata)
return metadata_obj;
}
-/* No-op implementation of "begin_diagnostic" for JSON output. */
-
-static void
-json_begin_diagnostic (diagnostic_context *, diagnostic_info *)
-{
-}
-
-/* Implementation of "end_diagnostic" for JSON output.
+/* Implementation of "on_end_diagnostic" vfunc for JSON output.
Generate a JSON object for DIAGNOSTIC, and store for output
within current diagnostic group. */
-static void
-json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
- diagnostic_t orig_diag_kind)
+void
+json_output_format::on_end_diagnostic (diagnostic_info *diagnostic,
+ diagnostic_t orig_diag_kind)
{
json::object *diag_obj = new json::object ();
@@ -178,22 +216,22 @@ json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
// FIXME: encoding of the message (json::string requires UTF-8)
diag_obj->set ("message",
- new json::string (pp_formatted_text (context->printer)));
- pp_clear_output_area (context->printer);
+ new json::string (pp_formatted_text (m_context.printer)));
+ pp_clear_output_area (m_context.printer);
char *option_text;
- option_text = context->option_name (context, diagnostic->option_index,
- orig_diag_kind, diagnostic->kind);
+ option_text = m_context.option_name (&m_context, diagnostic->option_index,
+ orig_diag_kind, diagnostic->kind);
if (option_text)
{
diag_obj->set ("option", new json::string (option_text));
free (option_text);
}
- if (context->get_option_url)
+ if (m_context.get_option_url)
{
- char *option_url = context->get_option_url (context,
- diagnostic->option_index);
+ char *option_url = m_context.get_option_url (&m_context,
+ diagnostic->option_index);
if (option_url)
{
diag_obj->set ("option_url", new json::string (option_url));
@@ -203,21 +241,21 @@ json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
/* If we've already emitted a diagnostic within this auto_diagnostic_group,
then add diag_obj to its "children" array. */
- if (cur_group)
+ if (m_cur_group)
{
- gcc_assert (cur_children_array);
- cur_children_array->append (diag_obj);
+ gcc_assert (m_cur_children_array);
+ m_cur_children_array->append (diag_obj);
}
else
{
/* Otherwise, make diag_obj be the top-level object within the group;
add a "children" array and record the column origin. */
- toplevel_array->append (diag_obj);
- cur_group = diag_obj;
- cur_children_array = new json::array ();
- diag_obj->set ("children", cur_children_array);
+ m_toplevel_array->append (diag_obj);
+ m_cur_group = diag_obj;
+ m_cur_children_array = new json::array ();
+ diag_obj->set ("children", m_cur_children_array);
diag_obj->set ("column-origin",
- new json::integer_number (context->column_origin));
+ new json::integer_number (m_context.column_origin));
}
const rich_location *richloc = diagnostic->richloc;
@@ -228,7 +266,8 @@ json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
for (unsigned int i = 0; i < richloc->get_num_locations (); i++)
{
const location_range *loc_range = richloc->get_range (i);
- json::object *loc_obj = json_from_location_range (context, loc_range, i);
+ json::object *loc_obj
+ = json_from_location_range (&m_context, loc_range, i);
if (loc_obj)
loc_array->append (loc_obj);
}
@@ -240,7 +279,7 @@ json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
for (unsigned int i = 0; i < richloc->get_num_fixit_hints (); i++)
{
const fixit_hint *hint = richloc->get_fixit_hint (i);
- json::object *fixit_obj = json_from_fixit_hint (context, hint);
+ json::object *fixit_obj = json_from_fixit_hint (&m_context, hint);
fixit_array->append (fixit_obj);
}
}
@@ -257,9 +296,9 @@ json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
}
const diagnostic_path *path = richloc->get_path ();
- if (path && context->make_json_for_path)
+ if (path && m_context.make_json_for_path)
{
- json::value *path_value = context->make_json_for_path (context, path);
+ json::value *path_value = m_context.make_json_for_path (&m_context, path);
diag_obj->set ("path", path_value);
}
@@ -267,71 +306,51 @@ json_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
new json::literal (richloc->escape_on_output_p ()));
}
-/* No-op implementation of "begin_group_cb" for JSON output. */
-
-static void
-json_begin_group (diagnostic_context *)
-{
-}
-
-/* Implementation of "end_group_cb" for JSON output. */
-
-static void
-json_end_group (diagnostic_context *)
+class json_stderr_output_format : public json_output_format
{
- cur_group = NULL;
- cur_children_array = NULL;
-}
-
-/* Flush the top-level array to OUTF. */
-
-static void
-json_flush_to_file (FILE *outf)
-{
- toplevel_array->dump (outf);
- fprintf (outf, "\n");
- delete toplevel_array;
- toplevel_array = NULL;
-}
-
-/* Callback for final cleanup for JSON output to stderr. */
-
-static void
-json_stderr_final_cb (diagnostic_context *)
-{
- json_flush_to_file (stderr);
-}
-
-static char *json_output_base_file_name;
-
-/* Callback for final cleanup for JSON output to a file. */
+public:
+ json_stderr_output_format (diagnostic_context &context)
+ : json_output_format (context)
+ {
+ }
+ ~json_stderr_output_format ()
+ {
+ flush_to_file (stderr);
+ }
+};
-static void
-json_file_final_cb (diagnostic_context *)
+class json_file_output_format : public json_output_format
{
- char *filename = concat (json_output_base_file_name, ".gcc.json", NULL);
- FILE *outf = fopen (filename, "w");
- if (!outf)
- {
- const char *errstr = xstrerror (errno);
- fnotice (stderr, "error: unable to open '%s' for writing: %s\n",
- filename, errstr);
- free (filename);
- return;
- }
- json_flush_to_file (outf);
- fclose (outf);
- free (filename);
-}
+public:
+ json_file_output_format (diagnostic_context &context,
+ const char *base_file_name)
+ : json_output_format (context),
+ m_base_file_name (xstrdup (base_file_name))
+ {
+ }
-/* Callback for diagnostic_context::m_diagrams.m_emission_cb. */
+ ~json_file_output_format ()
+ {
+ char *filename = concat (m_base_file_name, ".gcc.json", NULL);
+ free (m_base_file_name);
+ m_base_file_name = nullptr;
+ FILE *outf = fopen (filename, "w");
+ if (!outf)
+ {
+ const char *errstr = xstrerror (errno);
+ fnotice (stderr, "error: unable to open '%s' for writing: %s\n",
+ filename, errstr);
+ free (filename);
+ return;
+ }
+ flush_to_file (outf);
+ fclose (outf);
+ free (filename);
+ }
-static void
-json_emit_diagram (diagnostic_context *,
- const diagnostic_diagram &)
-{
- /* No-op. */
-}
+private:
+ char *m_base_file_name;
+};
/* Populate CONTEXT in preparation for JSON output (either to stderr, or
to a file). */
@@ -339,17 +358,8 @@ json_emit_diagram (diagnostic_context *,
static void
diagnostic_output_format_init_json (diagnostic_context *context)
{
- /* Set up top-level JSON array. */
- if (toplevel_array == NULL)
- toplevel_array = new json::array ();
-
/* Override callbacks. */
- context->begin_diagnostic = json_begin_diagnostic;
- context->end_diagnostic = json_end_diagnostic;
- context->begin_group_cb = json_begin_group;
- context->end_group_cb = json_end_group;
context->print_path = NULL; /* handled in json_end_diagnostic. */
- context->m_diagrams.m_emission_cb = json_emit_diagram;
/* The metadata is handled in JSON format, rather than as text. */
context->show_cwe = false;
@@ -368,7 +378,8 @@ void
diagnostic_output_format_init_json_stderr (diagnostic_context *context)
{
diagnostic_output_format_init_json (context);
- context->final_cb = json_stderr_final_cb;
+ delete context->m_output_format;
+ context->m_output_format = new json_stderr_output_format (*context);
}
/* Populate CONTEXT in preparation for JSON output to a file named
@@ -379,8 +390,9 @@ diagnostic_output_format_init_json_file (diagnostic_context *context,
const char *base_file_name)
{
diagnostic_output_format_init_json (context);
- context->final_cb = json_file_final_cb;
- json_output_base_file_name = xstrdup (base_file_name);
+ delete context->m_output_format;
+ context->m_output_format = new json_file_output_format (*context,
+ base_file_name);
}
#if CHECKING_P
diff --git a/gcc/diagnostic-format-sarif.cc b/gcc/diagnostic-format-sarif.cc
index f56c4ce..d8cca21 100644
--- a/gcc/diagnostic-format-sarif.cc
+++ b/gcc/diagnostic-format-sarif.cc
@@ -215,6 +215,9 @@ private:
json::object *
make_reporting_descriptor_reference_object_for_cwe_id (int cwe_id);
json::object *make_artifact_object (const char *filename);
+ char *get_source_lines (const char *filename,
+ int start_line,
+ int end_line) const;
json::object *maybe_make_artifact_content_object (const char *filename) const;
json::object *maybe_make_artifact_content_object (const char *filename,
int start_line,
@@ -248,8 +251,6 @@ private:
int m_tabstop;
};
-static sarif_builder *the_builder;
-
/* class sarif_object : public json::object. */
sarif_property_bag &
@@ -1540,7 +1541,8 @@ json::object *
sarif_builder::maybe_make_artifact_content_object (const char *filename) const
{
/* Let input.cc handle any charset conversion. */
- char_span utf8_content = get_source_file_content (filename);
+ char_span utf8_content
+ = m_context->m_file_cache->get_source_file_content (filename);
if (!utf8_content)
return NULL;
@@ -1558,16 +1560,17 @@ sarif_builder::maybe_make_artifact_content_object (const char *filename) const
/* Attempt to read the given range of lines from FILENAME; return
a freshly-allocated 0-terminated buffer containing them, or NULL. */
-static char *
-get_source_lines (const char *filename,
- int start_line,
- int end_line)
+char *
+sarif_builder::get_source_lines (const char *filename,
+ int start_line,
+ int end_line) const
{
auto_vec<char> result;
for (int line = start_line; line <= end_line; line++)
{
- char_span line_content = location_get_source_line (filename, line);
+ char_span line_content
+ = m_context->m_file_cache->get_source_line (filename, line);
if (!line_content.get_buffer ())
return NULL;
result.reserve (line_content.length () + 1);
@@ -1680,82 +1683,6 @@ sarif_builder::make_artifact_content_object (const char *text) const
return content_obj;
}
-/* No-op implementation of "begin_diagnostic" for SARIF output. */
-
-static void
-sarif_begin_diagnostic (diagnostic_context *, diagnostic_info *)
-{
-}
-
-/* Implementation of "end_diagnostic" for SARIF output. */
-
-static void
-sarif_end_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic,
- diagnostic_t orig_diag_kind)
-{
- gcc_assert (the_builder);
- the_builder->end_diagnostic (context, diagnostic, orig_diag_kind);
-}
-
-/* No-op implementation of "begin_group_cb" for SARIF output. */
-
-static void
-sarif_begin_group (diagnostic_context *)
-{
-}
-
-/* Implementation of "end_group_cb" for SARIF output. */
-
-static void
-sarif_end_group (diagnostic_context *)
-{
- gcc_assert (the_builder);
- the_builder->end_group ();
-}
-
-/* Flush the top-level array to OUTF. */
-
-static void
-sarif_flush_to_file (FILE *outf)
-{
- gcc_assert (the_builder);
- the_builder->flush_to_file (outf);
- delete the_builder;
- the_builder = NULL;
-}
-
-/* Callback for final cleanup for SARIF output to stderr. */
-
-static void
-sarif_stderr_final_cb (diagnostic_context *)
-{
- gcc_assert (the_builder);
- sarif_flush_to_file (stderr);
-}
-
-static char *sarif_output_base_file_name;
-
-/* Callback for final cleanup for SARIF output to a file. */
-
-static void
-sarif_file_final_cb (diagnostic_context *)
-{
- char *filename = concat (sarif_output_base_file_name, ".sarif", NULL);
- FILE *outf = fopen (filename, "w");
- if (!outf)
- {
- const char *errstr = xstrerror (errno);
- fnotice (stderr, "error: unable to open '%s' for writing: %s\n",
- filename, errstr);
- free (filename);
- return;
- }
- gcc_assert (the_builder);
- sarif_flush_to_file (outf);
- fclose (outf);
- free (filename);
-}
-
/* Callback for diagnostic_context::ice_handler_cb for when an ICE
occurs. */
@@ -1773,15 +1700,89 @@ sarif_ice_handler (diagnostic_context *context)
fnotice (stderr, "Internal compiler error:\n");
}
-/* Callback for diagnostic_context::m_diagrams.m_emission_cb. */
+class sarif_output_format : public diagnostic_output_format
+{
+public:
+ void on_begin_group () final override
+ {
+ /* No-op, */
+ }
+ void on_end_group () final override
+ {
+ m_builder.end_group ();
+ }
+ void
+ on_begin_diagnostic (diagnostic_info *) final override
+ {
+ /* No-op, */
+ }
+ void
+ on_end_diagnostic (diagnostic_info *diagnostic,
+ diagnostic_t orig_diag_kind) final override
+ {
+ m_builder.end_diagnostic (&m_context, diagnostic, orig_diag_kind);
+ }
+ void on_diagram (const diagnostic_diagram &diagram) final override
+ {
+ m_builder.emit_diagram (&m_context, diagram);
+ }
-static void
-sarif_emit_diagram (diagnostic_context *context,
- const diagnostic_diagram &diagram)
+protected:
+ sarif_output_format (diagnostic_context &context)
+ : diagnostic_output_format (context),
+ m_builder (&context)
+ {}
+
+ sarif_builder m_builder;
+};
+
+class sarif_stream_output_format : public sarif_output_format
{
- gcc_assert (the_builder);
- the_builder->emit_diagram (context, diagram);
-}
+public:
+ sarif_stream_output_format (diagnostic_context &context, FILE *stream)
+ : sarif_output_format (context),
+ m_stream (stream)
+ {
+ }
+ ~sarif_stream_output_format ()
+ {
+ m_builder.flush_to_file (m_stream);
+ }
+private:
+ FILE *m_stream;
+};
+
+class sarif_file_output_format : public sarif_output_format
+{
+public:
+ sarif_file_output_format (diagnostic_context &context,
+ const char *base_file_name)
+ : sarif_output_format (context),
+ m_base_file_name (xstrdup (base_file_name))
+ {
+ }
+ ~sarif_file_output_format ()
+ {
+ char *filename = concat (m_base_file_name, ".sarif", NULL);
+ free (m_base_file_name);
+ m_base_file_name = nullptr;
+ FILE *outf = fopen (filename, "w");
+ if (!outf)
+ {
+ const char *errstr = xstrerror (errno);
+ fnotice (stderr, "error: unable to open '%s' for writing: %s\n",
+ filename, errstr);
+ free (filename);
+ return;
+ }
+ m_builder.flush_to_file (outf);
+ fclose (outf);
+ free (filename);
+ }
+
+private:
+ char *m_base_file_name;
+};
/* Populate CONTEXT in preparation for SARIF output (either to stderr, or
to a file). */
@@ -1789,16 +1790,9 @@ sarif_emit_diagram (diagnostic_context *context,
static void
diagnostic_output_format_init_sarif (diagnostic_context *context)
{
- the_builder = new sarif_builder (context);
-
/* Override callbacks. */
- context->begin_diagnostic = sarif_begin_diagnostic;
- context->end_diagnostic = sarif_end_diagnostic;
- context->begin_group_cb = sarif_begin_group;
- context->end_group_cb = sarif_end_group;
context->print_path = NULL; /* handled in sarif_end_diagnostic. */
context->ice_handler_cb = sarif_ice_handler;
- context->m_diagrams.m_emission_cb = sarif_emit_diagram;
/* The metadata is handled in SARIF format, rather than as text. */
context->show_cwe = false;
@@ -1817,7 +1811,8 @@ void
diagnostic_output_format_init_sarif_stderr (diagnostic_context *context)
{
diagnostic_output_format_init_sarif (context);
- context->final_cb = sarif_stderr_final_cb;
+ delete context->m_output_format;
+ context->m_output_format = new sarif_stream_output_format (*context, stderr);
}
/* Populate CONTEXT in preparation for SARIF output to a file named
@@ -1825,9 +1820,22 @@ diagnostic_output_format_init_sarif_stderr (diagnostic_context *context)
void
diagnostic_output_format_init_sarif_file (diagnostic_context *context,
- const char *base_file_name)
+ const char *base_file_name)
+{
+ diagnostic_output_format_init_sarif (context);
+ delete context->m_output_format;
+ context->m_output_format = new sarif_file_output_format (*context,
+ base_file_name);
+}
+
+/* Populate CONTEXT in preparation for SARIF output to STREAM. */
+
+void
+diagnostic_output_format_init_sarif_stream (diagnostic_context *context,
+ FILE *stream)
{
diagnostic_output_format_init_sarif (context);
- context->final_cb = sarif_file_final_cb;
- sarif_output_base_file_name = xstrdup (base_file_name);
+ delete context->m_output_format;
+ context->m_output_format = new sarif_stream_output_format (*context,
+ stream);
}
diff --git a/gcc/diagnostic-show-locus.cc b/gcc/diagnostic-show-locus.cc
index 31ef851..4439d9a 100644
--- a/gcc/diagnostic-show-locus.cc
+++ b/gcc/diagnostic-show-locus.cc
@@ -1193,9 +1193,9 @@ layout::layout (diagnostic_context * context,
m_exploc (richloc->get_expanded_location (0), m_policy,
LOCATION_ASPECT_CARET),
m_colorizer (context, diagnostic_kind),
- m_colorize_source_p (context->colorize_source_p),
- m_show_labels_p (context->show_labels_p),
- m_show_line_numbers_p (context->show_line_numbers_p),
+ m_colorize_source_p (context->m_source_printing.colorize_source_p),
+ m_show_labels_p (context->m_source_printing.show_labels_p),
+ m_show_line_numbers_p (context->m_source_printing.show_line_numbers_p),
m_diagnostic_path_p (diagnostic_kind == DK_DIAGNOSTIC_PATH),
m_layout_ranges (richloc->get_num_locations ()),
m_fixit_hints (richloc->get_num_fixit_hints ()),
@@ -1229,8 +1229,8 @@ layout::layout (diagnostic_context * context,
calculate_linenum_width ();
calculate_x_offset_display ();
- if (context->show_ruler_p)
- show_ruler (m_x_offset_display + m_context->caret_max_width);
+ if (context->m_source_printing.show_ruler_p)
+ show_ruler (m_x_offset_display + m_context->m_source_printing.max_width);
}
@@ -1595,7 +1595,8 @@ layout::calculate_linenum_width ()
m_linenum_width = MAX (m_linenum_width, 3);
/* If there's a minimum margin width, apply it (subtracting 1 for the space
after the line number. */
- m_linenum_width = MAX (m_linenum_width, m_context->min_margin_width - 1);
+ m_linenum_width = MAX (m_linenum_width,
+ m_context->m_source_printing.min_margin_width - 1);
}
/* Calculate m_x_offset_display, which improves readability in case the source
@@ -1609,7 +1610,7 @@ layout::calculate_x_offset_display ()
{
m_x_offset_display = 0;
- const int max_width = m_context->caret_max_width;
+ const int max_width = m_context->m_source_printing.max_width;
if (!max_width)
{
/* Nothing to do, the width is not capped. */
@@ -1851,7 +1852,8 @@ layout::print_annotation_line (linenum_type row, const line_bounds lbounds)
/* Draw the caret. */
char caret_char;
if (state.range_idx < rich_location::STATICALLY_ALLOCATED_RANGES)
- caret_char = m_context->caret_chars[state.range_idx];
+ caret_char
+ = m_context->m_source_printing.caret_chars[state.range_idx];
else
caret_char = '^';
pp_character (m_pp, caret_char);
@@ -2838,7 +2840,7 @@ diagnostic_show_locus (diagnostic_context * context,
{
location_t loc = richloc->get_loc ();
/* Do nothing if source-printing has been disabled. */
- if (!context->show_caret)
+ if (!context->m_source_printing.enabled)
return;
/* Don't attempt to print source for UNKNOWN_LOCATION and for builtins. */
@@ -2860,7 +2862,7 @@ diagnostic_show_locus (diagnostic_context * context,
line_span_idx++)
{
const line_span *line_span = layout.get_line_span (line_span_idx);
- if (context->show_line_numbers_p)
+ if (context->m_source_printing.show_line_numbers_p)
{
/* With line numbers, we should show whenever the line-numbering
"jumps". */
@@ -2874,7 +2876,7 @@ diagnostic_show_locus (diagnostic_context * context,
{
expanded_location exploc
= layout.get_expanded_location (line_span);
- context->start_span (context, exploc);
+ context->m_text_callbacks.start_span (context, exploc);
}
}
/* Iterate over the lines within this span (using linenum_arith_t to
@@ -2959,11 +2961,11 @@ test_offset_impl (int caret_byte_col, int max_width,
int left_margin = test_left_margin)
{
test_diagnostic_context dc;
- dc.caret_max_width = max_width;
+ dc.m_source_printing.max_width = max_width;
/* diagnostic_context::min_margin_width sets the minimum space reserved for
the line number plus one space after. */
- dc.min_margin_width = left_margin - test_linenum_sep + 1;
- dc.show_line_numbers_p = true;
+ dc.m_source_printing.min_margin_width = left_margin - test_linenum_sep + 1;
+ dc.m_source_printing.show_line_numbers_p = true;
rich_location richloc (line_table,
linemap_position_for_column (line_table,
caret_byte_col));
@@ -3074,10 +3076,11 @@ test_layout_x_offset_display_utf8 (const line_table_case &case_)
/* Test that the source line is offset as expected when printed. */
{
test_diagnostic_context dc;
- dc.caret_max_width = small_width - 6;
- dc.min_margin_width = test_left_margin - test_linenum_sep + 1;
- dc.show_line_numbers_p = true;
- dc.show_ruler_p = true;
+ dc.m_source_printing.max_width = small_width - 6;
+ dc.m_source_printing.min_margin_width
+ = test_left_margin - test_linenum_sep + 1;
+ dc.m_source_printing.show_line_numbers_p = true;
+ dc.m_source_printing.show_ruler_p = true;
rich_location richloc (line_table,
linemap_position_for_column (line_table,
emoji_col));
@@ -3098,10 +3101,11 @@ test_layout_x_offset_display_utf8 (const line_table_case &case_)
it with a padding space in this case. */
{
test_diagnostic_context dc;
- dc.caret_max_width = small_width - 5;
- dc.min_margin_width = test_left_margin - test_linenum_sep + 1;
- dc.show_line_numbers_p = true;
- dc.show_ruler_p = true;
+ dc.m_source_printing.max_width = small_width - 5;
+ dc.m_source_printing.min_margin_width
+ = test_left_margin - test_linenum_sep + 1;
+ dc.m_source_printing.show_line_numbers_p = true;
+ dc.m_source_printing.show_ruler_p = true;
rich_location richloc (line_table,
linemap_position_for_column (line_table,
emoji_col + 2));
@@ -3201,9 +3205,10 @@ test_layout_x_offset_display_tab (const line_table_case &case_)
test_diagnostic_context dc;
dc.tabstop = tabstop;
static const int small_width = 24;
- dc.caret_max_width = small_width - 4;
- dc.min_margin_width = test_left_margin - test_linenum_sep + 1;
- dc.show_line_numbers_p = true;
+ dc.m_source_printing.max_width = small_width - 4;
+ dc.m_source_printing.min_margin_width
+ = test_left_margin - test_linenum_sep + 1;
+ dc.m_source_printing.show_line_numbers_p = true;
layout test_layout (&dc, &richloc, DK_ERROR);
test_layout.print_line (1);
@@ -3287,19 +3292,19 @@ test_one_liner_multiple_carets_and_ranges ()
= make_location (linemap_position_for_column (line_table, 2),
linemap_position_for_column (line_table, 1),
linemap_position_for_column (line_table, 3));
- dc.caret_chars[0] = 'A';
+ dc.m_source_printing.caret_chars[0] = 'A';
location_t bar
= make_location (linemap_position_for_column (line_table, 8),
linemap_position_for_column (line_table, 7),
linemap_position_for_column (line_table, 9));
- dc.caret_chars[1] = 'B';
+ dc.m_source_printing.caret_chars[1] = 'B';
location_t field
= make_location (linemap_position_for_column (line_table, 13),
linemap_position_for_column (line_table, 11),
linemap_position_for_column (line_table, 15));
- dc.caret_chars[2] = 'C';
+ dc.m_source_printing.caret_chars[2] = 'C';
rich_location richloc (line_table, foo);
richloc.add_range (bar, SHOW_RANGE_WITH_CARET);
@@ -3382,8 +3387,8 @@ test_one_liner_fixit_remove ()
/* Normal, with ruler. */
{
test_diagnostic_context dc;
- dc.show_ruler_p = true;
- dc.caret_max_width = 104;
+ dc.m_source_printing.show_ruler_p = true;
+ dc.m_source_printing.max_width = 104;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ (" 0 0 0 0 0 0 0 0 0 1 \n"
" 1 2 3 4 5 6 7 8 9 0 \n"
@@ -3397,8 +3402,8 @@ test_one_liner_fixit_remove ()
/* Test of adding a prefix, with ruler. */
{
test_diagnostic_context dc;
- dc.show_ruler_p = true;
- dc.caret_max_width = 50;
+ dc.m_source_printing.show_ruler_p = true;
+ dc.m_source_printing.max_width = 50;
pp_prefixing_rule (dc.printer) = DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE;
pp_set_prefix (dc.printer, xstrdup ("TEST PREFIX:"));
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
@@ -3413,9 +3418,9 @@ test_one_liner_fixit_remove ()
/* Test of adding a prefix, with ruler and line numbers. */
{
test_diagnostic_context dc;
- dc.show_ruler_p = true;
- dc.caret_max_width = 50;
- dc.show_line_numbers_p = true;
+ dc.m_source_printing.show_ruler_p = true;
+ dc.m_source_printing.max_width = 50;
+ dc.m_source_printing.show_line_numbers_p = true;
pp_prefixing_rule (dc.printer) = DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE;
pp_set_prefix (dc.printer, xstrdup ("TEST PREFIX:"));
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
@@ -3641,7 +3646,7 @@ test_one_liner_labels ()
/* Verify that we can disable label-printing. */
{
test_diagnostic_context dc;
- dc.show_labels_p = false;
+ dc.m_source_printing.show_labels_p = false;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ (" foo = bar.field;\n"
" ^~~ ~~~ ~~~~~\n",
@@ -3895,19 +3900,19 @@ test_one_liner_multiple_carets_and_ranges_utf8 ()
= make_location (linemap_position_for_column (line_table, 7),
linemap_position_for_column (line_table, 1),
linemap_position_for_column (line_table, 8));
- dc.caret_chars[0] = 'A';
+ dc.m_source_printing.caret_chars[0] = 'A';
location_t bar
= make_location (linemap_position_for_column (line_table, 16),
linemap_position_for_column (line_table, 12),
linemap_position_for_column (line_table, 17));
- dc.caret_chars[1] = 'B';
+ dc.m_source_printing.caret_chars[1] = 'B';
location_t field
= make_location (linemap_position_for_column (line_table, 26),
linemap_position_for_column (line_table, 19),
linemap_position_for_column (line_table, 30));
- dc.caret_chars[2] = 'C';
+ dc.m_source_printing.caret_chars[2] = 'C';
rich_location richloc (line_table, foo);
richloc.add_range (bar, SHOW_RANGE_WITH_CARET);
richloc.add_range (field, SHOW_RANGE_WITH_CARET);
@@ -4348,7 +4353,7 @@ static void
test_one_liner_colorized_utf8 ()
{
test_diagnostic_context dc;
- dc.colorize_source_p = true;
+ dc.m_source_printing.colorize_source_p = true;
diagnostic_color_init (&dc, DIAGNOSTICS_COLOR_YES);
const location_t pi = linemap_position_for_column (line_table, 12);
rich_location richloc (line_table, pi);
@@ -4563,7 +4568,7 @@ test_diagnostic_show_locus_fixit_lines (const line_table_case &case_)
richloc.add_fixit_insert_before (y, ".");
richloc.add_fixit_replace (colon, "=");
test_diagnostic_context dc;
- dc.show_line_numbers_p = true;
+ dc.m_source_printing.show_line_numbers_p = true;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ (" 3 | y\n"
" | .\n"
@@ -5304,7 +5309,7 @@ test_fixit_insert_containing_newline (const line_table_case &case_)
/* With line numbers. */
{
test_diagnostic_context dc;
- dc.show_line_numbers_p = true;
+ dc.m_source_printing.show_line_numbers_p = true;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ (" 2 | x = a;\n"
" +++ |+ break;\n"
@@ -5383,7 +5388,7 @@ test_fixit_insert_containing_newline_2 (const line_table_case &case_)
consolidated, since it makes little sense to skip line 2. */
{
test_diagnostic_context dc;
- dc.show_line_numbers_p = true;
+ dc.m_source_printing.show_line_numbers_p = true;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ (" +++ |+#include <stdio.h>\n"
" 1 | test (int ch)\n"
@@ -5679,8 +5684,8 @@ test_line_numbers_multiline_range ()
location_t loc = make_location (caret, start, finish);
test_diagnostic_context dc;
- dc.show_line_numbers_p = true;
- dc.min_margin_width = 0;
+ dc.m_source_printing.show_line_numbers_p = true;
+ dc.m_source_printing.min_margin_width = 0;
gcc_rich_location richloc (loc);
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ (" 9 | this is line 9\n"
diff --git a/gcc/diagnostic.cc b/gcc/diagnostic.cc
index 00183b1..6e46371 100644
--- a/gcc/diagnostic.cc
+++ b/gcc/diagnostic.cc
@@ -147,29 +147,7 @@ diagnostic_set_caret_max_width (diagnostic_context *context, int value)
if (value <= 0)
value = INT_MAX;
- context->caret_max_width = value;
-}
-
-/* Default implementation of final_cb. */
-
-static void
-default_diagnostic_final_cb (diagnostic_context *context)
-{
- /* Some of the errors may actually have been warnings. */
- if (diagnostic_kind_count (context, DK_WERROR))
- {
- /* -Werror was given. */
- if (context->warning_as_error_requested)
- pp_verbatim (context->printer,
- _("%s: all warnings being treated as errors"),
- progname);
- /* At least one -Werror= was given. */
- else
- pp_verbatim (context->printer,
- _("%s: some warnings being treated as errors"),
- progname);
- pp_newline_and_flush (context->printer);
- }
+ context->m_source_printing.max_width = value;
}
/* Initialize the diagnostic message outputting machinery. */
@@ -189,10 +167,10 @@ diagnostic_initialize (diagnostic_context *context, int n_opts)
context->classify_diagnostic = XNEWVEC (diagnostic_t, n_opts);
for (i = 0; i < n_opts; i++)
context->classify_diagnostic[i] = DK_UNSPECIFIED;
- context->show_caret = false;
+ context->m_source_printing.enabled = false;
diagnostic_set_caret_max_width (context, pp_line_cutoff (context->printer));
for (i = 0; i < rich_location::STATICALLY_ALLOCATED_RANGES; i++)
- context->caret_chars[i] = '^';
+ context->m_source_printing.caret_chars[i] = '^';
context->show_cwe = false;
context->show_rules = false;
context->path_format = DPF_NONE;
@@ -209,7 +187,7 @@ diagnostic_initialize (diagnostic_context *context, int n_opts)
context->max_errors = 0;
context->internal_error = NULL;
diagnostic_starter (context) = default_diagnostic_starter;
- context->start_span = default_diagnostic_start_span_fn;
+ context->m_text_callbacks.start_span = default_diagnostic_start_span_fn;
diagnostic_finalizer (context) = default_diagnostic_finalizer;
context->option_enabled = NULL;
context->option_state = NULL;
@@ -220,13 +198,13 @@ diagnostic_initialize (diagnostic_context *context, int n_opts)
context->x_data = NULL;
context->lock = 0;
context->inhibit_notes_p = false;
- context->colorize_source_p = false;
- context->show_labels_p = false;
- context->show_line_numbers_p = false;
- context->min_margin_width = 0;
- context->show_ruler_p = false;
+ context->m_source_printing.colorize_source_p = false;
+ context->m_source_printing.show_labels_p = false;
+ context->m_source_printing.show_line_numbers_p = false;
+ context->m_source_printing.min_margin_width = 0;
+ context->m_source_printing.show_ruler_p = false;
context->report_bug = false;
-
+ context->extra_output_kind = EXTRA_DIAGNOSTIC_OUTPUT_none;
if (const char *var = getenv ("GCC_EXTRA_DIAGNOSTIC_OUTPUT"))
{
if (!strcmp (var, "fixits-v1"))
@@ -242,16 +220,23 @@ diagnostic_initialize (diagnostic_context *context, int n_opts)
context->edit_context_ptr = NULL;
context->diagnostic_group_nesting_depth = 0;
context->diagnostic_group_emission_count = 0;
- context->begin_group_cb = NULL;
- context->end_group_cb = NULL;
- context->final_cb = default_diagnostic_final_cb;
+ context->m_output_format = new diagnostic_text_output_format (*context);
+ context->set_locations_cb = nullptr;
context->ice_handler_cb = NULL;
context->includes_seen = NULL;
context->m_client_data_hooks = NULL;
context->m_diagrams.m_theme = NULL;
- context->m_diagrams.m_emission_cb = NULL;
- diagnostics_text_art_charset_init (context,
- DIAGNOSTICS_TEXT_ART_CHARSET_DEFAULT);
+
+ enum diagnostic_text_art_charset text_art_charset
+ = DIAGNOSTICS_TEXT_ART_CHARSET_DEFAULT;
+ if (const char *lang = getenv ("LANG"))
+ {
+ /* For LANG=C, don't assume the terminal supports anything
+ other than ASCII. */
+ if (!strcmp (lang, "C"))
+ text_art_charset = DIAGNOSTICS_TEXT_ART_CHARSET_ASCII;
+ }
+ diagnostics_text_art_charset_init (context, text_art_charset);
}
/* Maybe initialize the color support. We require clients to do this
@@ -325,8 +310,8 @@ void diagnostic_initialize_input_context (diagnostic_context *context,
void
diagnostic_finish (diagnostic_context *context)
{
- if (context->final_cb)
- context->final_cb (context);
+ delete context->m_output_format;
+ context->m_output_format= nullptr;
if (context->m_diagrams.m_theme)
{
@@ -372,9 +357,9 @@ diagnostic_set_info_translated (diagnostic_info *diagnostic, const char *msg,
diagnostic_t kind)
{
gcc_assert (richloc);
- diagnostic->message.err_no = errno;
- diagnostic->message.args_ptr = args;
- diagnostic->message.format_spec = msg;
+ diagnostic->message.m_err_no = errno;
+ diagnostic->message.m_args_ptr = args;
+ diagnostic->message.m_format_spec = msg;
diagnostic->message.m_richloc = richloc;
diagnostic->richloc = richloc;
diagnostic->metadata = NULL;
@@ -1507,6 +1492,8 @@ diagnostic_report_diagnostic (diagnostic_context *context,
location_t location = diagnostic_location (diagnostic);
diagnostic_t orig_diag_kind = diagnostic->kind;
+ gcc_assert (context->m_output_format);
+
/* Give preference to being able to inhibit warnings, before they
get reclassified to something else. */
bool report_warning_p = true;
@@ -1551,7 +1538,7 @@ diagnostic_report_diagnostic (diagnostic_context *context,
&& diagnostic->kind == DK_WARNING)
diagnostic->kind = DK_ERROR;
- diagnostic->message.x_data = &diagnostic->x_data;
+ diagnostic->message.m_data = &diagnostic->x_data;
/* Check to see if the diagnostic is enabled at the location and
not disabled by #pragma GCC diagnostic anywhere along the inlining
@@ -1587,8 +1574,8 @@ diagnostic_report_diagnostic (diagnostic_context *context,
}
if (context->internal_error)
(*context->internal_error) (context,
- diagnostic->message.format_spec,
- diagnostic->message.args_ptr);
+ diagnostic->message.m_format_spec,
+ diagnostic->message.m_args_ptr);
}
if (diagnostic->kind == DK_ERROR && orig_diag_kind == DK_WARNING)
++diagnostic_kind_count (context, DK_WERROR);
@@ -1597,14 +1584,11 @@ diagnostic_report_diagnostic (diagnostic_context *context,
/* Is this the initial diagnostic within the stack of groups? */
if (context->diagnostic_group_emission_count == 0)
- {
- if (context->begin_group_cb)
- context->begin_group_cb (context);
- }
+ context->m_output_format->on_begin_group ();
context->diagnostic_group_emission_count++;
pp_format (context->printer, &diagnostic->message);
- (*diagnostic_starter (context)) (context, diagnostic);
+ context->m_output_format->on_begin_diagnostic (diagnostic);
pp_output_formatted_text (context->printer);
if (context->show_cwe)
print_any_cwe (context, diagnostic);
@@ -1612,7 +1596,7 @@ diagnostic_report_diagnostic (diagnostic_context *context,
print_any_rules (context, diagnostic);
if (context->show_option_requested)
print_option_information (context, diagnostic, orig_diag_kind);
- (*diagnostic_finalizer (context)) (context, diagnostic, orig_diag_kind);
+ context->m_output_format->on_end_diagnostic (diagnostic, orig_diag_kind);
switch (context->extra_output_kind)
{
default:
@@ -1703,14 +1687,10 @@ trim_filename (const char *name)
void
verbatim (const char *gmsgid, ...)
{
- text_info text;
va_list ap;
va_start (ap, gmsgid);
- text.err_no = errno;
- text.args_ptr = &ap;
- text.format_spec = _(gmsgid);
- text.x_data = NULL;
+ text_info text (_(gmsgid), &ap, errno);
pp_format_verbatim (global_dc->printer, &text);
pp_newline_and_flush (global_dc->printer);
va_end (ap);
@@ -2213,22 +2193,8 @@ diagnostic_emit_diagram (diagnostic_context *context,
if (context->m_diagrams.m_theme == nullptr)
return;
- if (context->m_diagrams.m_emission_cb)
- {
- context->m_diagrams.m_emission_cb (context, diagram);
- return;
- }
-
- /* Default implementation. */
- char *saved_prefix = pp_take_prefix (context->printer);
- pp_set_prefix (context->printer, NULL);
- /* Use a newline before and after and a two-space indent
- to make the diagram stand out a little from the wall of text. */
- pp_newline (context->printer);
- diagram.get_canvas ().print_to_pp (context->printer, " ");
- pp_newline (context->printer);
- pp_set_prefix (context->printer, saved_prefix);
- pp_flush (context->printer);
+ gcc_assert (context->m_output_format);
+ context->m_output_format->on_diagram (diagram);
}
/* Special case error functions. Most are implemented in terms of the
@@ -2330,14 +2296,59 @@ auto_diagnostic_group::~auto_diagnostic_group ()
If any diagnostics were emitted, give the context a chance
to do something. */
if (global_dc->diagnostic_group_emission_count > 0)
- {
- if (global_dc->end_group_cb)
- global_dc->end_group_cb (global_dc);
- }
+ global_dc->m_output_format->on_end_group ();
global_dc->diagnostic_group_emission_count = 0;
}
}
+/* class diagnostic_text_output_format : public diagnostic_output_format. */
+
+diagnostic_text_output_format::~diagnostic_text_output_format ()
+{
+ /* Some of the errors may actually have been warnings. */
+ if (diagnostic_kind_count (&m_context, DK_WERROR))
+ {
+ /* -Werror was given. */
+ if (m_context.warning_as_error_requested)
+ pp_verbatim (m_context.printer,
+ _("%s: all warnings being treated as errors"),
+ progname);
+ /* At least one -Werror= was given. */
+ else
+ pp_verbatim (m_context.printer,
+ _("%s: some warnings being treated as errors"),
+ progname);
+ pp_newline_and_flush (m_context.printer);
+ }
+}
+
+void
+diagnostic_text_output_format::on_begin_diagnostic (diagnostic_info *diagnostic)
+{
+ (*diagnostic_starter (&m_context)) (&m_context, diagnostic);
+}
+
+void
+diagnostic_text_output_format::on_end_diagnostic (diagnostic_info *diagnostic,
+ diagnostic_t orig_diag_kind)
+{
+ (*diagnostic_finalizer (&m_context)) (&m_context, diagnostic, orig_diag_kind);
+}
+
+void
+diagnostic_text_output_format::on_diagram (const diagnostic_diagram &diagram)
+{
+ char *saved_prefix = pp_take_prefix (m_context.printer);
+ pp_set_prefix (m_context.printer, NULL);
+ /* Use a newline before and after and a two-space indent
+ to make the diagram stand out a little from the wall of text. */
+ pp_newline (m_context.printer);
+ diagram.get_canvas ().print_to_pp (m_context.printer, " ");
+ pp_newline (m_context.printer);
+ pp_set_prefix (m_context.printer, saved_prefix);
+ pp_flush (m_context.printer);
+}
+
/* Set the output format for CONTEXT to FORMAT, using BASE_FILE_NAME for
file-based output formats. */
@@ -2464,19 +2475,13 @@ simple_diagnostic_path::add_event (location_t loc, tree fndecl, int depth,
pretty_printer *pp = m_event_pp;
pp_clear_output_area (pp);
- text_info ti;
rich_location rich_loc (line_table, UNKNOWN_LOCATION);
va_list ap;
va_start (ap, fmt);
- ti.format_spec = _(fmt);
- ti.args_ptr = &ap;
- ti.err_no = 0;
- ti.x_data = NULL;
- ti.m_richloc = &rich_loc;
-
+ text_info ti (_(fmt), &ap, 0, nullptr, &rich_loc);
pp_format (pp, &ti);
pp_output_formatted_text (pp);
@@ -2501,18 +2506,13 @@ simple_diagnostic_path::add_thread_event (diagnostic_thread_id_t thread_id,
pretty_printer *pp = m_event_pp;
pp_clear_output_area (pp);
- text_info ti;
rich_location rich_loc (line_table, UNKNOWN_LOCATION);
va_list ap;
va_start (ap, fmt);
- ti.format_spec = _(fmt);
- ti.args_ptr = &ap;
- ti.err_no = 0;
- ti.x_data = NULL;
- ti.m_richloc = &rich_loc;
+ text_info ti (_(fmt), &ap, 0, nullptr, &rich_loc);
pp_format (pp, &ti);
pp_output_formatted_text (pp);
diff --git a/gcc/diagnostic.h b/gcc/diagnostic.h
index 4ec83a9..a2c8740 100644
--- a/gcc/diagnostic.h
+++ b/gcc/diagnostic.h
@@ -177,6 +177,51 @@ class diagnostic_client_data_hooks;
class logical_location;
class diagnostic_diagram;
+/* Abstract base class for a particular output format for diagnostics;
+ each value of -fdiagnostics-output-format= will have its own
+ implementation. */
+
+class diagnostic_output_format
+{
+public:
+ virtual ~diagnostic_output_format () {}
+
+ virtual void on_begin_group () = 0;
+ virtual void on_end_group () = 0;
+ virtual void on_begin_diagnostic (diagnostic_info *) = 0;
+ virtual void on_end_diagnostic (diagnostic_info *,
+ diagnostic_t orig_diag_kind) = 0;
+ virtual void on_diagram (const diagnostic_diagram &diagram) = 0;
+
+protected:
+ diagnostic_output_format (diagnostic_context &context)
+ : m_context (context)
+ {}
+
+ diagnostic_context &m_context;
+};
+
+/* Subclass of diagnostic_output_format for classic text-based output
+ to stderr.
+
+ Uses diagnostic_context.m_text_callbacks to provide client-specific
+ textual output (e.g. include paths, macro expansions, etc). */
+
+class diagnostic_text_output_format : public diagnostic_output_format
+{
+public:
+ diagnostic_text_output_format (diagnostic_context &context)
+ : diagnostic_output_format (context)
+ {}
+ ~diagnostic_text_output_format ();
+ void on_begin_group () override {}
+ void on_end_group () override {}
+ void on_begin_diagnostic (diagnostic_info *) override;
+ void on_end_diagnostic (diagnostic_info *,
+ diagnostic_t orig_diag_kind) override;
+ void on_diagram (const diagnostic_diagram &diagram) override;
+};
+
/* This data structure bundles altogether any information relevant to
the context of a diagnostic message. */
struct diagnostic_context
@@ -219,16 +264,6 @@ struct diagnostic_context
int *push_list;
int n_push;
- /* True if we should print the source line with a caret indicating
- the location. */
- bool show_caret;
-
- /* Maximum width of the source line printed. */
- int caret_max_width;
-
- /* Character used for caret diagnostics. */
- char caret_chars[rich_location::STATICALLY_ALLOCATED_RANGES];
-
/* True if we should print any CWE identifiers associated with
diagnostics. */
bool show_cwe;
@@ -274,22 +309,25 @@ struct diagnostic_context
/* Maximum number of errors to report. */
int max_errors;
- /* This function is called before any message is printed out. It is
- responsible for preparing message prefix and such. For example, it
- might say:
- In file included from "/usr/local/include/curses.h:5:
- from "/home/gdr/src/nifty_printer.h:56:
- ...
- */
- diagnostic_starter_fn begin_diagnostic;
-
- /* This function is called by diagnostic_show_locus in between
- disjoint spans of source code, so that the context can print
- something to indicate that a new span of source code has begun. */
- diagnostic_start_span_fn start_span;
-
- /* This function is called after the diagnostic message is printed. */
- diagnostic_finalizer_fn end_diagnostic;
+ /* Client-supplied callbacks for use in text output. */
+ struct {
+ /* This function is called before any message is printed out. It is
+ responsible for preparing message prefix and such. For example, it
+ might say:
+ In file included from "/usr/local/include/curses.h:5:
+ from "/home/gdr/src/nifty_printer.h:56:
+ ...
+ */
+ diagnostic_starter_fn begin_diagnostic;
+
+ /* This function is called by diagnostic_show_locus in between
+ disjoint spans of source code, so that the context can print
+ something to indicate that a new span of source code has begun. */
+ diagnostic_start_span_fn start_span;
+
+ /* This function is called after the diagnostic message is printed. */
+ diagnostic_finalizer_fn end_diagnostic;
+ } m_text_callbacks;
/* Client hook to report an internal error. */
void (*internal_error) (diagnostic_context *, const char *, va_list *);
@@ -336,29 +374,49 @@ struct diagnostic_context
bool inhibit_notes_p;
- /* When printing source code, should the characters at carets and ranges
- be colorized? (assuming colorization is on at all).
- This should be true for frontends that generate range information
- (so that the ranges of code are colorized),
- and false for frontends that merely specify points within the
- source code (to avoid e.g. colorizing just the first character in
- a token, which would look strange). */
- bool colorize_source_p;
+ /* Fields relating to printing the user's source code (potentially with
+ a margin, underlining, labels, etc). */
+ struct {
+
+ /* True if we should print the source line with a caret indicating
+ the location.
+ Corresponds to -fdiagnostics-show-caret. */
+ bool enabled;
+
+ /* Maximum width of the source line printed. */
+ int max_width;
+
+ /* Character used at the caret when printing source locations. */
+ char caret_chars[rich_location::STATICALLY_ALLOCATED_RANGES];
- /* When printing source code, should labelled ranges be printed? */
- bool show_labels_p;
+ /* When printing source code, should the characters at carets and ranges
+ be colorized? (assuming colorization is on at all).
+ This should be true for frontends that generate range information
+ (so that the ranges of code are colorized),
+ and false for frontends that merely specify points within the
+ source code (to avoid e.g. colorizing just the first character in
+ a token, which would look strange). */
+ bool colorize_source_p;
- /* When printing source code, should there be a left-hand margin
- showing line numbers? */
- bool show_line_numbers_p;
+ /* When printing source code, should labelled ranges be printed?
+ Corresponds to -fdiagnostics-show-labels. */
+ bool show_labels_p;
- /* If printing source code, what should the minimum width of the margin
- be? Line numbers will be right-aligned, and padded to this width. */
- int min_margin_width;
+ /* When printing source code, should there be a left-hand margin
+ showing line numbers?
+ Corresponds to -fdiagnostics-show-line-numbers. */
+ bool show_line_numbers_p;
- /* Usable by plugins; if true, print a debugging ruler above the
- source output. */
- bool show_ruler_p;
+ /* If printing source code, what should the minimum width of the margin
+ be? Line numbers will be right-aligned, and padded to this width.
+ Corresponds to -fdiagnostics-minimum-margin-width=VALUE. */
+ int min_margin_width;
+
+ /* Usable by plugins; if true, print a debugging ruler above the
+ source output. */
+ bool show_ruler_p;
+
+ } m_source_printing;
/* True if -freport-bug option is used. */
bool report_bug;
@@ -392,18 +450,9 @@ struct diagnostic_context
diagnostic_group was pushed. */
int diagnostic_group_emission_count;
- /* Optional callbacks for handling diagnostic groups. */
-
- /* If non-NULL, this will be called immediately before the first
- time a diagnostic is emitted within a stack of groups. */
- void (*begin_group_cb) (diagnostic_context * context);
-
- /* If non-NULL, this will be called when a stack of groups is
- popped if any diagnostics were emitted within that group. */
- void (*end_group_cb) (diagnostic_context * context);
-
- /* Callback for final cleanup. */
- void (*final_cb) (diagnostic_context *context);
+ /* How to output diagnostics (text vs a structured format such as JSON).
+ Must be non-NULL; owned by context. */
+ diagnostic_output_format *m_output_format;
/* Callback to set the locations of call sites along the inlining
stack corresponding to a diagnostic location. Needed to traverse
@@ -431,9 +480,6 @@ struct diagnostic_context
Can be NULL (if text art is disabled). */
text_art::theme *m_theme;
- /* Callback for emitting diagrams. */
- void (*m_emission_cb) (diagnostic_context *context,
- const diagnostic_diagram &diagram);
} m_diagrams;
};
@@ -444,12 +490,13 @@ diagnostic_inhibit_notes (diagnostic_context * context)
}
-/* Client supplied function to announce a diagnostic. */
-#define diagnostic_starter(DC) (DC)->begin_diagnostic
+/* Client supplied function to announce a diagnostic
+ (for text-based diagnostic output). */
+#define diagnostic_starter(DC) (DC)->m_text_callbacks.begin_diagnostic
/* Client supplied function called after a diagnostic message is
- displayed. */
-#define diagnostic_finalizer(DC) (DC)->end_diagnostic
+ displayed (for text-based diagnostic output). */
+#define diagnostic_finalizer(DC) (DC)->m_text_callbacks.end_diagnostic
/* Extension hooks for client. */
#define diagnostic_context_auxiliary_data(DC) (DC)->x_data
@@ -605,8 +652,9 @@ inline bool
diagnostic_same_line (const diagnostic_context *context,
expanded_location s1, expanded_location s2)
{
- return s2.column && s1.line == s2.line
- && context->caret_max_width - CARET_LINE_MARGIN > abs (s1.column - s2.column);
+ return (s2.column && s1.line == s2.line
+ && (context->m_source_printing.max_width - CARET_LINE_MARGIN
+ > abs (s1.column - s2.column)));
}
extern const char *diagnostic_get_color_for_kind (diagnostic_t kind);
@@ -627,6 +675,8 @@ extern void diagnostic_output_format_init_json_file (diagnostic_context *context
extern void diagnostic_output_format_init_sarif_stderr (diagnostic_context *context);
extern void diagnostic_output_format_init_sarif_file (diagnostic_context *context,
const char *base_file_name);
+extern void diagnostic_output_format_init_sarif_stream (diagnostic_context *context,
+ FILE *stream);
/* Compute the number of digits in the decimal representation of an integer. */
extern int num_digits (int);
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index b4770f1..a3db942 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -2537,13 +2537,14 @@ for each target. However, a considerable number of attributes are
supported by most, if not all targets. Those are described in
the @ref{Common Function Attributes} section.
-Function attributes are introduced by the @code{__attribute__} keyword
-in the declaration of a function, followed by an attribute specification
-enclosed in double parentheses. You can specify multiple attributes in
-a declaration by separating them by commas within the double parentheses
-or by immediately following one attribute specification with another.
-@xref{Attribute Syntax}, for the exact rules on attribute syntax and
-placement. Compatible attribute specifications on distinct declarations
+GCC provides two different ways to specify attributes: the traditional
+GNU syntax using @samp{__attribute__ ((...))} annotations, and the
+newer standard C and C++ syntax using @samp{[[...]]} with the
+@samp{gnu::} prefix on attribute names. Note that the exact rules for
+placement of attributes in your source code are different depending on
+which syntax you use. @xref{Attribute Syntax}, for details.
+
+Compatible attribute specifications on distinct declarations
of the same function are merged. An attribute specification that is not
compatible with attributes already applied to a declaration of the same
function is ignored with a warning.
@@ -3109,7 +3110,9 @@ file descriptor opened with @code{O_RDONLY}.
@cindex @code{flatten} function attribute
@item flatten
Generally, inlining into a function is limited. For a function marked with
-this attribute, every call inside this function is inlined, if possible.
+this attribute, every call inside this function is inlined including the
+calls such inlining introduces to the function (but not recursive calls
+to the function itself), if possible.
Functions declared with attribute @code{noinline} and similar are not
inlined. Whether the function itself is considered for inlining depends
on its size and the current inlining parameters.
@@ -7207,6 +7210,11 @@ Enable/disable the generation of the SHA512 instructions.
@itemx no-sm4
Enable/disable the generation of the SM4 instructions.
+@cindex @code{target("usermsr")} function attribute, x86
+@item usermsr
+@itemx no-usermsr
+Enable/disable the generation of the USER_MSR instructions.
+
@cindex @code{target("cld")} function attribute, x86
@item cld
@itemx no-cld
@@ -7433,10 +7441,9 @@ when this attribute is present.
@cindex attribute of variables
@cindex variable attributes
-The keyword @code{__attribute__} allows you to specify special properties
+You can use attributes to specify special properties
of variables, function parameters, or structure, union, and, in C++, class
-members. This @code{__attribute__} keyword is followed by an attribute
-specification enclosed in double parentheses. Some attributes are currently
+members. Some attributes are currently
defined generically for variables. Other attributes are defined for
variables on particular target systems. Other attributes are available
for functions (@pxref{Function Attributes}), labels (@pxref{Label Attributes}),
@@ -7445,8 +7452,12 @@ enumerators (@pxref{Enumerator Attributes}), statements
Other front ends might define more attributes
(@pxref{C++ Extensions,,Extensions to the C++ Language}).
-@xref{Attribute Syntax}, for details of the exact syntax for using
-attributes.
+GCC provides two different ways to specify attributes: the traditional
+GNU syntax using @samp{__attribute__ ((...))} annotations, and the
+newer standard C and C++ syntax using @samp{[[...]]} with the
+@samp{gnu::} prefix on attribute names. Note that the exact rules for
+placement of attributes in your source code are different depending on
+which syntax you use. @xref{Attribute Syntax}, for details.
@menu
* Common Variable Attributes::
@@ -8508,7 +8519,7 @@ placed in either the @code{.bss_below100} section or the
@cindex attribute of types
@cindex type attributes
-The keyword @code{__attribute__} allows you to specify various special
+You can use attributes to specify various special
properties of types. Some type attributes apply only to structure and
union types, and in C++, also class types, while others can apply to
any type defined via a @code{typedef} declaration. Unless otherwise
@@ -8521,19 +8532,20 @@ labels (@pxref{Label Attributes}), enumerators (@pxref{Enumerator
Attributes}), statements (@pxref{Statement Attributes}), and for variables
(@pxref{Variable Attributes}).
-The @code{__attribute__} keyword is followed by an attribute specification
-enclosed in double parentheses.
+GCC provides two different ways to specify attributes: the traditional
+GNU syntax using @samp{__attribute__ ((...))} annotations, and the
+newer standard C and C++ syntax using @samp{[[...]]} with the
+@samp{gnu::} prefix on attribute names. Note that the exact rules for
+placement of attributes in your source code are different depending on
+which syntax you use. @xref{Attribute Syntax}, for details.
You may specify type attributes in an enum, struct or union type
declaration or definition by placing them immediately after the
@code{struct}, @code{union} or @code{enum} keyword. You can also place
them just past the closing curly brace of the definition, but this is less
preferred because logically the type should be fully defined at
-the closing brace.
-
-You can also include type attributes in a @code{typedef} declaration.
-@xref{Attribute Syntax}, for details of the exact syntax for using
-attributes.
+the closing brace. You can also include type attributes in a
+@code{typedef} declaration.
@menu
* Common Type Attributes::
@@ -9347,9 +9359,34 @@ have to optimize it to just @code{return 42 + 42;}.
@node Attribute Syntax
@section Attribute Syntax
@cindex attribute syntax
+@cindex C standard attributes
+@cindex C++ standard attributes
+@cindex standard attribute syntax
+@cindex GNU attribute syntax
+
+GCC provides two different ways to specify attributes: the standard C
+and C++ syntax using double square brackets, and the older GNU
+extension syntax using the @code{@w{__attribute__}} keyword, which predates
+the adoption of the standard syntax and is still widely used in older
+code.
-This section describes the syntax with which @code{__attribute__} may be
-used, and the constructs to which attribute specifiers bind, for the C
+The standard @samp{[[]]} attribute syntax is recognized by GCC's
+default language dialect for both C and C++. More specifically, this
+syntax was first introduced in the C++11 language standard
+(@pxref{Standards}), and is supported by GCC in C++ code with
+@option{-std=c++11} or @option{-std=gnu++11} or later. It is also
+part of the C2x language standard and is supported when compiling C
+code with @option{-std=c2x} or @option{-std=gnu2x} or later.
+
+When using GNU-specific attributes in the standard syntax, you must
+prefix their names with @samp{gnu::}, such as @code{gnu::section}.
+Refer to the relevant language standards for exact details on the
+placement of @samp{[[]]} attributes within your code, as they differ
+in some details from the rules for the GNU attribute syntax.
+
+The remainder of this section describes the details of the GNU extension
+@code{__attribute__} syntax,
+and the constructs to which attribute specifiers bind, for the C
language. Some details may vary for C++ and Objective-C@. Because of
limitations in the grammar for attributes, some forms described here
may not be successfully parsed in all cases.
@@ -14938,6 +14975,7 @@ instructions, but allow the compiler to schedule those calls.
* PRU Built-in Functions::
* RISC-V Built-in Functions::
* RISC-V Vector Intrinsics::
+* CORE-V Built-in Functions::
* RX Built-in Functions::
* S/390 System z Built-in Functions::
* SH Built-in Functions::
@@ -21680,6 +21718,179 @@ vector intrinsic specification, which is available at the following link:
@uref{https://github.com/riscv-non-isa/rvv-intrinsic-doc/tree/v0.11.x}.
All of these functions are declared in the include file @file{riscv_vector.h}.
+@node CORE-V Built-in Functions
+@subsection CORE-V Built-in Functions
+
+These built-in functions are available for the CORE-V MAC machine
+architecture. For more information on CORE-V built-ins, please see
+@uref{https://github.com/openhwgroup/core-v-sw/blob/master/specifications/corev-builtin-spec.md#listing-of-multiply-accumulate-builtins-xcvmac}.
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_mac (int32_t, int32_t, int32_t)
+Generates the @code{cv.mac} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_msu (int32_t, int32_t, int32_t)
+Generates the @code{cv.msu} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_muluN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.muluN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_mulhhuN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.mulhhuN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_mulsN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.mulsN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_mulhhsN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.mulhhsN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_muluRN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.muluRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_mulhhuRN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.mulhhuRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_mulsRN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.mulsRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_mulhhsRN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.mulhhsRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_macuN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.macuN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_machhuN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.machhuN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_macsN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.macsN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_machhsN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.machhsN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_macuRN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.macuRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_mac_machhuRN (uint32_t, uint32_t, uint8_t)
+Generates the @code{cv.machhuRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_macsRN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.macsRN} machine instruction.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_mac_machhsRN (int32_t, int32_t, uint8_t)
+Generates the @code{cv.machhsRN} machine instruction.
+@end deftypefn
+
+These built-in functions are available for the CORE-V ALU machine
+architecture. For more information on CORE-V built-ins, please see
+@uref{https://github.com/openhwgroup/core-v-sw/blob/master/specifications/corev-builtin-spec.md#listing-of-miscellaneous-alu-builtins-xcvalu}.
+
+@deftypefn {Built-in Function} {int} __builtin_riscv_cv_alu_slet (int32_t, int32_t)
+Generated assembler @code{cv.slet}
+@end deftypefn
+
+@deftypefn {Built-in Function} {int} __builtin_riscv_cv_alu_sletu (uint32_t, uint32_t)
+Generated assembler @code{cv.sletu}
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_min (int32_t, int32_t)
+Generated assembler @code{cv.min}
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_minu (uint32_t, uint32_t)
+Generated assembler @code{cv.minu}
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_max (int32_t, int32_t)
+Generated assembler @code{cv.max}
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_maxu (uint32_t, uint32_t)
+Generated assembler @code{cv.maxu}
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_exths (int16_t)
+Generated assembler @code{cv.exths}
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_exthz (uint16_t)
+Generated assembler @code{cv.exthz}
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_extbs (int8_t)
+Generated assembler @code{cv.extbs}
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_extbz (uint8_t)
+Generated assembler @code{cv.extbz}
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_clip (int32_t, uint32_t)
+Generated assembler @code{cv.clip} if the uint32_t operand is a constant and an exact power of 2.
+Generated assembler @code{cv.clipr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_clipu (uint32_t, uint32_t)
+Generated assembler @code{cv.clipu} if the uint32_t operand is a constant and an exact power of 2.
+Generated assembler @code{cv.clipur} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_addN (int32_t, int32_t, uint8_t)
+Generated assembler @code{cv.addN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.addNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_adduN (uint32_t, uint32_t, uint8_t)
+Generated assembler @code{cv.adduN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.adduNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_addRN (int32_t, int32_t, uint8_t)
+Generated assembler @code{cv.addRN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.addRNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_adduRN (uint32_t, uint32_t, uint8_t)
+Generated assembler @code{cv.adduRN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.adduRNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_subN (int32_t, int32_t, uint8_t)
+Generated assembler @code{cv.subN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.subNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_subuN (uint32_t, uint32_t, uint8_t)
+Generated assembler @code{cv.subuN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.subuNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {int32_t} __builtin_riscv_cv_alu_subRN (int32_t, int32_t, uint8_t)
+Generated assembler @code{cv.subRN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.subRNr} if it is a register.
+@end deftypefn
+
+@deftypefn {Built-in Function} {uint32_t} __builtin_riscv_cv_alu_subuRN (uint32_t, uint32_t, uint8_t)
+Generated assembler @code{cv.subuRN} if the uint8_t operand is a constant and in the range 0 <= shft <= 31.
+Generated assembler @code{cv.subuRNr} if it is a register.
+@end deftypefn
+
@node RX Built-in Functions
@subsection RX Built-in Functions
GCC supports some of the RX instructions which cannot be expressed in
@@ -22425,6 +22636,12 @@ Intel Core i7 Arrow Lake CPU.
@item arrowlake-s
Intel Core i7 Arrow Lake S CPU.
+@item clearwaterforest
+Intel Atom Clearwater Forest CPU.
+
+@item pantherlake
+Intel Core i7 Panther Lake CPU.
+
@item knl
Intel Knights Landing CPU.
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 4085fc9..a0da7f9 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -543,6 +543,7 @@ Objective-C and Objective-C++ Dialects}.
-fauto-inc-dec -fbranch-probabilities
-fcaller-saves
-fcombine-stack-adjustments -fconserve-stack
+-ffold-mem-offsets
-fcompare-elim -fcprop-registers -fcrossjumping
-fcse-follow-jumps -fcse-skip-blocks -fcx-fortran-rules
-fcx-limited-range
@@ -1443,7 +1444,8 @@ See RS/6000 and PowerPC Options.
-mrdseed -msgx -mavx512vp2intersect -mserialize -mtsxldtrk
-mamx-tile -mamx-int8 -mamx-bf16 -muintr -mhreset -mavxvnni
-mavx512fp16 -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16
--mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4
+-mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mapxf
+-musermsr
-mcldemote -mms-bitfields -mno-align-stringops -minline-all-stringops
-minline-stringops-dynamically -mstringop-strategy=@var{alg}
-mkl -mwidekl
@@ -2747,9 +2749,10 @@ Typical command lines are
@opindex fopenacc
@cindex OpenACC accelerator programming
@item -fopenacc
-Enable handling of OpenACC directives @code{#pragma acc} in C/C++ and
-@code{!$acc} in Fortran. When @option{-fopenacc} is specified, the
-compiler generates accelerated code according to the OpenACC Application
+Enable handling of OpenACC directives @samp{#pragma acc} in C/C++ and
+@samp{!$acc} in free-form Fortran and @samp{!$acc}, @samp{c$acc} and
+@samp{*$acc} in fixed-form Fortran. When @option{-fopenacc} is specified,
+the compiler generates accelerated code according to the OpenACC Application
Programming Interface v2.6 @w{@uref{https://www.openacc.org}}. This option
implies @option{-pthread}, and thus is only supported on targets that
have support for @option{-pthread}.
@@ -2765,10 +2768,12 @@ can be omitted, to use a target-specific default value.
@opindex fopenmp
@cindex OpenMP parallel
@item -fopenmp
-Enable handling of OpenMP directives @code{#pragma omp} in C/C++,
-@code{[[omp::directive(...)]]}, @code{[[omp::sequence(...)]]} and
-@code{[[omp::decl(...)]]} in C++ and @code{!$omp} in Fortran.
-When @option{-fopenmp} is specified, the
+Enable handling of OpenMP directives @samp{#pragma omp} in C/C++,
+@samp{[[omp::directive(...)]]}, @samp{[[omp::sequence(...)]]} and
+@samp{[[omp::decl(...)]]} in C++ and @samp{!$omp} in Fortran. It
+additionally enables the conditional compilation sentinel @samp{!$} in
+Fortran. In fixed source form Fortran, the sentinels can also start with
+@samp{c} or @samp{*}. When @option{-fopenmp} is specified, the
compiler generates parallel code according to the OpenMP Application
Program Interface v4.5 @w{@uref{https://www.openmp.org}}. This option
implies @option{-pthread}, and thus is only supported on targets that
@@ -2784,10 +2789,12 @@ Enable handling of OpenMP's @code{simd}, @code{declare simd},
and @code{loop} directive, and of combined or composite directives with
@code{simd} as constituent with @code{#pragma omp} in C/C++,
@code{[[omp::directive(...)]]}, @code{[[omp::sequence(...)]]} and
-@code{[[omp::decl(...)]]} in C++ and @code{!$omp} in Fortran.
-Other OpenMP directives are ignored. Unless @option{-fopenmp} is
-additionally specified, the @code{loop} region binds to the current
-task region, independent of the specified @code{bind} clause.
+@code{[[omp::decl(...)]]} in C++ and @code{!$omp} in Fortran. It
+additionally enables the conditional compilation sentinel @samp{!$} in
+Fortran. In fixed source form Fortran, the sentinels can also start with
+@samp{c} or @samp{*}. Other OpenMP directives are ignored. Unless
+@option{-fopenmp} is additionally specified, the @code{loop} region binds
+to the current task region, independent of the specified @code{bind} clause.
@opindex fopenmp-target-simd-clone
@cindex OpenMP target SIMD clone
@@ -5681,7 +5688,8 @@ value further adds the possibility of emoji in the output (such as emitting
U+26A0 WARNING SIGN followed by U+FE0F VARIATION SELECTOR-16 to select the
emoji variant of the character).
-The default is @samp{emoji}.
+The default is @samp{emoji}, except when the environment variable @env{LANG}
+is set to @samp{C}, in which case the default is @samp{ascii}.
@opindex fdiagnostics-format
@item -fdiagnostics-format=@var{FORMAT}
@@ -14389,6 +14397,13 @@ the comparison operation before register allocation is complete.
Enabled at levels @option{-O1}, @option{-O2}, @option{-O3}, @option{-Os}.
+@opindex ffold-mem-offsets
+@item -ffold-mem-offsets
+@itemx -fno-fold-mem-offsets
+Try to eliminate add instructions by folding them in memory loads/stores.
+
+Enabled at levels @option{-O2}, @option{-O3}.
+
@opindex fcprop-registers
@item -fcprop-registers
After register allocation and post-register allocation instruction splitting,
@@ -20631,9 +20646,9 @@ performance of the code. Permissible values for this option are:
@samp{cortex-a73.cortex-a35}, @samp{cortex-a73.cortex-a53},
@samp{cortex-a75.cortex-a55}, @samp{cortex-a76.cortex-a55},
@samp{cortex-r82}, @samp{cortex-x1}, @samp{cortex-x1c}, @samp{cortex-x2},
-@samp{cortex-x3}, @samp{cortex-a510}, @samp{cortex-a520}, @samp{cortex-a710},
-@samp{cortex-a715}, @samp{cortex-a720}, @samp{ampere1}, @samp{ampere1a},
-and @samp{native}.
+@samp{cortex-x3}, @samp{cortex-x4}, @samp{cortex-a510}, @samp{cortex-a520},
+@samp{cortex-a710}, @samp{cortex-a715}, @samp{cortex-a720}, @samp{ampere1},
+@samp{ampere1a}, and @samp{native}.
The values @samp{cortex-a57.cortex-a53}, @samp{cortex-a72.cortex-a53},
@samp{cortex-a73.cortex-a35}, @samp{cortex-a73.cortex-a53},
@@ -21582,8 +21597,7 @@ Annotate assembler instructions with estimated addresses.
@opindex mannotate-align
@item -mannotate-align
-Explain what alignment considerations lead to the decision to make an
-instruction short or long.
+Does nothing. Preserved for backward compatibility.
@end table
@@ -32842,6 +32856,24 @@ PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, SM3
and SM4 instruction set support.
+@item clearwaterforest
+Intel Clearwater Forest CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2,
+SSE3, SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE,
+XSAVEC, XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB,
+MOVDIRI, MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA,
+LZCNT, PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
+AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, SM3, SM4,
+USER_MSR and PREFETCHI instruction set support.
+
+@item pantherlake
+Intel Panther Lake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC,
+XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI,
+MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT,
+PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
+AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, SM3, SM4
+and PREFETCHI instruction set support.
+
@item knl
Intel Knight's Landing CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
@@ -33385,9 +33417,9 @@ the cache line size. @samp{compat} is the default.
@opindex mlarge-data-threshold
@item -mlarge-data-threshold=@var{threshold}
-When @option{-mcmodel=medium} is specified, data objects larger than
-@var{threshold} are placed in the large data section. This value must be the
-same across all objects linked into the binary, and defaults to 65535.
+When @option{-mcmodel=medium} or @option{-mcmodel=large} is specified, data
+objects larger than @var{threshold} are placed in large data sections. The
+default is 65535.
@opindex mrtd
@item -mrtd
@@ -33828,6 +33860,12 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}.
@need 200
@opindex msm4
@itemx -msm4
+@need 200
+@opindex mapxf
+@itemx -mapxf
+@need 200
+@opindex musermsr
+@itemx -musermsr
These switches enable the use of instructions in the MMX, SSE,
AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
AES, PCLMUL, CLFLUSHOPT, CLWB, FSGSBASE, PTWRITE, RDRND, F16C, FMA, PCONFIG,
@@ -33838,9 +33876,9 @@ GFNI, VAES, WAITPKG, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B, AVX512BF16,
ENQCMD, AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, AVX5124VNNIW, SERIALIZE,
UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16,
AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AMX-FP16, PREFETCHI, RAOINT,
-AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, SM4 or CLDEMOTE extended instruction
-sets. Each has a corresponding @option{-mno-} option to disable use of these
-instructions.
+AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, SM4, APX_F, USER_MSR or CLDEMOTE
+extended instruction sets. Each has a corresponding @option{-mno-} option
+to disable use of these instructions.
These extensions are also available as built-in functions: see
@ref{x86 Built-in Functions}, for details of the functions enabled and
diff --git a/gcc/doc/options.texi b/gcc/doc/options.texi
index f50063c..715f0a1 100644
--- a/gcc/doc/options.texi
+++ b/gcc/doc/options.texi
@@ -402,19 +402,29 @@ You may also specify @code{Var} to select a variable other than
@code{target_flags}.
The options-processing script will automatically allocate a unique bit
-for the option. If the option is attached to @samp{target_flags},
-the script will set the macro @code{MASK_@var{name}} to the appropriate
-bitmask. It will also declare a @code{TARGET_@var{name}} macro that has
-the value 1 when the option is active and 0 otherwise. If you use @code{Var}
-to attach the option to a different variable, the bitmask macro with be
-called @code{OPTION_MASK_@var{name}}.
+for the option. If the option is attached to @samp{target_flags} or @code{Var}
+which is defined by @code{TargetVariable}, the script will set the macro
+@code{MASK_@var{name}} to the appropriate bitmask. It will also declare a
+@code{TARGET_@var{name}}, @code{TARGET_@var{name}_P} and
+@code{TARGET_@var{name}_OPTS_P}: @code{TARGET_@var{name}} is a macro that has
+the value 1 when the option is active and 0 otherwise; @code{TARGET_@var{name}_P}
+is similar to @code{TARGET_@var{name}} but takes an argument such as
+@samp{target_flags} or the @code{TargetVariable}; and @code{TARGET_@var{name}_OPTS_P}
+is also similar to @code{TARGET_@var{name}} but takes a @code{gcc_options} argument.
+If you use @code{Var} to attach the option to a different variable which is not
+defined by @code{TargetVariable}, the bitmask macro will be called
+@code{OPTION_MASK_@var{name}}.
@item InverseMask(@var{othername})
@itemx InverseMask(@var{othername}, @var{thisname})
The option is the inverse of another option that has the
@code{Mask(@var{othername})} property. If @var{thisname} is given,
-the options-processing script will declare a @code{TARGET_@var{thisname}}
-macro that is 1 when the option is active and 0 otherwise.
+the options-processing script will declare @code{TARGET_@var{thisname}},
+@code{TARGET_@var{name}_P} and @code{TARGET_@var{name}_OPTS_P} macros:
+@code{TARGET_@var{thisname}} is 1 when the option is active and 0 otherwise,
+@code{TARGET_@var{name}_P} is similar to @code{TARGET_@var{name}} but takes an
+argument such as @samp{target_flags}, and @code{TARGET_@var{name}_OPTS_P} is
+also similar to @code{TARGET_@var{name}} but takes a @code{gcc_options} argument.
@item Enum(@var{name})
The option's argument is a string from the set of strings associated
diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
index 8bf7014..c20af31 100644
--- a/gcc/doc/sourcebuild.texi
+++ b/gcc/doc/sourcebuild.texi
@@ -2471,6 +2471,18 @@ Test system has an integer register width of 64 bits.
@end table
+@subsubsection CORE-V specific attributes
+
+@table @code
+
+@item cv_mac
+Test system has support for the CORE-V MAC extension.
+
+@item cv_alu
+Test system has support for the CORE-V ALU extension.
+
+@end table
+
@subsubsection Other hardware attributes
@c Please keep this table sorted alphabetically.
@@ -2624,6 +2636,9 @@ Test environment appears to run executables on a simulator that
accepts only @code{EM_SPARC} executables and chokes on @code{EM_SPARC32PLUS}
or @code{EM_SPARCV9} executables.
+@item user_msr
+Target supports the execution of @code{user_msr} instructions.
+
@item vect_cmdline_needed
Target requires a command line argument to enable a SIMD instruction set.
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index b077972..f7ac806 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -2568,6 +2568,15 @@ of an address, @code{ADDRESS} for something that occurs in an
index expression if @var{outer_code} is @code{PLUS}; @code{SCRATCH} otherwise.
@end defmac
+@defmac INSN_BASE_REG_CLASS (@var{insn})
+A C expression whose value is the register class to which a valid
+base register for a specified @var{insn} must belong. This macro is
+used when some backend insns may have limited usage of base register
+compared with other insns. If you define this macro, the compiler will
+use it instead of all other defined macros that relate to
+BASE_REG_CLASS.
+@end defmac
+
@defmac INDEX_REG_CLASS
A macro whose definition is the name of the class to which a valid
index register must belong. An index register is one used in an
@@ -2575,6 +2584,14 @@ address where its value is either multiplied by a scale factor or
added to another register (as well as added to a displacement).
@end defmac
+@defmac INSN_INDEX_REG_CLASS (@var{insn})
+A C expression whose value is the register class to which a valid
+index register for a specified @var{insn} must belong. This macro is
+used when some backend insns may have limited usage of index register
+compared with other insns. If you define this macro, the compiler
+will use it instead of @code{INDEX_REG_CLASS}.
+@end defmac
+
@defmac REGNO_OK_FOR_BASE_P (@var{num})
A C expression which is nonzero if register number @var{num} is
suitable for use as a base register in operand addresses.
@@ -2618,6 +2635,15 @@ corresponding index expression if @var{outer_code} is @code{PLUS};
that appear outside a @code{MEM}, i.e., as an @code{address_operand}.
@end defmac
+@defmac REGNO_OK_FOR_INSN_BASE_P (@var{num}, @var{insn})
+A C expression which is nonzero if register number @var{num} is
+suitable for use as a base register in operand addresses for a specified
+@var{insn}. This macro is used when some backend insns may have limited
+usage of base register compared with other insns. If you define this
+macro, the compiler will use it instead of all other defined macros
+that relate to REGNO_OK_FOR_BASE_P.
+@end defmac
+
@defmac REGNO_OK_FOR_INDEX_P (@var{num})
A C expression which is nonzero if register number @var{num} is
suitable for use as an index register in operand addresses. It may be
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index d3e1895..141027e 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -2150,6 +2150,15 @@ of an address, @code{ADDRESS} for something that occurs in an
index expression if @var{outer_code} is @code{PLUS}; @code{SCRATCH} otherwise.
@end defmac
+@defmac INSN_BASE_REG_CLASS (@var{insn})
+A C expression whose value is the register class to which a valid
+base register for a specified @var{insn} must belong. This macro is
+used when some backend insns may have limited usage of base register
+compared with other insns. If you define this macro, the compiler will
+use it instead of all other defined macros that relate to
+BASE_REG_CLASS.
+@end defmac
+
@defmac INDEX_REG_CLASS
A macro whose definition is the name of the class to which a valid
index register must belong. An index register is one used in an
@@ -2157,6 +2166,14 @@ address where its value is either multiplied by a scale factor or
added to another register (as well as added to a displacement).
@end defmac
+@defmac INSN_INDEX_REG_CLASS (@var{insn})
+A C expression whose value is the register class to which a valid
+index register for a specified @var{insn} must belong. This macro is
+used when some backend insns may have limited usage of index register
+compared with other insns. If you define this macro, the compiler
+will use it instead of @code{INDEX_REG_CLASS}.
+@end defmac
+
@defmac REGNO_OK_FOR_BASE_P (@var{num})
A C expression which is nonzero if register number @var{num} is
suitable for use as a base register in operand addresses.
@@ -2200,6 +2217,15 @@ corresponding index expression if @var{outer_code} is @code{PLUS};
that appear outside a @code{MEM}, i.e., as an @code{address_operand}.
@end defmac
+@defmac REGNO_OK_FOR_INSN_BASE_P (@var{num}, @var{insn})
+A C expression which is nonzero if register number @var{num} is
+suitable for use as a base register in operand addresses for a specified
+@var{insn}. This macro is used when some backend insns may have limited
+usage of base register compared with other insns. If you define this
+macro, the compiler will use it instead of all other defined macros
+that relate to REGNO_OK_FOR_BASE_P.
+@end defmac
+
@defmac REGNO_OK_FOR_INDEX_P (@var{num})
A C expression which is nonzero if register number @var{num} is
suitable for use as an index register in operand addresses. It may be
diff --git a/gcc/double-int.h b/gcc/double-int.h
index 41abe84..2bd492a 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -440,8 +440,9 @@ namespace wi
template <>
struct int_traits <double_int>
{
- static const enum precision_type precision_type = CONST_PRECISION;
+ static const enum precision_type precision_type = INL_CONST_PRECISION;
static const bool host_dependent_precision = true;
+ static const bool needs_write_val_arg = false;
static const unsigned int precision = HOST_BITS_PER_DOUBLE_INT;
static unsigned int get_precision (const double_int &);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
diff --git a/gcc/dumpfile.cc b/gcc/dumpfile.cc
index a2050d1..1e72aaf 100644
--- a/gcc/dumpfile.cc
+++ b/gcc/dumpfile.cc
@@ -951,7 +951,7 @@ dump_pretty_printer::decode_format (text_info *text, const char *spec,
{
case 'C':
{
- cgraph_node *node = va_arg (*text->args_ptr, cgraph_node *);
+ cgraph_node *node = va_arg (*text->m_args_ptr, cgraph_node *);
/* Make an item for the node, and stash it. */
optinfo_item *item = make_item_for_dump_symtab_node (node);
@@ -961,7 +961,7 @@ dump_pretty_printer::decode_format (text_info *text, const char *spec,
case 'E':
{
- gimple *stmt = va_arg (*text->args_ptr, gimple *);
+ gimple *stmt = va_arg (*text->m_args_ptr, gimple *);
/* Make an item for the stmt, and stash it. */
optinfo_item *item = make_item_for_dump_gimple_expr (stmt, 0, TDF_SLIM);
@@ -971,7 +971,7 @@ dump_pretty_printer::decode_format (text_info *text, const char *spec,
case 'G':
{
- gimple *stmt = va_arg (*text->args_ptr, gimple *);
+ gimple *stmt = va_arg (*text->m_args_ptr, gimple *);
/* Make an item for the stmt, and stash it. */
optinfo_item *item = make_item_for_dump_gimple_stmt (stmt, 0, TDF_SLIM);
@@ -981,7 +981,7 @@ dump_pretty_printer::decode_format (text_info *text, const char *spec,
case 'T':
{
- tree t = va_arg (*text->args_ptr, tree);
+ tree t = va_arg (*text->m_args_ptr, tree);
/* Make an item for the tree, and stash it. */
optinfo_item *item = make_item_for_dump_generic_expr (t, TDF_SLIM);
@@ -1002,10 +1002,7 @@ dump_context::dump_printf_va (const dump_metadata_t &metadata, const char *forma
{
dump_pretty_printer pp (this, metadata.get_dump_flags ());
- text_info text;
- text.err_no = errno;
- text.args_ptr = ap;
- text.format_spec = format;
+ text_info text (format, ap, errno);
/* Phases 1 and 2, using pp_format. */
pp_format (&pp, &text);
diff --git a/gcc/dwarf2cfi.cc b/gcc/dwarf2cfi.cc
index f1777c0..d52088b 100644
--- a/gcc/dwarf2cfi.cc
+++ b/gcc/dwarf2cfi.cc
@@ -112,8 +112,8 @@ struct dw_trace_info
while scanning insns. However, the args_size value is irrelevant at
any point except can_throw_internal_p insns. Therefore the "delay"
sizes the values that must actually be emitted for this trace. */
- poly_int64_pod beg_true_args_size, end_true_args_size;
- poly_int64_pod beg_delay_args_size, end_delay_args_size;
+ poly_int64 beg_true_args_size, end_true_args_size;
+ poly_int64 beg_delay_args_size, end_delay_args_size;
/* The first EH insn in the trace, where beg_delay_args_size must be set. */
rtx_insn *eh_head;
@@ -219,7 +219,7 @@ static dw_cfa_location *cur_cfa;
struct queued_reg_save {
rtx reg;
rtx saved_reg;
- poly_int64_pod cfa_offset;
+ poly_int64 cfa_offset;
};
diff --git a/gcc/dwarf2out.cc b/gcc/dwarf2out.cc
index ad2be7c..0ea73bf 100644
--- a/gcc/dwarf2out.cc
+++ b/gcc/dwarf2out.cc
@@ -397,11 +397,9 @@ dump_struct_debug (tree type, enum debug_info_usage usage,
of the number. */
static unsigned int
-get_full_len (const wide_int &op)
+get_full_len (const dw_wide_int &op)
{
- int prec = wi::get_precision (op);
- return ((prec + HOST_BITS_PER_WIDE_INT - 1)
- / HOST_BITS_PER_WIDE_INT);
+ return CEIL (op.get_precision (), HOST_BITS_PER_WIDE_INT);
}
static bool
@@ -3900,7 +3898,7 @@ static void add_data_member_location_attribute (dw_die_ref, tree,
struct vlr_context *);
static bool add_const_value_attribute (dw_die_ref, machine_mode, rtx);
static void insert_int (HOST_WIDE_INT, unsigned, unsigned char *);
-static void insert_wide_int (const wide_int &, unsigned char *, int);
+static void insert_wide_int (const wide_int_ref &, unsigned char *, int);
static unsigned insert_float (const_rtx, unsigned char *);
static rtx rtl_for_decl_location (tree);
static bool add_location_or_const_value_attribute (dw_die_ref, tree, bool);
@@ -4594,19 +4592,31 @@ AT_unsigned (dw_attr_node *a)
return a->dw_attr_val.v.val_unsigned;
}
+dw_wide_int *
+alloc_dw_wide_int (const wide_int_ref &w)
+{
+ dw_wide_int *p
+ = (dw_wide_int *) ggc_internal_alloc (sizeof (dw_wide_int)
+ + ((w.get_len () - 1)
+ * sizeof (HOST_WIDE_INT)));
+ p->precision = w.get_precision ();
+ p->len = w.get_len ();
+ memcpy (p->val, w.get_val (), p->len * sizeof (HOST_WIDE_INT));
+ return p;
+}
+
/* Add an unsigned wide integer attribute value to a DIE. */
static inline void
add_AT_wide (dw_die_ref die, enum dwarf_attribute attr_kind,
- const wide_int& w)
+ const wide_int_ref &w)
{
dw_attr_node attr;
attr.dw_attr = attr_kind;
attr.dw_attr_val.val_class = dw_val_class_wide_int;
attr.dw_attr_val.val_entry = NULL;
- attr.dw_attr_val.v.val_wide = ggc_alloc<wide_int> ();
- *attr.dw_attr_val.v.val_wide = w;
+ attr.dw_attr_val.v.val_wide = alloc_dw_wide_int (w);
add_dwarf_attr (die, &attr);
}
@@ -16714,8 +16724,8 @@ mem_loc_descriptor (rtx rtl, machine_mode mode,
mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0;
mem_loc_result->dw_loc_oprnd2.val_class
= dw_val_class_wide_int;
- mem_loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc<wide_int> ();
- *mem_loc_result->dw_loc_oprnd2.v.val_wide = rtx_mode_t (rtl, mode);
+ mem_loc_result->dw_loc_oprnd2.v.val_wide
+ = alloc_dw_wide_int (rtx_mode_t (rtl, mode));
}
break;
@@ -17288,8 +17298,8 @@ loc_descriptor (rtx rtl, machine_mode mode,
loc_result = new_loc_descr (DW_OP_implicit_value,
GET_MODE_SIZE (int_mode), 0);
loc_result->dw_loc_oprnd2.val_class = dw_val_class_wide_int;
- loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc<wide_int> ();
- *loc_result->dw_loc_oprnd2.v.val_wide = rtx_mode_t (rtl, int_mode);
+ loc_result->dw_loc_oprnd2.v.val_wide
+ = alloc_dw_wide_int (rtx_mode_t (rtl, int_mode));
}
break;
@@ -20189,7 +20199,7 @@ extract_int (const unsigned char *src, unsigned int size)
/* Writes wide_int values to dw_vec_const array. */
static void
-insert_wide_int (const wide_int &val, unsigned char *dest, int elt_size)
+insert_wide_int (const wide_int_ref &val, unsigned char *dest, int elt_size)
{
int i;
@@ -20274,8 +20284,7 @@ add_const_value_attribute (dw_die_ref die, machine_mode mode, rtx rtl)
&& (GET_MODE_PRECISION (int_mode)
& (HOST_BITS_PER_WIDE_INT - 1)) == 0)
{
- wide_int w = rtx_mode_t (rtl, int_mode);
- add_AT_wide (die, DW_AT_const_value, w);
+ add_AT_wide (die, DW_AT_const_value, rtx_mode_t (rtl, int_mode));
return true;
}
return false;
diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h
index 61a9960..515ea0b 100644
--- a/gcc/dwarf2out.h
+++ b/gcc/dwarf2out.h
@@ -30,7 +30,7 @@ typedef struct dw_cfi_node *dw_cfi_ref;
typedef struct dw_loc_descr_node *dw_loc_descr_ref;
typedef struct dw_loc_list_struct *dw_loc_list_ref;
typedef struct dw_discr_list_node *dw_discr_list_ref;
-typedef wide_int *wide_int_ptr;
+typedef struct dw_wide_int *dw_wide_int_ptr;
/* Call frames are described using a sequence of Call Frame
@@ -158,8 +158,8 @@ struct GTY(()) cfa_reg {
Instead of passing around REG and OFFSET, we pass a copy
of this structure. */
struct GTY(()) dw_cfa_location {
- poly_int64_pod offset;
- poly_int64_pod base_offset;
+ poly_int64 offset;
+ poly_int64 base_offset;
/* REG is in DWARF_FRAME_REGNUM space, *not* normal REGNO space. */
struct cfa_reg reg;
BOOL_BITFIELD indirect : 1; /* 1 if CFA is accessed via a dereference. */
@@ -252,7 +252,7 @@ struct GTY(()) dw_val_node {
unsigned HOST_WIDE_INT
GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned;
double_int GTY ((tag ("dw_val_class_const_double"))) val_double;
- wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide;
+ dw_wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide;
dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec;
struct dw_val_die_union
{
@@ -313,6 +313,35 @@ struct GTY(()) dw_discr_list_node {
int dw_discr_range;
};
+struct GTY((variable_size)) dw_wide_int {
+ unsigned int precision;
+ unsigned int len;
+ HOST_WIDE_INT val[1];
+
+ unsigned int get_precision () const { return precision; }
+ unsigned int get_len () const { return len; }
+ const HOST_WIDE_INT *get_val () const { return val; }
+ inline HOST_WIDE_INT elt (unsigned int) const;
+ inline bool operator == (const dw_wide_int &) const;
+};
+
+inline HOST_WIDE_INT
+dw_wide_int::elt (unsigned int i) const
+{
+ if (i < len)
+ return val[i];
+ wide_int_ref ref = wi::storage_ref (val, len, precision);
+ return wi::sign_mask (ref);
+}
+
+inline bool
+dw_wide_int::operator == (const dw_wide_int &o) const
+{
+ wide_int_ref ref1 = wi::storage_ref (val, len, precision);
+ wide_int_ref ref2 = wi::storage_ref (o.val, o.len, o.precision);
+ return ref1 == ref2;
+}
+
/* Interface from dwarf2out.cc to dwarf2cfi.cc. */
extern struct dw_loc_descr_node *build_cfa_loc
(dw_cfa_location *, poly_int64);
diff --git a/gcc/edit-context.cc b/gcc/edit-context.cc
index 6f5bc6b..09b000c 100644
--- a/gcc/edit-context.cc
+++ b/gcc/edit-context.cc
@@ -122,6 +122,32 @@ class added_line
int m_len;
};
+/* Class for representing edit events that have occurred on one line of
+ one file: the replacement of some text between some columns
+ on the line.
+
+ Subsequent events will need their columns adjusting if they
+ are on this line and their column is >= the start point. */
+
+class line_event
+{
+ public:
+ line_event (int start, int next, int len) : m_start (start),
+ m_delta (len - (next - start)) {}
+
+ int get_effective_column (int orig_column) const
+ {
+ if (orig_column >= m_start)
+ return orig_column += m_delta;
+ else
+ return orig_column;
+ }
+
+ private:
+ int m_start;
+ int m_delta;
+};
+
/* The state of one edited line within an edited_file.
As well as the current content of the line, it contains a record of
the changes, so that further changes can be applied in the correct
@@ -172,32 +198,6 @@ class edited_line
auto_vec <added_line *> m_predecessors;
};
-/* Class for representing edit events that have occurred on one line of
- one file: the replacement of some text betweeen some columns
- on the line.
-
- Subsequent events will need their columns adjusting if they're
- are on this line and their column is >= the start point. */
-
-class line_event
-{
- public:
- line_event (int start, int next, int len) : m_start (start),
- m_delta (len - (next - start)) {}
-
- int get_effective_column (int orig_column) const
- {
- if (orig_column >= m_start)
- return orig_column += m_delta;
- else
- return orig_column;
- }
-
- private:
- int m_start;
- int m_delta;
-};
-
/* Forward decls. */
static void
diff --git a/gcc/emit-rtl.cc b/gcc/emit-rtl.cc
index 8bd623d..84b6833 100644
--- a/gcc/emit-rtl.cc
+++ b/gcc/emit-rtl.cc
@@ -947,7 +947,7 @@ validate_subreg (machine_mode omode, machine_mode imode,
in post-reload splitters that make arbitrarily mode changes to the
registers themselves. */
else if (VECTOR_MODE_P (omode)
- && GET_MODE_INNER (omode) == GET_MODE_INNER (imode))
+ && GET_MODE_UNIT_SIZE (omode) == GET_MODE_UNIT_SIZE (imode))
;
/* Subregs involving floating point modes are not allowed to
change size unless it's an insert into a complex mode.
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index af62f21..c11a6f3 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -30,12 +30,12 @@ struct GTY(()) incoming_args {
/* Number of bytes of args popped by function being compiled on its return.
Zero if no bytes are to be popped.
May affect compilation of return insn or of function epilogue. */
- poly_int64_pod pops_args;
+ poly_int64 pops_args;
/* If function's args have a fixed size, this is that size, in bytes.
Otherwise, it is -1.
May affect compilation of return insn or of function epilogue. */
- poly_int64_pod size;
+ poly_int64 size;
/* # bytes the prologue should push and pretend that the caller pushed them.
The prologue must do this, but only if parms can be passed in
@@ -80,7 +80,7 @@ struct GTY(()) rtl_data {
/* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is
defined, the needed space is pushed by the prologue. */
- poly_int64_pod outgoing_args_size;
+ poly_int64 outgoing_args_size;
/* If nonzero, an RTL expression for the location at which the current
function returns its result. If the current function returns its
@@ -139,7 +139,7 @@ struct GTY(()) rtl_data {
/* Offset to end of allocated area of stack frame.
If stack grows down, this is the address of the last stack slot allocated.
If stack grows up, this is the address for the next slot. */
- poly_int64_pod x_frame_offset;
+ poly_int64 x_frame_offset;
/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
rtx_insn *x_parm_birth_insn;
diff --git a/gcc/explow.cc b/gcc/explow.cc
index 6424c08..0c03ac3 100644
--- a/gcc/explow.cc
+++ b/gcc/explow.cc
@@ -1818,7 +1818,10 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
gen_int_mode (PROBE_INTERVAL, Pmode), test_addr,
1, OPTAB_WIDEN);
- gcc_assert (temp == test_addr);
+ /* There is no guarantee that expand_binop constructs its result
+ in TEST_ADDR. So copy into TEST_ADDR if necessary. */
+ if (temp != test_addr)
+ emit_move_insn (test_addr, temp);
/* Probe at TEST_ADDR. */
emit_stack_probe (test_addr);
diff --git a/gcc/expr.cc b/gcc/expr.cc
index 308ddc0..8aed3fc 100644
--- a/gcc/expr.cc
+++ b/gcc/expr.cc
@@ -5438,8 +5438,8 @@ optimize_bitfield_assignment_op (poly_uint64 pbitsize,
*BITSTART and *BITEND. */
void
-get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
- poly_int64_pod *bitpos, tree *offset)
+get_bit_range (poly_uint64 *bitstart, poly_uint64 *bitend, tree exp,
+ poly_int64 *bitpos, tree *offset)
{
poly_int64 bitoffset;
tree field, repr;
@@ -6083,13 +6083,10 @@ string_cst_read_str (void *data, void *, HOST_WIDE_INT offset,
size_t l = TREE_STRING_LENGTH (str) - offset;
memcpy (p, TREE_STRING_POINTER (str) + offset, l);
memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
- return c_readstr (p, as_a <scalar_int_mode> (mode), false);
+ return c_readstr (p, mode, false);
}
- /* The by-pieces infrastructure does not try to pick a vector mode
- for storing STRING_CST. */
- return c_readstr (TREE_STRING_POINTER (str) + offset,
- as_a <scalar_int_mode> (mode), false);
+ return c_readstr (TREE_STRING_POINTER (str) + offset, mode, false);
}
/* Generate code for computing expression EXP,
@@ -7884,8 +7881,8 @@ store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
this case, but the address of the object can be found. */
tree
-get_inner_reference (tree exp, poly_int64_pod *pbitsize,
- poly_int64_pod *pbitpos, tree *poffset,
+get_inner_reference (tree exp, poly_int64 *pbitsize,
+ poly_int64 *pbitpos, tree *poffset,
machine_mode *pmode, int *punsignedp,
int *preversep, int *pvolatilep)
{
@@ -9332,13 +9329,6 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
op0 = expand_expr (treeop0, target, VOIDmode,
modifier);
- /* If the signedness of the conversion differs and OP0 is
- a promoted SUBREG, clear that indication since we now
- have to do the proper extension. */
- if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
- && GET_CODE (op0) == SUBREG)
- SUBREG_PROMOTED_VAR_P (op0) = 0;
-
return REDUCE_BIT_FIELD (op0);
}
diff --git a/gcc/expr.h b/gcc/expr.h
index 11bff53..2a17286 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -275,8 +275,8 @@ extern bool emit_push_insn (rtx, machine_mode, tree, rtx, unsigned int,
int, rtx, poly_int64, rtx, rtx, int, rtx, bool);
/* Extract the accessible bit-range from a COMPONENT_REF. */
-extern void get_bit_range (poly_uint64_pod *, poly_uint64_pod *, tree,
- poly_int64_pod *, tree *);
+extern void get_bit_range (poly_uint64 *, poly_uint64 *, tree,
+ poly_int64 *, tree *);
/* Expand an assignment that stores the value of FROM into TO. */
extern void expand_assignment (tree, tree, bool);
diff --git a/gcc/fold-const.cc b/gcc/fold-const.cc
index c5ac822..44118e7 100644
--- a/gcc/fold-const.cc
+++ b/gcc/fold-const.cc
@@ -2137,7 +2137,10 @@ fold_convert_const_int_from_int (tree type, const_tree arg1)
/* Given an integer constant, make new constant with new type,
appropriately sign-extended or truncated. Use widest_int
so that any extension is done according ARG1's type. */
- return force_fit_type (type, wi::to_widest (arg1),
+ tree arg1_type = TREE_TYPE (arg1);
+ unsigned prec = MAX (TYPE_PRECISION (arg1_type), TYPE_PRECISION (type));
+ return force_fit_type (type, wide_int::from (wi::to_wide (arg1), prec,
+ TYPE_SIGN (arg1_type)),
!POINTER_TYPE_P (TREE_TYPE (arg1)),
TREE_OVERFLOW (arg1));
}
@@ -9266,8 +9269,8 @@ fold_view_convert_vector_encoding (tree type, tree expr)
static tree
fold_view_convert_expr (tree type, tree expr)
{
- /* We support up to 512-bit values (for V8DFmode). */
- unsigned char buffer[64];
+ /* We support up to 1024-bit values (for GCN/RISC-V V128QImode). */
+ unsigned char buffer[128];
int len;
/* Check that the host and target are sane. */
@@ -9565,8 +9568,13 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
}
if (change)
{
- tem = force_fit_type (type, wi::to_widest (and1), 0,
- TREE_OVERFLOW (and1));
+ tree and1_type = TREE_TYPE (and1);
+ unsigned prec = MAX (TYPE_PRECISION (and1_type),
+ TYPE_PRECISION (type));
+ tem = force_fit_type (type,
+ wide_int::from (wi::to_wide (and1), prec,
+ TYPE_SIGN (and1_type)),
+ 0, TREE_OVERFLOW (and1));
return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_convert_loc (loc, type, and0), tem);
}
@@ -11056,11 +11064,7 @@ expr_not_equal_to (tree t, const wide_int &w)
if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
return false;
- if (cfun)
- get_range_query (cfun)->range_of_expr (vr, t);
- else
- get_global_range_query ()->range_of_expr (vr, t);
-
+ get_range_query (cfun)->range_of_expr (vr, t);
if (!vr.undefined_p () && !vr.contains_p (w))
return true;
/* If T has some known zero bits and W has any of those bits set,
@@ -16564,7 +16568,7 @@ round_down_loc (location_t loc, tree value, int divisor)
static tree
split_address_to_core_and_offset (tree exp,
- poly_int64_pod *pbitpos, tree *poffset)
+ poly_int64 *pbitpos, tree *poffset)
{
tree core;
machine_mode mode;
@@ -16614,7 +16618,7 @@ split_address_to_core_and_offset (tree exp,
otherwise. If they do, E1 - E2 is stored in *DIFF. */
bool
-ptr_difference_const (tree e1, tree e2, poly_int64_pod *diff)
+ptr_difference_const (tree e1, tree e2, poly_int64 *diff)
{
tree core1, core2;
poly_int64 bitpos1, bitpos2;
diff --git a/gcc/fold-const.h b/gcc/fold-const.h
index 3d08528..50f901d 100644
--- a/gcc/fold-const.h
+++ b/gcc/fold-const.h
@@ -151,7 +151,7 @@ extern tree div_if_zero_remainder (const_tree, const_tree);
extern bool tree_swap_operands_p (const_tree, const_tree);
extern enum tree_code swap_tree_comparison (enum tree_code);
-extern bool ptr_difference_const (tree, tree, poly_int64_pod *);
+extern bool ptr_difference_const (tree, tree, poly_int64 *);
extern enum tree_code invert_tree_comparison (enum tree_code, bool);
extern bool inverse_conditions_p (const_tree, const_tree);
diff --git a/gcc/fold-mem-offsets.cc b/gcc/fold-mem-offsets.cc
new file mode 100644
index 0000000..6263fc7
--- /dev/null
+++ b/gcc/fold-mem-offsets.cc
@@ -0,0 +1,901 @@
+/* Late RTL pass to fold memory offsets.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "expr.h"
+#include "backend.h"
+#include "regs.h"
+#include "target.h"
+#include "memmodel.h"
+#include "emit-rtl.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "predict.h"
+#include "df.h"
+#include "tree-pass.h"
+#include "cfgrtl.h"
+
+/* This pass tries to optimize memory offset calculations by moving constants
+ from add instructions to the memory instructions (loads / stores).
+ For example it can transform code like this:
+
+ add t4, sp, 16
+ add t2, a6, t4
+ shl t3, t2, 1
+ ld a2, 0(t3)
+ add a2, 1
+ sd a2, 8(t2)
+
+ into the following (one instruction less):
+
+ add t2, a6, sp
+ shl t3, t2, 1
+ ld a2, 32(t3)
+ add a2, 1
+ sd a2, 24(t2)
+
+ Although the previous passes try to emit efficient offset calculations
+ this pass is still beneficial because:
+
+ - The mechanisms that optimize memory offsets usually work with specific
+ patterns or have limitations. This pass is designed to fold offsets
+ through complex calculations that affect multiple memory operations
+ and have partially overlapping calculations.
+
+ - There are cases where add instructions are introduced in late rtl passes
+ and the rest of the pipeline cannot eliminate them. Arrays and structs
+ allocated on the stack can result in unwanted add instructions that
+ cannot be eliminated easily.
+
+ This pass works on a basic block level and consists of 4 phases:
+
+ - Phase 1 (Analysis): Find "foldable" instructions.
+ Foldable instructions are those that we know how to propagate
+ a constant addition through (add, shift, move, ...) and only have other
+ foldable instructions for uses. In that phase a DFS traversal on the
+ definition tree is performed and foldable instructions are marked on
+ a bitmap. The add immediate instructions that are reachable in this
+ DFS are candidates for folding since all the intermediate calculations
+ affected by them are also foldable.
+
+ - Phase 2 (Validity): Traverse and calculate the offsets that would result
+ from folding the add immediate instructions. Check whether the
+ calculated offsets result in a valid instruction for the target.
+
+ - Phase 3 (Commit offsets): Traverse again. It is now known which folds
+ are valid so at this point change the offsets in the memory instructions.
+
+ - Phase 4 (Commit instruction deletions): Scan all instructions and delete
+ or simplify (reduce to move) all add immediate instructions that were
+ folded.
+
+ This pass should run before hard register propagation because it creates
+ register moves that we expect to be eliminated. */
+
+namespace {
+
+const pass_data pass_data_fold_mem =
+{
+ RTL_PASS, /* type */
+ "fold_mem_offsets", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish, /* todo_flags_finish */
+};
+
+class pass_fold_mem_offsets : public rtl_opt_pass
+{
+public:
+ pass_fold_mem_offsets (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_fold_mem, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return flag_fold_mem_offsets && optimize >= 2;
+ }
+
+ virtual unsigned int execute (function *);
+}; // class pass_fold_mem_offsets
+
+/* Class that holds in FOLD_INSNS the instructions that if folded the offset
+ of a memory instruction would increase by ADDED_OFFSET. */
+class fold_mem_info {
+public:
+ auto_bitmap fold_insns;
+ HOST_WIDE_INT added_offset;
+};
+
+typedef hash_map<rtx_insn *, fold_mem_info *> fold_info_map;
+
+/* Tracks which instructions can be reached through instructions that can
+ propagate offsets for folding. */
+static bitmap_head can_fold_insns;
+
+/* Marks instructions that are currently eligible for folding. */
+static bitmap_head candidate_fold_insns;
+
+/* Tracks instructions that cannot be folded because it turned out that
+ folding will result in creating an invalid memory instruction.
+ An instruction can be in both CANDIDATE_FOLD_INSNS and CANNOT_FOLD_INSNS
+ at the same time, in which case it is not legal to fold. */
+static bitmap_head cannot_fold_insns;
+
+/* The number of instructions that were simplified or eliminated. */
+static int stats_fold_count;
+
+/* Get the single reaching definition of an instruction inside a BB.
+ The definition is desired for REG used in INSN.
+ Return the definition insn or NULL if there's no definition with
+ the desired criteria. */
+static rtx_insn*
+get_single_def_in_bb (rtx_insn *insn, rtx reg)
+{
+ df_ref use;
+ struct df_link *ref_chain, *ref_link;
+
+ FOR_EACH_INSN_USE (use, insn)
+ {
+ if (GET_CODE (DF_REF_REG (use)) == SUBREG)
+ return NULL;
+ if (REGNO (DF_REF_REG (use)) == REGNO (reg))
+ break;
+ }
+
+ if (!use)
+ return NULL;
+
+ ref_chain = DF_REF_CHAIN (use);
+
+ if (!ref_chain)
+ return NULL;
+
+ for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
+ {
+ /* Problem getting some definition for this instruction. */
+ if (ref_link->ref == NULL)
+ return NULL;
+ if (DF_REF_INSN_INFO (ref_link->ref) == NULL)
+ return NULL;
+ if (global_regs[REGNO (reg)]
+ && !set_of (reg, DF_REF_INSN (ref_link->ref)))
+ return NULL;
+ }
+
+ if (ref_chain->next)
+ return NULL;
+
+ rtx_insn *def = DF_REF_INSN (ref_chain->ref);
+
+ if (BLOCK_FOR_INSN (def) != BLOCK_FOR_INSN (insn))
+ return NULL;
+
+ if (DF_INSN_LUID (def) > DF_INSN_LUID (insn))
+ return NULL;
+
+ return def;
+}
+
+/* Get all uses of REG which is set in INSN. Return the use list or NULL if a
+ use is missing / irregular. If SUCCESS is not NULL then set it to false if
+ there are missing / irregular uses and true otherwise. */
+static struct df_link*
+get_uses (rtx_insn *insn, rtx reg, bool *success)
+{
+ df_ref def;
+ struct df_link *ref_chain, *ref_link;
+
+ if (success)
+ *success = false;
+
+ FOR_EACH_INSN_DEF (def, insn)
+ if (REGNO (DF_REF_REG (def)) == REGNO (reg))
+ break;
+
+ if (!def)
+ return NULL;
+
+ ref_chain = DF_REF_CHAIN (def);
+
+ for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
+ {
+ /* Problem getting a use for this instruction. */
+ if (ref_link->ref == NULL)
+ return NULL;
+ if (DF_REF_CLASS (ref_link->ref) != DF_REF_REGULAR)
+ return NULL;
+ /* We do not handle REG_EQUIV/REG_EQ notes for now. */
+ if (DF_REF_FLAGS (ref_link->ref) & DF_REF_IN_NOTE)
+ return NULL;
+ }
+
+ if (success)
+ *success = true;
+
+ return ref_chain;
+}
+
+static HOST_WIDE_INT
+fold_offsets (rtx_insn *insn, rtx reg, bool analyze, bitmap foldable_insns);
+
+/* Helper function for fold_offsets.
+
+ If DO_RECURSION is false and ANALYZE is true this function returns true iff
+ it understands the structure of INSN and knows how to propagate constants
+ through it. In this case OFFSET_OUT and FOLDABLE_INSNS are unused.
+
+ If DO_RECURSION is true then it also calls fold_offsets for each recognized
+ part of INSN with the appropriate arguments.
+
+ If DO_RECURSION is true and ANALYZE is false then offset that would result
+ from folding is computed and is returned through the pointer OFFSET_OUT.
+ The instructions that can be folded are recorded in FOLDABLE_INSNS.
+*/
+static bool
+fold_offsets_1 (rtx_insn *insn, bool analyze, bool do_recursion,
+ HOST_WIDE_INT *offset_out, bitmap foldable_insns)
+{
+ /* Doesn't make sense if both DO_RECURSION and ANALYZE are false. */
+ gcc_checking_assert (do_recursion || analyze);
+ gcc_checking_assert (GET_CODE (PATTERN (insn)) == SET);
+
+ rtx src = SET_SRC (PATTERN (insn));
+ HOST_WIDE_INT offset = 0;
+
+ switch (GET_CODE (src))
+ {
+ case PLUS:
+ {
+ /* Propagate through add. */
+ rtx arg1 = XEXP (src, 0);
+ rtx arg2 = XEXP (src, 1);
+
+ if (REG_P (arg1))
+ {
+ if (do_recursion)
+ offset += fold_offsets (insn, arg1, analyze, foldable_insns);
+ }
+ else if (GET_CODE (arg1) == ASHIFT
+ && REG_P (XEXP (arg1, 0))
+ && CONST_INT_P (XEXP (arg1, 1)))
+ {
+ /* Handle R1 = (R2 << C) + ... */
+ if (do_recursion)
+ {
+ HOST_WIDE_INT scale
+ = (HOST_WIDE_INT_1U << INTVAL (XEXP (arg1, 1)));
+ offset += scale * fold_offsets (insn, XEXP (arg1, 0), analyze,
+ foldable_insns);
+ }
+ }
+ else if (GET_CODE (arg1) == PLUS
+ && REG_P (XEXP (arg1, 0))
+ && REG_P (XEXP (arg1, 1)))
+ {
+ /* Handle R1 = (R2 + R3) + ... */
+ if (do_recursion)
+ {
+ offset += fold_offsets (insn, XEXP (arg1, 0), analyze,
+ foldable_insns);
+ offset += fold_offsets (insn, XEXP (arg1, 1), analyze,
+ foldable_insns);
+ }
+ }
+ else if (GET_CODE (arg1) == PLUS
+ && GET_CODE (XEXP (arg1, 0)) == ASHIFT
+ && REG_P (XEXP (XEXP (arg1, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (arg1, 0), 1))
+ && REG_P (XEXP (arg1, 1)))
+ {
+ /* Handle R1 = ((R2 << C) + R3) + ... */
+ if (do_recursion)
+ {
+ HOST_WIDE_INT scale
+ = (HOST_WIDE_INT_1U << INTVAL (XEXP (XEXP (arg1, 0), 1)));
+ offset += scale * fold_offsets (insn, XEXP (XEXP (arg1, 0), 0),
+ analyze, foldable_insns);
+ offset += fold_offsets (insn, XEXP (arg1, 1), analyze,
+ foldable_insns);
+ }
+ }
+ else
+ return false;
+
+ if (REG_P (arg2))
+ {
+ if (do_recursion)
+ offset += fold_offsets (insn, arg2, analyze, foldable_insns);
+ }
+ else if (CONST_INT_P (arg2))
+ {
+ if (REG_P (arg1))
+ {
+ offset += INTVAL (arg2);
+ /* This is a R1 = R2 + C instruction, candidate for folding. */
+ if (!analyze)
+ bitmap_set_bit (foldable_insns, INSN_UID (insn));
+ }
+ }
+ else
+ return false;
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ case MINUS:
+ {
+ /* Propagate through minus. */
+ rtx arg1 = XEXP (src, 0);
+ rtx arg2 = XEXP (src, 1);
+
+ if (REG_P (arg1))
+ {
+ if (do_recursion)
+ offset += fold_offsets (insn, arg1, analyze, foldable_insns);
+ }
+ else
+ return false;
+
+ if (REG_P (arg2))
+ {
+ if (do_recursion)
+ offset -= fold_offsets (insn, arg2, analyze, foldable_insns);
+ }
+ else if (CONST_INT_P (arg2))
+ {
+ if (REG_P (arg1))
+ {
+ offset -= INTVAL (arg2);
+ /* This is a R1 = R2 - C instruction, candidate for folding. */
+ if (!analyze)
+ bitmap_set_bit (foldable_insns, INSN_UID (insn));
+ }
+ }
+ else
+ return false;
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ case NEG:
+ {
+ /* Propagate through negation. */
+ rtx arg1 = XEXP (src, 0);
+ if (REG_P (arg1))
+ {
+ if (do_recursion)
+ offset = -fold_offsets (insn, arg1, analyze, foldable_insns);
+ }
+ else
+ return false;
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ case MULT:
+ {
+ /* Propagate through multiply by constant. */
+ rtx arg1 = XEXP (src, 0);
+ rtx arg2 = XEXP (src, 1);
+
+ if (REG_P (arg1) && CONST_INT_P (arg2))
+ {
+ if (do_recursion)
+ {
+ HOST_WIDE_INT scale = INTVAL (arg2);
+ offset = scale * fold_offsets (insn, arg1, analyze,
+ foldable_insns);
+ }
+ }
+ else
+ return false;
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ case ASHIFT:
+ {
+ /* Propagate through shift left by constant. */
+ rtx arg1 = XEXP (src, 0);
+ rtx arg2 = XEXP (src, 1);
+
+ if (REG_P (arg1) && CONST_INT_P (arg2))
+ {
+ if (do_recursion)
+ {
+ HOST_WIDE_INT scale = (HOST_WIDE_INT_1U << INTVAL (arg2));
+ offset = scale * fold_offsets (insn, arg1, analyze,
+ foldable_insns);
+ }
+ }
+ else
+ return false;
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ case REG:
+ {
+ /* Propagate through register move. */
+ if (do_recursion)
+ offset = fold_offsets (insn, src, analyze, foldable_insns);
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ case CONST_INT:
+ {
+ offset = INTVAL (src);
+ /* R1 = C is candidate for folding. */
+ if (!analyze)
+ bitmap_set_bit (foldable_insns, INSN_UID (insn));
+
+ /* Pattern recognized for folding. */
+ break;
+ }
+ default:
+ /* Cannot recognize. */
+ return false;
+ }
+
+ if (do_recursion && !analyze)
+ *offset_out = offset;
+
+ return true;
+}
+
+/* Function that computes the offset that would have to be added to all uses
+ of REG if the instructions marked in FOLDABLE_INSNS were to be eliminated.
+
+ If ANALYZE is true then mark in CAN_FOLD_INSNS which instructions
+ transitively only affect other instructions found in CAN_FOLD_INSNS.
+ If ANALYZE is false then compute the offset required for folding. */
+static HOST_WIDE_INT
+fold_offsets (rtx_insn *insn, rtx reg, bool analyze, bitmap foldable_insns)
+{
+ rtx_insn *def = get_single_def_in_bb (insn, reg);
+
+ if (!def || GET_CODE (PATTERN (def)) != SET)
+ return 0;
+
+ rtx dest = SET_DEST (PATTERN (def));
+
+ if (!REG_P (dest))
+ return 0;
+
+ /* We can only affect the values of GPR registers. */
+ unsigned int dest_regno = REGNO (dest);
+ if (fixed_regs[dest_regno]
+ || !TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], dest_regno))
+ return 0;
+
+ if (analyze)
+ {
+ /* Check if we know how to handle DEF. */
+ if (!fold_offsets_1 (def, true, false, NULL, NULL))
+ return 0;
+
+ /* We only fold through instructions that are transitively used as
+ memory addresses and do not have other uses. Use the same logic
+ from offset calculation to visit instructions that can propagate
+ offsets and keep track of them in CAN_FOLD_INSNS. */
+ bool success;
+ struct df_link *uses = get_uses (def, dest, &success), *ref_link;
+
+ if (!success)
+ return 0;
+
+ for (ref_link = uses; ref_link; ref_link = ref_link->next)
+ {
+ rtx_insn *use = DF_REF_INSN (ref_link->ref);
+
+ if (DEBUG_INSN_P (use))
+ continue;
+
+ /* Punt if the use is anything more complicated than a set
+ (clobber, use, etc). */
+ if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != SET)
+ return 0;
+
+ /* This use affects instructions outside of CAN_FOLD_INSNS. */
+ if (!bitmap_bit_p (&can_fold_insns, INSN_UID (use)))
+ return 0;
+
+ rtx use_set = PATTERN (use);
+
+ /* Special case: A foldable memory store is not foldable if it
+ mentions DEST outside of the address calculation. */
+ if (use_set && MEM_P (SET_DEST (use_set))
+ && reg_mentioned_p (dest, SET_SRC (use_set)))
+ return 0;
+ }
+
+ bitmap_set_bit (&can_fold_insns, INSN_UID (def));
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Instruction marked for propagation: ");
+ print_rtl_single (dump_file, def);
+ }
+ }
+ else
+ {
+ /* We cannot propagate through this instruction. */
+ if (!bitmap_bit_p (&can_fold_insns, INSN_UID (def)))
+ return 0;
+ }
+
+ HOST_WIDE_INT offset = 0;
+ bool recognized = fold_offsets_1 (def, analyze, true, &offset,
+ foldable_insns);
+
+ if (!recognized)
+ return 0;
+
+ return offset;
+}
+
+/* Test if INSN is a memory load / store that can have an offset folded to it.
+ Return true iff INSN is such an instruction and return through MEM_OUT,
+ REG_OUT and OFFSET_OUT the RTX that has a MEM code, the register that is
+ used as a base address and the offset accordingly.
+ All of the out pointers may be NULL in which case they will be ignored. */
+bool
+get_fold_mem_root (rtx_insn *insn, rtx *mem_out, rtx *reg_out,
+ HOST_WIDE_INT *offset_out)
+{
+ rtx set = single_set (insn);
+ rtx mem = NULL_RTX;
+
+ if (set != NULL_RTX)
+ {
+ rtx src = SET_SRC (set);
+ rtx dest = SET_DEST (set);
+
+ /* Don't fold when we have unspec / volatile. */
+ if (GET_CODE (src) == UNSPEC
+ || GET_CODE (src) == UNSPEC_VOLATILE
+ || GET_CODE (dest) == UNSPEC
+ || GET_CODE (dest) == UNSPEC_VOLATILE)
+ return false;
+
+ if (MEM_P (src))
+ mem = src;
+ else if (MEM_P (dest))
+ mem = dest;
+ else if ((GET_CODE (src) == SIGN_EXTEND
+ || GET_CODE (src) == ZERO_EXTEND)
+ && MEM_P (XEXP (src, 0)))
+ mem = XEXP (src, 0);
+ }
+
+ if (mem == NULL_RTX)
+ return false;
+
+ rtx mem_addr = XEXP (mem, 0);
+ rtx reg;
+ HOST_WIDE_INT offset;
+
+ if (REG_P (mem_addr))
+ {
+ reg = mem_addr;
+ offset = 0;
+ }
+ else if (GET_CODE (mem_addr) == PLUS
+ && REG_P (XEXP (mem_addr, 0))
+ && CONST_INT_P (XEXP (mem_addr, 1)))
+ {
+ reg = XEXP (mem_addr, 0);
+ offset = INTVAL (XEXP (mem_addr, 1));
+ }
+ else
+ return false;
+
+ if (mem_out)
+ *mem_out = mem;
+ if (reg_out)
+ *reg_out = reg;
+ if (offset_out)
+ *offset_out = offset;
+
+ return true;
+}
+
+/* If INSN is a root memory instruction then do a DFS traversal on its
+ definitions and find folding candidates. */
+static void
+do_analysis (rtx_insn *insn)
+{
+ rtx reg;
+ if (!get_fold_mem_root (insn, NULL, &reg, NULL))
+ return;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Starting analysis from root: ");
+ print_rtl_single (dump_file, insn);
+ }
+
+ /* Analyse folding opportunities for this memory instruction. */
+ bitmap_set_bit (&can_fold_insns, INSN_UID (insn));
+ fold_offsets (insn, reg, true, NULL);
+}
+
+static void
+do_fold_info_calculation (rtx_insn *insn, fold_info_map *fold_info)
+{
+ rtx mem, reg;
+ HOST_WIDE_INT cur_offset;
+ if (!get_fold_mem_root (insn, &mem, &reg, &cur_offset))
+ return;
+
+ fold_mem_info *info = new fold_mem_info;
+ info->added_offset = fold_offsets (insn, reg, false, info->fold_insns);
+
+ fold_info->put (insn, info);
+}
+
+/* If INSN is a root memory instruction then compute a potentially new offset
+ for it and test if the resulting instruction is valid. */
+static void
+do_check_validity (rtx_insn *insn, fold_mem_info *info)
+{
+ rtx mem, reg;
+ HOST_WIDE_INT cur_offset;
+ if (!get_fold_mem_root (insn, &mem, &reg, &cur_offset))
+ return;
+
+ HOST_WIDE_INT new_offset = cur_offset + info->added_offset;
+
+ /* Test if it is valid to change MEM's address offset to NEW_OFFSET. */
+ int icode = INSN_CODE (insn);
+ INSN_CODE (insn) = -1;
+ rtx mem_addr = XEXP (mem, 0);
+ machine_mode mode = GET_MODE (mem_addr);
+ if (new_offset != 0)
+ XEXP (mem, 0) = gen_rtx_PLUS (mode, reg, gen_int_mode (new_offset, mode));
+ else
+ XEXP (mem, 0) = reg;
+
+ bool illegal = insn_invalid_p (insn, false)
+ || !memory_address_addr_space_p (mode, XEXP (mem, 0),
+ MEM_ADDR_SPACE (mem));
+
+ /* Restore the instruction. */
+ XEXP (mem, 0) = mem_addr;
+ INSN_CODE (insn) = icode;
+
+ if (illegal)
+ bitmap_ior_into (&cannot_fold_insns, info->fold_insns);
+ else
+ bitmap_ior_into (&candidate_fold_insns, info->fold_insns);
+}
+
+static bool
+compute_validity_closure (fold_info_map *fold_info)
+{
+ /* Let's say we have an arbitrary chain of foldable instructions xN = xN + C
+ and memory operations rN that use xN as shown below. If folding x1 in r1
+ turns out to be invalid for whatever reason then it's also invalid to fold
+ any of the other xN into any rN. That means that we need the transitive
+ closure of validity to determine whether we can fold a xN instruction.
+
+ +--------------+ +-------------------+ +-------------------+
+ | r1 = mem[x1] | | r2 = mem[x1 + x2] | | r3 = mem[x2 + x3] | ...
+ +--------------+ +-------------------+ +-------------------+
+ ^ ^ ^ ^ ^
+ | / | / | ...
+ | / | / |
+ +-------------+ / +-------------+ / +-------------+
+ | x1 = x1 + 1 |-----+ | x2 = x2 + 1 |-----+ | x3 = x3 + 1 |--- ...
+ +-------------+ +-------------+ +-------------+
+ ^ ^ ^
+ | | |
+ ... ... ...
+ */
+
+ /* In general three iterations should be enough for most cases, but allow up
+ to five when -fexpensive-optimizations is used. */
+ int max_iters = 3 + 2 * flag_expensive_optimizations;
+ for (int pass = 0; pass < max_iters; pass++)
+ {
+ bool made_changes = false;
+ for (fold_info_map::iterator iter = fold_info->begin ();
+ iter != fold_info->end (); ++iter)
+ {
+ fold_mem_info *info = (*iter).second;
+ if (bitmap_intersect_p (&cannot_fold_insns, info->fold_insns))
+ made_changes |= bitmap_ior_into (&cannot_fold_insns,
+ info->fold_insns);
+ }
+
+ if (!made_changes)
+ return true;
+ }
+
+ return false;
+}
+
+/* If INSN is a root memory instruction that was affected by any folding
+ then update its offset as necessary. */
+static void
+do_commit_offset (rtx_insn *insn, fold_mem_info *info)
+{
+ rtx mem, reg;
+ HOST_WIDE_INT cur_offset;
+ if (!get_fold_mem_root (insn, &mem, &reg, &cur_offset))
+ return;
+
+ HOST_WIDE_INT new_offset = cur_offset + info->added_offset;
+
+ if (new_offset == cur_offset)
+ return;
+
+ gcc_assert (!bitmap_empty_p (info->fold_insns));
+
+ if (bitmap_intersect_p (&cannot_fold_insns, info->fold_insns))
+ return;
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "Memory offset changed from "
+ HOST_WIDE_INT_PRINT_DEC " to " HOST_WIDE_INT_PRINT_DEC
+ " for instruction:\n", cur_offset, new_offset);
+ print_rtl_single (dump_file, insn);
+ }
+
+ machine_mode mode = GET_MODE (XEXP (mem, 0));
+ if (new_offset != 0)
+ XEXP (mem, 0) = gen_rtx_PLUS (mode, reg, gen_int_mode (new_offset, mode));
+ else
+ XEXP (mem, 0) = reg;
+ INSN_CODE (insn) = recog (PATTERN (insn), insn, 0);
+ df_insn_rescan (insn);
+}
+
+/* If INSN is a move / add instruction that was folded then replace its
+ constant part with zero. */
+static void
+do_commit_insn (rtx_insn *insn)
+{
+ if (bitmap_bit_p (&candidate_fold_insns, INSN_UID (insn))
+ && !bitmap_bit_p (&cannot_fold_insns, INSN_UID (insn)))
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "Instruction folded:");
+ print_rtl_single (dump_file, insn);
+ }
+
+ stats_fold_count++;
+
+ rtx set = single_set (insn);
+ rtx dest = SET_DEST (set);
+ rtx src = SET_SRC (set);
+
+ /* Emit a move and let subsequent passes eliminate it if possible. */
+ if (GET_CODE (src) == CONST_INT)
+ {
+ /* INSN is R1 = C.
+ Replace it with R1 = 0 because C was folded. */
+ rtx mov_rtx
+ = gen_move_insn (dest, gen_int_mode (0, GET_MODE (dest)));
+ df_insn_rescan (emit_insn_after (mov_rtx, insn));
+ }
+ else
+ {
+ /* INSN is R1 = R2 + C.
+ Replace it with R1 = R2 because C was folded. */
+ rtx arg1 = XEXP (src, 0);
+
+ /* If the DEST == ARG1 then the move is a no-op. */
+ if (REGNO (dest) != REGNO (arg1))
+ {
+ gcc_checking_assert (GET_MODE (dest) == GET_MODE (arg1));
+ rtx mov_rtx = gen_move_insn (dest, arg1);
+ df_insn_rescan (emit_insn_after (mov_rtx, insn));
+ }
+ }
+
+ /* Delete the original move / add instruction. */
+ delete_insn (insn);
+ }
+}
+
+unsigned int
+pass_fold_mem_offsets::execute (function *fn)
+{
+ df_set_flags (DF_EQ_NOTES + DF_RD_PRUNE_DEAD_DEFS + DF_DEFER_INSN_RESCAN);
+ df_chain_add_problem (DF_UD_CHAIN + DF_DU_CHAIN);
+ df_analyze ();
+
+ bitmap_initialize (&can_fold_insns, NULL);
+ bitmap_initialize (&candidate_fold_insns, NULL);
+ bitmap_initialize (&cannot_fold_insns, NULL);
+
+ stats_fold_count = 0;
+
+ basic_block bb;
+ rtx_insn *insn;
+ FOR_ALL_BB_FN (bb, fn)
+ {
+ /* There is a conflict between this pass and RISCV's shorten-memrefs
+ pass. For now disable folding if optimizing for size because
+ otherwise this cancels the effects of shorten-memrefs. */
+ if (optimize_bb_for_size_p (bb))
+ continue;
+
+ fold_info_map fold_info;
+
+ bitmap_clear (&can_fold_insns);
+ bitmap_clear (&candidate_fold_insns);
+ bitmap_clear (&cannot_fold_insns);
+
+ FOR_BB_INSNS (bb, insn)
+ do_analysis (insn);
+
+ FOR_BB_INSNS (bb, insn)
+ do_fold_info_calculation (insn, &fold_info);
+
+ FOR_BB_INSNS (bb, insn)
+ if (fold_mem_info **info = fold_info.get (insn))
+ do_check_validity (insn, *info);
+
+ if (compute_validity_closure (&fold_info))
+ {
+ FOR_BB_INSNS (bb, insn)
+ if (fold_mem_info **info = fold_info.get (insn))
+ do_commit_offset (insn, *info);
+
+ FOR_BB_INSNS (bb, insn)
+ do_commit_insn (insn);
+ }
+
+ for (fold_info_map::iterator iter = fold_info.begin ();
+ iter != fold_info.end (); ++iter)
+ delete (*iter).second;
+ }
+
+ statistics_counter_event (cfun, "Number of folded instructions",
+ stats_fold_count);
+
+ bitmap_release (&can_fold_insns);
+ bitmap_release (&candidate_fold_insns);
+ bitmap_release (&cannot_fold_insns);
+
+ return 0;
+}
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_fold_mem_offsets (gcc::context *ctxt)
+{
+ return new pass_fold_mem_offsets (ctxt);
+}
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 657dc91..e753eb9 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,111 @@
+2023-10-17 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/111837
+ * frontend-passes.cc (traverse_io_block): Dependency check of loop
+ nest shall be triangular, not banded.
+
+2023-10-17 Tobias Burnus <tobias@codesourcery.com>
+
+ * intrinsic.texi (signal): Mention that the argument
+ passed to the signal handler procedure is passed by value.
+ Extend example.
+
+2023-10-15 Tobias Burnus <tobias@codesourcery.com>
+
+ * scanner.cc (skip_free_comments, skip_fixed_comments): Remove
+ leftover 'OpenACC' from comments about OpenMP's conditional
+ compilation sentinel.
+
+2023-10-14 Tobias Burnus <tobias@codesourcery.com>
+
+ * gfortran.h (ext_attr_t): Add omp_allocate flag.
+ * match.cc (gfc_free_omp_namelist): Avoid deleting same
+ u2.allocator multiple times now that a sequence can use
+ the same one.
+ * openmp.cc (gfc_match_omp_clauses, gfc_match_omp_allocate): Use
+ same allocator expr multiple times.
+ (is_predefined_allocator): Make static.
+ (gfc_resolve_omp_allocate): Update/extend restriction checks;
+ remove sorry message.
+ (resolve_omp_clauses): Reject corarrays in allocate/allocators
+ directive.
+ * parse.cc (check_omp_allocate_stmt): Permit procedure pointers
+ here (rejected later) for less misleading diagnostic.
+ * trans-array.cc (gfc_trans_auto_array_allocation): Propagate
+ size for GOMP_alloc and the location to which it should be added.
+ * trans-decl.cc (gfc_trans_deferred_vars): Handle 'omp allocate'
+ for stack variables; sorry for static variables/common blocks.
+ * trans-openmp.cc (gfc_trans_omp_clauses): Evaluate 'allocate'
+ clause's allocator only once; fix adding expressions to the
+ block.
+ (gfc_trans_omp_single): Pass a block to gfc_trans_omp_clauses.
+
+2023-10-13 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/104351
+ * decl.cc (get_proc_name): Extend name conflict detection between
+ internal procedure and previous declaration also to derived type.
+
+2023-10-13 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/110957
+ * invoke.texi: Update documentation to reflect '-ffpe-trap=none'.
+ * options.cc (gfc_handle_fpe_option): Fix mixup up of error messages
+ for options -ffpe-trap and -ffpe-summary. Accept '-ffpe-trap=none'
+ to clear FPU traps previously set on command line.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111779
+ * trans-expr.cc (gfc_trans_assignment_1): Initialize
+ lhs_caf_attr and rhs_caf_attr codimension flag to avoid
+ false positive -Wuninitialized.
+
+2023-10-12 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/67740
+ * trans-expr.cc (gfc_trans_pointer_assignment): Set the hidden
+ string length component for pointer assignment to character
+ pointer components.
+
+2023-10-08 Tobias Burnus <tobias@codesourcery.com>
+
+ * parse.cc (parse_omp_structured_block): Make the user code end
+ up inside of BLOCK construct for strictly structured blocks;
+ fix fallout for 'section' and 'teams'.
+ * openmp.cc (resolve_omp_target): Fix changed BLOCK handling
+ for teams in target checking.
+
+2023-10-04 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/37336
+ PR fortran/111674
+ * trans-expr.cc (gfc_trans_scalar_assign): Finalize components
+ on deallocation if derived type is not finalizable.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * error.cc (gfc_format_decoder): Update for "m_" prefixes to
+ text_info fields.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * error.cc (gfc_diagnostics_init): Update for change to start_span.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * error.cc (gfc_diagnostic_starter): Update for reorganization of
+ source-printing fields of diagnostic_context.
+ (gfc_diagnostics_init): Likewise.
+ (gfc_diagnostics_finish): Likewise.
+
+2023-09-29 Andre Vehreschild <vehre@gcc.gnu.org>
+
+ PR fortran/37336
+ * trans-array.cc (structure_alloc_comps): Deref coarray.
+ (gfc_trans_deferred_array): Add freeing of components after
+ check for allocated coarray.
+
2023-09-24 Paul Thomas <pault@gcc.gnu.org>
PR fortran/68155
diff --git a/gcc/fortran/decl.cc b/gcc/fortran/decl.cc
index 4a3c5b8..bdd3be3 100644
--- a/gcc/fortran/decl.cc
+++ b/gcc/fortran/decl.cc
@@ -1404,7 +1404,9 @@ get_proc_name (const char *name, gfc_symbol **result, bool module_fcn_entry)
/* Trap declarations of attributes in encompassing scope. The
signature for this is that ts.kind is nonzero for no-CLASS
entity. For a CLASS entity, ts.kind is zero. */
- if ((sym->ts.kind != 0 || sym->ts.type == BT_CLASS)
+ if ((sym->ts.kind != 0
+ || sym->ts.type == BT_CLASS
+ || sym->ts.type == BT_DERIVED)
&& !sym->attr.implicit_type
&& sym->attr.proc == 0
&& gfc_current_ns->parent != NULL
diff --git a/gcc/fortran/error.cc b/gcc/fortran/error.cc
index 6cae672..1b34619 100644
--- a/gcc/fortran/error.cc
+++ b/gcc/fortran/error.cc
@@ -1074,7 +1074,7 @@ gfc_format_decoder (pretty_printer *pp, text_info *text, const char *spec,
if (*spec == 'C')
loc = &gfc_current_locus;
else
- loc = va_arg (*text->args_ptr, locus *);
+ loc = va_arg (*text->m_args_ptr, locus *);
gcc_assert (loc->nextc - loc->lb->line >= 0);
unsigned int offset = loc->nextc - loc->lb->line;
if (*spec == 'C' && *loc->nextc != '\0')
@@ -1222,7 +1222,7 @@ gfc_diagnostic_starter (diagnostic_context *context,
? gfc_diagnostic_build_locus_prefix (context, s1)
: gfc_diagnostic_build_locus_prefix (context, s1, s2);
- if (!context->show_caret
+ if (!context->m_source_printing.enabled
|| diagnostic_location (diagnostic, 0) <= BUILTINS_LOCATION
|| diagnostic_location (diagnostic, 0) == context->last_location)
{
@@ -1637,11 +1637,11 @@ void
gfc_diagnostics_init (void)
{
diagnostic_starter (global_dc) = gfc_diagnostic_starter;
- global_dc->start_span = gfc_diagnostic_start_span;
+ global_dc->m_text_callbacks.start_span = gfc_diagnostic_start_span;
diagnostic_finalizer (global_dc) = gfc_diagnostic_finalizer;
diagnostic_format_decoder (global_dc) = gfc_format_decoder;
- global_dc->caret_chars[0] = '1';
- global_dc->caret_chars[1] = '2';
+ global_dc->m_source_printing.caret_chars[0] = '1';
+ global_dc->m_source_printing.caret_chars[1] = '2';
pp_warning_buffer = new (XNEW (output_buffer)) output_buffer ();
pp_warning_buffer->flush_p = false;
/* pp_error_buffer is statically allocated. This simplifies memory
@@ -1658,6 +1658,6 @@ gfc_diagnostics_finish (void)
defaults. */
diagnostic_starter (global_dc) = gfc_diagnostic_starter;
diagnostic_finalizer (global_dc) = gfc_diagnostic_finalizer;
- global_dc->caret_chars[0] = '^';
- global_dc->caret_chars[1] = '^';
+ global_dc->m_source_printing.caret_chars[0] = '^';
+ global_dc->m_source_printing.caret_chars[1] = '^';
}
diff --git a/gcc/fortran/frontend-passes.cc b/gcc/fortran/frontend-passes.cc
index 136a292..536884b 100644
--- a/gcc/fortran/frontend-passes.cc
+++ b/gcc/fortran/frontend-passes.cc
@@ -1326,7 +1326,7 @@ traverse_io_block (gfc_code *code, bool *has_reached, gfc_code *prev)
if (iters[i])
{
gfc_expr *var = iters[i]->var;
- for (int j = i - 1; j < i; j++)
+ for (int j = 0; j < i; j++)
{
if (iters[j]
&& (var_in_expr (var, iters[j]->start)
diff --git a/gcc/fortran/gfortran.h b/gcc/fortran/gfortran.h
index 6caf776..88f33b0 100644
--- a/gcc/fortran/gfortran.h
+++ b/gcc/fortran/gfortran.h
@@ -1000,6 +1000,7 @@ typedef struct
unsigned omp_declare_target:1;
unsigned omp_declare_target_link:1;
ENUM_BITFIELD (gfc_omp_device_type) omp_device_type:2;
+ unsigned omp_allocate:1;
/* Mentioned in OACC DECLARE. */
unsigned oacc_declare_create:1;
diff --git a/gcc/fortran/intrinsic.texi b/gcc/fortran/intrinsic.texi
index 6c7ad03..d140718 100644
--- a/gcc/fortran/intrinsic.texi
+++ b/gcc/fortran/intrinsic.texi
@@ -13168,10 +13168,10 @@ end program test_sign
@table @asis
@item @emph{Description}:
@code{SIGNAL(NUMBER, HANDLER [, STATUS])} causes external subroutine
-@var{HANDLER} to be executed with a single integer argument when signal
-@var{NUMBER} occurs. If @var{HANDLER} is an integer, it can be used to
-turn off handling of signal @var{NUMBER} or revert to its default
-action. See @code{signal(2)}.
+@var{HANDLER} to be executed with a single integer argument passed by
+value when signal @var{NUMBER} occurs. If @var{HANDLER} is an integer,
+it can be used to turn off handling of signal @var{NUMBER} or revert to
+its default action. See @code{signal(2)}.
If @code{SIGNAL} is called as a subroutine and the @var{STATUS} argument
is supplied, it is set to the value returned by @code{signal(2)}.
@@ -13197,19 +13197,26 @@ Subroutine, function
@item @var{STATUS} @tab (Optional) @var{STATUS} shall be a scalar
integer. It has @code{INTENT(OUT)}.
@end multitable
-@c TODO: What should the interface of the handler be? Does it take arguments?
@item @emph{Return value}:
The @code{SIGNAL} function returns the value returned by @code{signal(2)}.
@item @emph{Example}:
@smallexample
+module m_handler
+contains
+ ! POSIX.1-2017: void (*func)(int)
+ subroutine handler_print(signum) bind(C)
+ use iso_c_binding, only: c_int
+ integer(c_int), value :: signum
+ print *, 'handler_print invoked with signum =', signum
+ end subroutine
+end module
program test_signal
- intrinsic signal
- external handler_print
-
- call signal (12, handler_print)
- call signal (10, 1)
+ use m_handler
+ intrinsic :: signal, sleep
+ call signal (12, handler_print) ! 12 = SIGUSR2 (on some systems)
+ call signal (10, 1) ! 10 = SIGUSR1 and 1 = SIG_IGN (on some systems)
call sleep (30)
end program test_signal
diff --git a/gcc/fortran/invoke.texi b/gcc/fortran/invoke.texi
index 38150b1..10387e3 100644
--- a/gcc/fortran/invoke.texi
+++ b/gcc/fortran/invoke.texi
@@ -1294,7 +1294,8 @@ Specify a list of floating point exception traps to enable. On most
systems, if a floating point exception occurs and the trap for that
exception is enabled, a SIGFPE signal will be sent and the program
being aborted, producing a core file useful for debugging. @var{list}
-is a (possibly empty) comma-separated list of the following
+is a (possibly empty) comma-separated list of either @samp{none} (to
+clear the set of exceptions to be trapped), or of the following
exceptions: @samp{invalid} (invalid floating point operation, such as
@code{SQRT(-1.0)}), @samp{zero} (division by zero), @samp{overflow}
(overflow in a floating point operation), @samp{underflow} (underflow
@@ -1314,7 +1315,8 @@ If the option is used more than once in the command line, the lists will
be joined: '@code{ffpe-trap=}@var{list1} @code{ffpe-trap=}@var{list2}'
is equivalent to @code{ffpe-trap=}@var{list1},@var{list2}.
-Note that once enabled an exception cannot be disabled (no negative form).
+Note that once enabled an exception cannot be disabled (no negative form),
+except by clearing all traps by specifying @samp{none}.
Many, if not most, floating point operations incur loss of precision
due to rounding, and hence the @code{ffpe-trap=inexact} is likely to
diff --git a/gcc/fortran/match.cc b/gcc/fortran/match.cc
index c926f38..148a86b 100644
--- a/gcc/fortran/match.cc
+++ b/gcc/fortran/match.cc
@@ -5541,6 +5541,7 @@ gfc_free_omp_namelist (gfc_omp_namelist *name, bool free_ns,
bool free_mem_traits_space)
{
gfc_omp_namelist *n;
+ gfc_expr *last_allocator = NULL;
for (; name; name = n)
{
@@ -5552,7 +5553,13 @@ gfc_free_omp_namelist (gfc_omp_namelist *name, bool free_ns,
if (free_ns)
gfc_free_namespace (name->u2.ns);
else if (free_align_allocator)
- gfc_free_expr (name->u2.allocator);
+ {
+ if (last_allocator != name->u2.allocator)
+ {
+ last_allocator = name->u2.allocator;
+ gfc_free_expr (name->u2.allocator);
+ }
+ }
else if (free_mem_traits_space)
{ } /* name->u2.traits_sym: shall not call gfc_free_symbol here. */
else if (name->u2.udr)
diff --git a/gcc/fortran/openmp.cc b/gcc/fortran/openmp.cc
index dc0c801..1cc65d7 100644
--- a/gcc/fortran/openmp.cc
+++ b/gcc/fortran/openmp.cc
@@ -2032,11 +2032,9 @@ gfc_match_omp_clauses (gfc_omp_clauses **cp, const omp_mask mask,
for (gfc_omp_namelist *n = *head; n; n = n->next)
{
- n->u2.allocator = ((allocator)
- ? gfc_copy_expr (allocator) : NULL);
+ n->u2.allocator = allocator;
n->u.align = (align) ? gfc_copy_expr (align) : NULL;
}
- gfc_free_expr (allocator);
gfc_free_expr (align);
continue;
}
@@ -4547,9 +4545,8 @@ gfc_match_omp_allocate (void)
for (; vars; vars = vars->next)
{
vars->u.align = (align) ? gfc_copy_expr (align) : NULL;
- vars->u2.allocator = ((allocator) ? gfc_copy_expr (allocator) : NULL);
+ vars->u2.allocator = allocator;
}
- gfc_free_expr (allocator);
gfc_free_expr (align);
}
return MATCH_YES;
@@ -7191,7 +7188,7 @@ resolve_omp_udr_clause (gfc_omp_namelist *n, gfc_namespace *ns,
/* Assume that a constant expression in the range 1 (omp_default_mem_alloc)
to 8 (omp_thread_mem_alloc) range is fine. The original symbol name is
already lost during matching via gfc_match_expr. */
-bool
+static bool
is_predefined_allocator (gfc_expr *expr)
{
return (gfc_resolve_expr (expr)
@@ -7210,9 +7207,19 @@ void
gfc_resolve_omp_allocate (gfc_namespace *ns, gfc_omp_namelist *list)
{
for (gfc_omp_namelist *n = list; n; n = n->next)
- n->sym->mark = 0;
- for (gfc_omp_namelist *n = list; n; n = n->next)
{
+ if (n->sym->attr.result || n->sym->result == n->sym)
+ {
+ gfc_error ("Unexpected function-result variable %qs at %L in "
+ "declarative !$OMP ALLOCATE", n->sym->name, &n->where);
+ continue;
+ }
+ if (ns->omp_allocate->sym->attr.proc_pointer)
+ {
+ gfc_error ("Procedure pointer %qs not supported with !$OMP "
+ "ALLOCATE at %L", n->sym->name, &n->where);
+ continue;
+ }
if (n->sym->attr.flavor != FL_VARIABLE)
{
gfc_error ("Argument %qs at %L to declarative !$OMP ALLOCATE "
@@ -7220,8 +7227,7 @@ gfc_resolve_omp_allocate (gfc_namespace *ns, gfc_omp_namelist *list)
&n->where);
continue;
}
- if (ns != n->sym->ns || n->sym->attr.use_assoc
- || n->sym->attr.host_assoc || n->sym->attr.imported)
+ if (ns != n->sym->ns || n->sym->attr.use_assoc || n->sym->attr.imported)
{
gfc_error ("Argument %qs at %L to declarative !$OMP ALLOCATE shall be"
" in the same scope as the variable declaration",
@@ -7234,7 +7240,13 @@ gfc_resolve_omp_allocate (gfc_namespace *ns, gfc_omp_namelist *list)
"declarative !$OMP ALLOCATE", n->sym->name, &n->where);
continue;
}
- if (n->sym->mark)
+ if (n->sym->attr.codimension)
+ {
+ gfc_error ("Unexpected coarray argument %qs as argument at %L to "
+ "declarative !$OMP ALLOCATE", n->sym->name, &n->where);
+ continue;
+ }
+ if (n->sym->attr.omp_allocate)
{
if (n->sym->attr.in_common)
{
@@ -7249,7 +7261,28 @@ gfc_resolve_omp_allocate (gfc_namespace *ns, gfc_omp_namelist *list)
n->sym->name, &n->where);
continue;
}
- n->sym->mark = 1;
+ /* For 'equivalence(a,b)', a 'union_type {<type> a,b} equiv.0' is created
+ with a value expression for 'a' as 'equiv.0.a' (likewise for b); while
+ this can be handled, EQUIVALENCE is marked as obsolescent since Fortran
+ 2018 and also not widely used. However, it could be supported,
+ if needed. */
+ if (n->sym->attr.in_equivalence)
+ {
+ gfc_error ("Sorry, EQUIVALENCE object %qs not supported with !$OMP "
+ "ALLOCATE at %L", n->sym->name, &n->where);
+ continue;
+ }
+ /* Similar for Cray pointer/pointee - they could be implemented but as
+ common vendor extension but nowadays rarely used and requiring
+ -fcray-pointer, there is no need to support them. */
+ if (n->sym->attr.cray_pointer || n->sym->attr.cray_pointee)
+ {
+ gfc_error ("Sorry, Cray pointers and pointees such as %qs are not "
+ "supported with !$OMP ALLOCATE at %L",
+ n->sym->name, &n->where);
+ continue;
+ }
+ n->sym->attr.omp_allocate = 1;
if ((n->sym->ts.type == BT_CLASS && n->sym->attr.class_ok
&& CLASS_DATA (n->sym)->attr.allocatable)
|| (n->sym->ts.type != BT_CLASS && n->sym->attr.allocatable))
@@ -7307,8 +7340,6 @@ gfc_resolve_omp_allocate (gfc_namespace *ns, gfc_omp_namelist *list)
"%<omp_allocator_handle_kind%> kind at %L",
&n->u2.allocator->where);
}
- gfc_error ("Sorry, declarative !$OMP ALLOCATE at %L not yet supported",
- &list->where);
}
/* Resolve ASSUME's and ASSUMES' assumption clauses. Note that absent/contains
@@ -7897,6 +7928,9 @@ resolve_omp_clauses (gfc_code *code, gfc_omp_clauses *omp_clauses,
{
if (n->sym == NULL)
continue;
+ if (n->sym->attr.codimension)
+ gfc_error ("Unexpected coarray %qs in %<allocate%> at %L",
+ n->sym->name, &n->where);
for (a = code->block->next->ext.alloc.list; a; a = a->next)
if (a->expr->expr_type == EXPR_VARIABLE
&& a->expr->symtree->n.sym == n->sym)
@@ -11245,6 +11279,8 @@ resolve_omp_target (gfc_code *code)
if (!code->ext.omp_clauses->contains_teams_construct)
return;
gfc_code *c = code->block->next;
+ if (c->op == EXEC_BLOCK)
+ c = c->ext.block.ns->code;
if (code->ext.omp_clauses->target_first_st_is_teams
&& ((GFC_IS_TEAMS_CONSTRUCT (c->op) && c->next == NULL)
|| (c->op == EXEC_BLOCK
diff --git a/gcc/fortran/options.cc b/gcc/fortran/options.cc
index 27311961..2ad2247 100644
--- a/gcc/fortran/options.cc
+++ b/gcc/fortran/options.cc
@@ -555,9 +555,12 @@ gfc_handle_fpe_option (const char *arg, bool trap)
pos++;
result = 0;
- if (!trap && strncmp ("none", arg, pos) == 0)
+ if (strncmp ("none", arg, pos) == 0)
{
- gfc_option.fpe_summary = 0;
+ if (trap)
+ gfc_option.fpe = 0;
+ else
+ gfc_option.fpe_summary = 0;
arg += pos;
pos = 0;
continue;
@@ -586,7 +589,7 @@ gfc_handle_fpe_option (const char *arg, bool trap)
break;
}
}
- if (!result && !trap)
+ if (!result && trap)
gfc_fatal_error ("Argument to %<-ffpe-trap%> is not valid: %s", arg);
else if (!result)
gfc_fatal_error ("Argument to %<-ffpe-summary%> is not valid: %s", arg);
diff --git a/gcc/fortran/parse.cc b/gcc/fortran/parse.cc
index 5838680..e103ebe 100644
--- a/gcc/fortran/parse.cc
+++ b/gcc/fortran/parse.cc
@@ -833,18 +833,18 @@ check_omp_allocate_stmt (locus *loc)
&n->expr->where, gfc_ascii_statement (ST_OMP_ALLOCATE));
return false;
}
+ /* Procedure pointers are not allocatable; hence, we do not regard them as
+ pointers here - and reject them later in gfc_resolve_omp_allocate. */
bool alloc_ptr;
if (n->sym->ts.type == BT_CLASS && n->sym->attr.class_ok)
alloc_ptr = (CLASS_DATA (n->sym)->attr.allocatable
|| CLASS_DATA (n->sym)->attr.class_pointer);
else
- alloc_ptr = (n->sym->attr.allocatable || n->sym->attr.pointer
- || n->sym->attr.proc_pointer);
+ alloc_ptr = n->sym->attr.allocatable || n->sym->attr.pointer;
if (alloc_ptr
|| (n->sym->ns && n->sym->ns->proc_name
&& (n->sym->ns->proc_name->attr.allocatable
- || n->sym->ns->proc_name->attr.pointer
- || n->sym->ns->proc_name->attr.proc_pointer)))
+ || n->sym->ns->proc_name->attr.pointer)))
has_allocatable = true;
else
has_non_allocatable = true;
@@ -5814,7 +5814,7 @@ parse_omp_structured_block (gfc_statement omp_st, bool workshare_stmts_only)
{
gfc_statement st, omp_end_st, first_st;
gfc_code *cp, *np;
- gfc_state_data s;
+ gfc_state_data s, s2;
accept_statement (omp_st);
@@ -5915,13 +5915,21 @@ parse_omp_structured_block (gfc_statement omp_st, bool workshare_stmts_only)
gfc_notify_std (GFC_STD_F2008, "BLOCK construct at %C");
my_ns = gfc_build_block_ns (gfc_current_ns);
- gfc_current_ns = my_ns;
- my_parent = my_ns->parent;
-
new_st.op = EXEC_BLOCK;
new_st.ext.block.ns = my_ns;
new_st.ext.block.assoc = NULL;
accept_statement (ST_BLOCK);
+
+ push_state (&s2, COMP_BLOCK, my_ns->proc_name);
+ gfc_current_ns = my_ns;
+ my_parent = my_ns->parent;
+ if (omp_st == ST_OMP_SECTIONS
+ || omp_st == ST_OMP_PARALLEL_SECTIONS)
+ {
+ np = new_level (cp);
+ np->op = cp->op;
+ }
+
first_st = next_statement ();
st = parse_spec (first_st);
}
@@ -5937,6 +5945,8 @@ parse_omp_structured_block (gfc_statement omp_st, bool workshare_stmts_only)
case ST_OMP_TEAMS_LOOP:
{
gfc_state_data *stk = gfc_state_stack->previous;
+ if (stk->state == COMP_OMP_STRICTLY_STRUCTURED_BLOCK)
+ stk = stk->previous;
stk->tail->ext.omp_clauses->target_first_st_is_teams = true;
break;
}
@@ -6035,8 +6045,10 @@ parse_omp_structured_block (gfc_statement omp_st, bool workshare_stmts_only)
else if (block_construct && st == ST_END_BLOCK)
{
accept_statement (st);
+ gfc_current_ns->code = gfc_state_stack->head;
gfc_current_ns = my_parent;
- pop_state ();
+ pop_state (); /* Inner BLOCK */
+ pop_state (); /* Outer COMP_OMP_STRICTLY_STRUCTURED_BLOCK */
st = next_statement ();
if (st == omp_end_st)
diff --git a/gcc/fortran/scanner.cc b/gcc/fortran/scanner.cc
index 9f0d9a7..e2a25a1 100644
--- a/gcc/fortran/scanner.cc
+++ b/gcc/fortran/scanner.cc
@@ -877,7 +877,7 @@ skip_free_comments (void)
/* If -fopenmp/-fopenacc, we need to handle here 2 things:
1) don't treat !$omp/!$acc as comments, but directives
- 2) handle OpenMP/OpenACC conditional compilation, where
+ 2) handle OpenMP conditional compilation, where
!$ should be treated as 2 spaces (for initial lines
only if followed by space). */
if (at_bol)
@@ -1106,7 +1106,7 @@ skip_fixed_comments (void)
/* If -fopenmp/-fopenacc, we need to handle here 2 things:
1) don't treat !$omp/!$acc|c$omp/c$acc|*$omp / *$acc as comments,
but directives
- 2) handle OpenMP/OpenACC conditional compilation, where
+ 2) handle OpenMP conditional compilation, where
!$|c$|*$ should be treated as 2 spaces if the characters
in columns 3 to 6 are valid fixed form label columns
characters. */
diff --git a/gcc/fortran/trans-array.cc b/gcc/fortran/trans-array.cc
index e0fc8eb..bbb81f4 100644
--- a/gcc/fortran/trans-array.cc
+++ b/gcc/fortran/trans-array.cc
@@ -82,6 +82,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree.h"
#include "gfortran.h"
#include "gimple-expr.h"
+#include "tree-iterator.h"
+#include "stringpool.h" /* Required by "attribs.h". */
+#include "attribs.h" /* For lookup_attribute. */
#include "trans.h"
#include "fold-const.h"
#include "constructor.h"
@@ -6770,6 +6773,15 @@ gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
gimplifier to allocate storage, and all that good stuff. */
tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
gfc_add_expr_to_block (&init, tmp);
+ if (sym->attr.omp_allocate)
+ {
+ /* Save location of size calculation to ensure GOMP_alloc is placed
+ after it. */
+ tree omp_alloc = lookup_attribute ("omp allocate",
+ DECL_ATTRIBUTES (decl));
+ TREE_CHAIN (TREE_CHAIN (TREE_VALUE (omp_alloc)))
+ = build_tree_list (NULL_TREE, tsi_stmt (tsi_last (init.head)));
+ }
}
if (onstack)
@@ -6798,8 +6810,22 @@ gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
return;
}
+ if (sym->attr.omp_allocate)
+ {
+ /* The size is the number of elements in the array, so multiply by the
+ size of an element to get the total size. */
+ tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, fold_convert (gfc_array_index_type, tmp));
+ size = gfc_evaluate_now (size, &init);
- if (flag_stack_arrays)
+ tree omp_alloc = lookup_attribute ("omp allocate",
+ DECL_ATTRIBUTES (decl));
+ TREE_CHAIN (TREE_CHAIN (TREE_VALUE (omp_alloc)))
+ = build_tree_list (size, NULL_TREE);
+ space = NULL_TREE;
+ }
+ else if (flag_stack_arrays)
{
gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
space = build_decl (gfc_get_location (&sym->declared_at),
@@ -9320,6 +9346,12 @@ structure_alloc_comps (gfc_symbol * der_type, tree decl, tree dest,
gfc_add_expr_to_block (&fnblock, tmp);
}
+ /* Still having a descriptor array of rank == 0 here, indicates an
+ allocatable coarrays. Dereference it correctly. */
+ if (GFC_DESCRIPTOR_TYPE_P (decl_type))
+ {
+ decl = build_fold_indirect_ref (gfc_conv_array_data (decl));
+ }
/* Otherwise, act on the components or recursively call self to
act on a chain of components. */
for (c = der_type->components; c; c = c->next)
@@ -11507,7 +11539,11 @@ gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
{
int rank;
rank = sym->as ? sym->as->rank : 0;
- tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
+ tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank,
+ (sym->attr.codimension
+ && flag_coarray == GFC_FCOARRAY_LIB)
+ ? GFC_STRUCTURE_CAF_MODE_IN_COARRAY
+ : 0);
gfc_add_expr_to_block (&cleanup, tmp);
}
@@ -11521,9 +11557,11 @@ gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
NULL_TREE, NULL_TREE, true, e,
sym->attr.codimension
? GFC_CAF_COARRAY_DEREGISTER
- : GFC_CAF_COARRAY_NOCOARRAY);
+ : GFC_CAF_COARRAY_NOCOARRAY,
+ NULL_TREE, gfc_finish_block (&cleanup));
if (e)
gfc_free_expr (e);
+ gfc_init_block (&cleanup);
gfc_add_expr_to_block (&cleanup, tmp);
}
diff --git a/gcc/fortran/trans-decl.cc b/gcc/fortran/trans-decl.cc
index b0fd25e..a3f037b 100644
--- a/gcc/fortran/trans-decl.cc
+++ b/gcc/fortran/trans-decl.cc
@@ -48,6 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimplify.h"
#include "omp-general.h"
#include "attr-fnspec.h"
+#include "tree-iterator.h"
#define MAX_LABEL_VALUE 99999
@@ -4652,6 +4653,36 @@ gfc_trans_deferred_vars (gfc_symbol * proc_sym, gfc_wrapped_block * block)
init_intent_out_dt (proc_sym, block);
gfc_restore_backend_locus (&loc);
+ /* For some reasons, internal procedures point to the parent's
+ namespace. Top-level procedure and variables inside BLOCK are fine. */
+ gfc_namespace *omp_ns = proc_sym->ns;
+ if (proc_sym->ns->proc_name != proc_sym)
+ for (omp_ns = proc_sym->ns->contained; omp_ns;
+ omp_ns = omp_ns->sibling)
+ if (omp_ns->proc_name == proc_sym)
+ break;
+
+ /* Add 'omp allocate' attribute for gfc_trans_auto_array_allocation and
+ unset attr.omp_allocate for 'omp allocate allocator(omp_default_mem_alloc),
+ which has the normal codepath except for an invalid-use check in the ME.
+ The main processing happens later in this function. */
+ for (struct gfc_omp_namelist *n = omp_ns ? omp_ns->omp_allocate : NULL;
+ n; n = n->next)
+ if (!TREE_STATIC (n->sym->backend_decl))
+ {
+ /* Add empty entries - described and to be filled below. */
+ tree tmp = build_tree_list (NULL_TREE, NULL_TREE);
+ TREE_CHAIN (tmp) = build_tree_list (NULL_TREE, NULL_TREE);
+ DECL_ATTRIBUTES (n->sym->backend_decl)
+ = tree_cons (get_identifier ("omp allocate"), tmp,
+ DECL_ATTRIBUTES (n->sym->backend_decl));
+ if (n->u.align == NULL
+ && n->u2.allocator != NULL
+ && n->u2.allocator->expr_type == EXPR_CONSTANT
+ && mpz_cmp_si (n->u2.allocator->value.integer, 1) == 0)
+ n->sym->attr.omp_allocate = 0;
+ }
+
for (sym = proc_sym->tlink; sym != proc_sym; sym = sym->tlink)
{
bool alloc_comp_or_fini = (sym->ts.type == BT_DERIVED)
@@ -5105,6 +5136,101 @@ gfc_trans_deferred_vars (gfc_symbol * proc_sym, gfc_wrapped_block * block)
gcc_unreachable ();
}
+ /* Handle 'omp allocate'. This has to be after the block above as
+ gfc_add_init_cleanup (..., init, ...) puts 'init' of later calls
+ before earlier calls. The code is a bit more complex as gfortran does
+ not really work with bind expressions / BIND_EXPR_VARS properly, i.e.
+ gimplify_bind_expr needs some help for placing the GOMP_alloc. Thus,
+ we pass on the location of the allocate-assignment expression and,
+ if the size is not constant, the size variable if Fortran computes this
+ differently. We also might add an expression location after which the
+ code has to be added, e.g. for character len expressions, which affect
+ the UNIT_SIZE. */
+ gfc_expr *last_allocator = NULL;
+ if (omp_ns && omp_ns->omp_allocate)
+ {
+ if (!block->init || TREE_CODE (block->init) != STATEMENT_LIST)
+ {
+ tree tmp = build1_v (LABEL_EXPR, gfc_build_label_decl (NULL_TREE));
+ append_to_statement_list (tmp, &block->init);
+ }
+ if (!block->cleanup || TREE_CODE (block->cleanup) != STATEMENT_LIST)
+ {
+ tree tmp = build1_v (LABEL_EXPR, gfc_build_label_decl (NULL_TREE));
+ append_to_statement_list (tmp, &block->cleanup);
+ }
+ }
+ tree init_stmtlist = block->init;
+ tree cleanup_stmtlist = block->cleanup;
+ se.expr = NULL_TREE;
+ for (struct gfc_omp_namelist *n = omp_ns ? omp_ns->omp_allocate : NULL;
+ n; n = n->next)
+ if (!TREE_STATIC (n->sym->backend_decl))
+ {
+ tree align = (n->u.align ? gfc_conv_constant_to_tree (n->u.align)
+ : NULL_TREE);
+ if (last_allocator != n->u2.allocator)
+ {
+ location_t loc = input_location;
+ gfc_init_se (&se, NULL);
+ if (n->u2.allocator)
+ {
+ input_location = gfc_get_location (&n->u2.allocator->where);
+ gfc_conv_expr (&se, n->u2.allocator);
+ }
+ /* We need to evalulate non-constants - also to find the location
+ after which the GOMP_alloc has to be added to - also as BLOCK
+ does not yield a new BIND_EXPR_BODY. */
+ if (n->u2.allocator
+ && (!(CONSTANT_CLASS_P (se.expr) && DECL_P (se.expr))
+ || se.pre.head || se.post.head))
+ {
+ stmtblock_t tmpblock;
+ gfc_init_block (&tmpblock);
+ se.expr = gfc_evaluate_now (se.expr, &tmpblock);
+ /* First post then pre because the new code is inserted
+ at the top. */
+ gfc_add_init_cleanup (block, gfc_finish_block (&se.post), NULL);
+ gfc_add_init_cleanup (block, gfc_finish_block (&tmpblock),
+ NULL);
+ gfc_add_init_cleanup (block, gfc_finish_block (&se.pre), NULL);
+ }
+ last_allocator = n->u2.allocator;
+ input_location = loc;
+ }
+
+ /* 'omp allocate( {purpose: allocator, value: align},
+ {purpose: init-stmtlist, value: cleanup-stmtlist},
+ {purpose: size-var, value: last-size-expr}}
+ where init-stmt/cleanup-stmt is the STATEMENT list to find the
+ try-final block; last-size-expr is to find the location after
+ which to add the code and 'size-var' is for the proper size, cf.
+ gfc_trans_auto_array_allocation - either or both of the latter
+ can be NULL. */
+ tree tmp = lookup_attribute ("omp allocate",
+ DECL_ATTRIBUTES (n->sym->backend_decl));
+ tmp = TREE_VALUE (tmp);
+ TREE_PURPOSE (tmp) = se.expr;
+ TREE_VALUE (tmp) = align;
+ TREE_PURPOSE (TREE_CHAIN (tmp)) = init_stmtlist;
+ TREE_VALUE (TREE_CHAIN (tmp)) = cleanup_stmtlist;
+ }
+ else if (n->sym->attr.in_common)
+ {
+ gfc_error ("Sorry, !$OMP allocate for COMMON block variable %qs at %L "
+ "not supported", n->sym->common_block->name,
+ &n->sym->common_block->where);
+ break;
+ }
+ else
+ {
+ gfc_error ("Sorry, !$OMP allocate for variable %qs at %L with SAVE "
+ "attribute not yet implemented", n->sym->name,
+ &n->sym->declared_at);
+ /* FIXME: Remember to handle last_allocator. */
+ break;
+ }
+
gfc_init_block (&tmpblock);
for (f = gfc_sym_get_dummy_args (proc_sym); f; f = f->next)
diff --git a/gcc/fortran/trans-expr.cc b/gcc/fortran/trans-expr.cc
index cca2f4e..1b8be08 100644
--- a/gcc/fortran/trans-expr.cc
+++ b/gcc/fortran/trans-expr.cc
@@ -10403,11 +10403,36 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
}
if (expr1->ts.type == BT_CHARACTER
- && expr1->symtree->n.sym->ts.deferred
- && expr1->symtree->n.sym->ts.u.cl->backend_decl
- && VAR_P (expr1->symtree->n.sym->ts.u.cl->backend_decl))
+ && expr1->ts.deferred)
{
- tmp = expr1->symtree->n.sym->ts.u.cl->backend_decl;
+ gfc_symbol *psym = expr1->symtree->n.sym;
+ tmp = NULL_TREE;
+ if (psym->ts.type == BT_CHARACTER)
+ {
+ gcc_assert (psym->ts.u.cl->backend_decl
+ && VAR_P (psym->ts.u.cl->backend_decl));
+ tmp = psym->ts.u.cl->backend_decl;
+ }
+ else if (expr1->ts.u.cl->backend_decl
+ && VAR_P (expr1->ts.u.cl->backend_decl))
+ tmp = expr1->ts.u.cl->backend_decl;
+ else if (TREE_CODE (lse.expr) == COMPONENT_REF)
+ {
+ gfc_ref *ref = expr1->ref;
+ for (;ref; ref = ref->next)
+ {
+ if (ref->type == REF_COMPONENT
+ && ref->u.c.component->ts.type == BT_CHARACTER
+ && gfc_deferred_strlen (ref->u.c.component, &tmp))
+ tmp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (tmp),
+ TREE_OPERAND (lse.expr, 0),
+ tmp, NULL_TREE);
+ }
+ }
+
+ gcc_assert (tmp);
+
if (expr2->expr_type != EXPR_NULL)
gfc_add_modify (&block, tmp,
fold_convert (TREE_TYPE (tmp), strlen_rhs));
@@ -10723,7 +10748,7 @@ gfc_trans_scalar_assign (gfc_se * lse, gfc_se * rse, gfc_typespec ts,
{
tmp_var = gfc_evaluate_now (lse->expr, &lse->pre);
tmp = gfc_deallocate_alloc_comp_no_caf (ts.u.derived, tmp_var,
- 0, true);
+ 0, gfc_may_be_finalized (ts));
if (deep_copy)
tmp = build3_v (COND_EXPR, cond, build_empty_stmt (input_location),
tmp);
@@ -11990,7 +12015,10 @@ gfc_trans_assignment_1 (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
&& !is_runtime_conformable (expr1, expr2);
/* Only analyze the expressions for coarray properties, when in coarray-lib
- mode. */
+ mode. Avoid false-positive uninitialized diagnostics with initializing
+ the codimension flag unconditionally. */
+ lhs_caf_attr.codimension = false;
+ rhs_caf_attr.codimension = false;
if (flag_coarray == GFC_FCOARRAY_LIB)
{
lhs_caf_attr = gfc_caf_attr (expr1, false, &lhs_refs_comp);
diff --git a/gcc/fortran/trans-openmp.cc b/gcc/fortran/trans-openmp.cc
index 2f116fd..7930f2f 100644
--- a/gcc/fortran/trans-openmp.cc
+++ b/gcc/fortran/trans-openmp.cc
@@ -2739,34 +2739,48 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
}
break;
case OMP_LIST_ALLOCATE:
- for (; n != NULL; n = n->next)
- if (n->sym->attr.referenced)
- {
- tree t = gfc_trans_omp_variable (n->sym, false);
- if (t != error_mark_node)
- {
- tree node = build_omp_clause (input_location,
- OMP_CLAUSE_ALLOCATE);
- OMP_CLAUSE_DECL (node) = t;
- if (n->u2.allocator)
- {
- tree allocator_;
- gfc_init_se (&se, NULL);
- gfc_conv_expr (&se, n->u2.allocator);
- allocator_ = gfc_evaluate_now (se.expr, block);
- OMP_CLAUSE_ALLOCATE_ALLOCATOR (node) = allocator_;
- }
- if (n->u.align)
- {
- tree align_;
- gfc_init_se (&se, NULL);
- gfc_conv_expr (&se, n->u.align);
- align_ = gfc_evaluate_now (se.expr, block);
- OMP_CLAUSE_ALLOCATE_ALIGN (node) = align_;
- }
- omp_clauses = gfc_trans_add_clause (node, omp_clauses);
- }
- }
+ {
+ tree allocator_ = NULL_TREE;
+ gfc_expr *alloc_expr = NULL;
+ for (; n != NULL; n = n->next)
+ if (n->sym->attr.referenced)
+ {
+ tree t = gfc_trans_omp_variable (n->sym, false);
+ if (t != error_mark_node)
+ {
+ tree node = build_omp_clause (input_location,
+ OMP_CLAUSE_ALLOCATE);
+ OMP_CLAUSE_DECL (node) = t;
+ if (n->u2.allocator)
+ {
+ if (alloc_expr != n->u2.allocator)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, n->u2.allocator);
+ gfc_add_block_to_block (block, &se.pre);
+ allocator_ = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+ }
+ OMP_CLAUSE_ALLOCATE_ALLOCATOR (node) = allocator_;
+ }
+ alloc_expr = n->u2.allocator;
+ if (n->u.align)
+ {
+ tree align_;
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, n->u.align);
+ gcc_assert (CONSTANT_CLASS_P (se.expr)
+ && se.pre.head == NULL
+ && se.post.head == NULL);
+ align_ = se.expr;
+ OMP_CLAUSE_ALLOCATE_ALIGN (node) = align_;
+ }
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ }
+ }
+ else
+ alloc_expr = n->u2.allocator;
+ }
break;
case OMP_LIST_LINEAR:
{
@@ -7184,11 +7198,14 @@ gfc_trans_omp_sections (gfc_code *code, gfc_omp_clauses *clauses)
static tree
gfc_trans_omp_single (gfc_code *code, gfc_omp_clauses *clauses)
{
- tree omp_clauses = gfc_trans_omp_clauses (NULL, clauses, code->loc);
+ stmtblock_t block;
+ gfc_start_block (&block);
+ tree omp_clauses = gfc_trans_omp_clauses (&block, clauses, code->loc);
tree stmt = gfc_trans_omp_code (code->block->next, true);
stmt = build2_loc (gfc_get_location (&code->loc), OMP_SINGLE, void_type_node,
stmt, omp_clauses);
- return stmt;
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
}
static tree
diff --git a/gcc/function.cc b/gcc/function.cc
index e92384a..afb0b33 100644
--- a/gcc/function.cc
+++ b/gcc/function.cc
@@ -300,7 +300,7 @@ get_stack_local_alignment (tree type, machine_mode mode)
static bool
try_fit_stack_local (poly_int64 start, poly_int64 length,
poly_int64 size, unsigned int alignment,
- poly_int64_pod *poffset)
+ poly_int64 *poffset)
{
poly_int64 this_frame_offset;
int frame_off, frame_alignment, frame_phase;
@@ -1431,7 +1431,7 @@ static poly_int64 cfa_offset;
offset indirectly through the pointer. Otherwise, return 0. */
static rtx
-instantiate_new_reg (rtx x, poly_int64_pod *poffset)
+instantiate_new_reg (rtx x, poly_int64 *poffset)
{
rtx new_rtx;
poly_int64 offset;
@@ -6112,6 +6112,8 @@ thread_prologue_and_epilogue_insns (void)
&& returnjump_p (BB_END (e->src)))
e->flags &= ~EDGE_FALLTHRU;
}
+
+ find_sub_basic_blocks (BLOCK_FOR_INSN (epilogue_seq));
}
else if (next_active_insn (BB_END (exit_fallthru_edge->src)))
{
@@ -6210,6 +6212,8 @@ thread_prologue_and_epilogue_insns (void)
set_insn_locations (seq, epilogue_location);
emit_insn_before (seq, insn);
+
+ find_sub_basic_blocks (BLOCK_FOR_INSN (insn));
}
}
diff --git a/gcc/function.h b/gcc/function.h
index e290ff5..5caf1e1 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -94,7 +94,7 @@ extern GTY ((length ("crtl->emit.x_reg_rtx_no"))) rtx * regno_reg_rtx;
struct GTY(()) expr_status {
/* Number of units that we should eventually pop off the stack.
These are the arguments to function calls that have already returned. */
- poly_int64_pod x_pending_stack_adjust;
+ poly_int64 x_pending_stack_adjust;
/* Under some ABIs, it is the caller's responsibility to pop arguments
pushed for function calls. A naive implementation would simply pop
@@ -117,7 +117,7 @@ struct GTY(()) expr_status {
boundary can be momentarily unaligned while pushing the arguments.
Record the delta since last aligned boundary here in order to get
stack alignment in the nested function calls working right. */
- poly_int64_pod x_stack_pointer_delta;
+ poly_int64 x_stack_pointer_delta;
/* Nonzero means __builtin_saveregs has already been done in this function.
The value is the pseudoreg containing the value __builtin_saveregs
@@ -537,7 +537,7 @@ extern struct machine_function * (*init_machine_status) (void);
struct args_size
{
- poly_int64_pod constant;
+ poly_int64 constant;
tree var;
};
diff --git a/gcc/gengtype.cc b/gcc/gengtype.cc
index 3db0a9b..fc469e4 100644
--- a/gcc/gengtype.cc
+++ b/gcc/gengtype.cc
@@ -5234,9 +5234,7 @@ main (int argc, char **argv)
POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos));
POS_HERE (do_scalar_typedef ("FIXED_VALUE_TYPE", &pos));
POS_HERE (do_scalar_typedef ("double_int", &pos));
- POS_HERE (do_scalar_typedef ("poly_int64_pod", &pos));
POS_HERE (do_scalar_typedef ("offset_int", &pos));
- POS_HERE (do_scalar_typedef ("widest_int", &pos));
POS_HERE (do_scalar_typedef ("int64_t", &pos));
POS_HERE (do_scalar_typedef ("poly_int64", &pos));
POS_HERE (do_scalar_typedef ("poly_uint64", &pos));
diff --git a/gcc/genmatch.cc b/gcc/genmatch.cc
index 03d325e..e9d7afa 100644
--- a/gcc/genmatch.cc
+++ b/gcc/genmatch.cc
@@ -5458,8 +5458,8 @@ main (int argc, char **argv)
line_table = XCNEW (class line_maps);
linemap_init (line_table, 0);
- line_table->reallocator = xrealloc;
- line_table->round_alloc_size = round_alloc_size;
+ line_table->m_reallocator = xrealloc;
+ line_table->m_round_alloc_size = round_alloc_size;
r = cpp_create_reader (CLK_GNUC99, NULL, line_table);
cpp_callbacks *cb = cpp_get_callbacks (r);
diff --git a/gcc/genmodes.cc b/gcc/genmodes.cc
index 55ac2ad..5446a3e 100644
--- a/gcc/genmodes.cc
+++ b/gcc/genmodes.cc
@@ -980,7 +980,7 @@ calc_wider_mode (void)
}
}
-/* Text to add to the constant part of a poly_int_pod initializer in
+/* Text to add to the constant part of a poly_int initializer in
order to fill out te whole structure. */
#if NUM_POLY_INT_COEFFS == 1
#define ZERO_COEFFS ""
@@ -1080,7 +1080,7 @@ extern __inline__ __attribute__((__always_inline__, __gnu_inline__))\n\
poly_uint16\n\
mode_size_inline (machine_mode mode)\n\
{\n\
- extern %spoly_uint16_pod mode_size[NUM_MACHINE_MODES];\n\
+ extern %spoly_uint16 mode_size[NUM_MACHINE_MODES];\n\
gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);\n\
switch (mode)\n\
{\n", adj_nunits || adj_bytesize ? "" : "const ");
@@ -1114,7 +1114,7 @@ extern __inline__ __attribute__((__always_inline__, __gnu_inline__))\n\
poly_uint16\n\
mode_nunits_inline (machine_mode mode)\n\
{\n\
- extern %spoly_uint16_pod mode_nunits[NUM_MACHINE_MODES];\n\
+ extern %spoly_uint16 mode_nunits[NUM_MACHINE_MODES];\n\
switch (mode)\n\
{\n", adj_nunits ? "" : "const ");
@@ -1480,7 +1480,7 @@ emit_mode_precision (void)
int c;
struct mode_data *m;
- print_maybe_const_decl ("%spoly_uint16_pod", "mode_precision",
+ print_maybe_const_decl ("%spoly_uint16", "mode_precision",
"NUM_MACHINE_MODES", adj_nunits);
for_all_modes (c, m)
@@ -1499,7 +1499,7 @@ emit_mode_size (void)
int c;
struct mode_data *m;
- print_maybe_const_decl ("%spoly_uint16_pod", "mode_size",
+ print_maybe_const_decl ("%spoly_uint16", "mode_size",
"NUM_MACHINE_MODES", adj_nunits || adj_bytesize);
for_all_modes (c, m)
@@ -1514,7 +1514,7 @@ emit_mode_nunits (void)
int c;
struct mode_data *m;
- print_maybe_const_decl ("%spoly_uint16_pod", "mode_nunits",
+ print_maybe_const_decl ("%spoly_uint16", "mode_nunits",
"NUM_MACHINE_MODES", adj_nunits);
for_all_modes (c, m)
diff --git a/gcc/gensupport.cc b/gcc/gensupport.cc
index f7164b3..dd920d6 100644
--- a/gcc/gensupport.cc
+++ b/gcc/gensupport.cc
@@ -894,20 +894,9 @@ convert_syntax (rtx x, file_location loc)
if (!expect_char (&templ, '['))
fatal_at (loc, "expecing `[' to begin section list");
- parse_section_layout (loc, &templ, "cons:", tconvec, true);
-
- /* Check for any duplicate cons entries and sort based on i. */
- for (auto e : tconvec)
- {
- unsigned idx = e.idx;
- if (idx >= convec.size ())
- convec.resize (idx + 1);
+ skip_spaces (&templ);
- if (convec[idx].idx >= 0)
- fatal_at (loc, "duplicate cons number found: %d", idx);
- convec[idx] = e;
- }
- tconvec.clear ();
+ parse_section_layout (loc, &templ, "cons:", tconvec, true);
if (*templ != ']')
{
@@ -951,13 +940,13 @@ convert_syntax (rtx x, file_location loc)
new_templ += '\n';
new_templ.append (buffer);
/* Parse the constraint list, then the attribute list. */
- if (convec.size () > 0)
- parse_section (&templ, convec.size (), alt_no, convec, loc,
+ if (tconvec.size () > 0)
+ parse_section (&templ, tconvec.size (), alt_no, tconvec, loc,
"constraint");
if (attrvec.size () > 0)
{
- if (convec.size () > 0 && !expect_char (&templ, ';'))
+ if (tconvec.size () > 0 && !expect_char (&templ, ';'))
fatal_at (loc, "expected `;' to separate constraints "
"and attributes in alternative %d", alt_no);
@@ -1027,6 +1016,19 @@ convert_syntax (rtx x, file_location loc)
++alt_no;
}
+ /* Check for any duplicate cons entries and sort based on i. */
+ for (auto e : tconvec)
+ {
+ unsigned idx = e.idx;
+ if (idx >= convec.size ())
+ convec.resize (idx + 1);
+
+ if (convec[idx].idx >= 0)
+ fatal_at (loc, "duplicate cons number found: %d", idx);
+ convec[idx] = e;
+ }
+ tconvec.clear ();
+
/* Write the constraints and attributes into their proper places. */
if (convec.size () > 0)
add_constraints (x, loc, convec);
diff --git a/gcc/ggc-common.cc b/gcc/ggc-common.cc
index 95803fa..39e2581 100644
--- a/gcc/ggc-common.cc
+++ b/gcc/ggc-common.cc
@@ -75,6 +75,18 @@ ggc_mark_root_tab (const_ggc_root_tab_t rt)
(*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
+/* Zero out all the roots in the table RT. */
+
+static void
+ggc_zero_rtab_roots (const_ggc_root_tab_t rt)
+{
+ size_t i;
+
+ for ( ; rt->base != NULL; rt++)
+ for (i = 0; i < rt->nelt; i++)
+ (*(void **) ((char *)rt->base + rt->stride * i)) = (void*)0;
+}
+
/* Iterate through all registered roots and mark each element. */
void
@@ -1307,8 +1319,7 @@ ggc_common_finalize ()
memset (rti->base, 0, rti->stride * rti->nelt);
for (rt = gt_ggc_rtab; *rt; rt++)
- for (rti = *rt; rti->base != NULL; rti++)
- memset (rti->base, 0, rti->stride * rti->nelt);
+ ggc_zero_rtab_roots (*rt);
for (rt = gt_pch_scalar_rtab; *rt; rt++)
for (rti = *rt; rti->base != NULL; rti++)
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 04d9fac..853edd9 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -876,10 +876,8 @@ size_must_be_zero_p (tree size)
wide_int zero = wi::zero (TYPE_PRECISION (type));
value_range valid_range (type, zero, ssize_max);
value_range vr;
- if (cfun)
- get_range_query (cfun)->range_of_expr (vr, size);
- else
- get_global_range_query ()->range_of_expr (vr, size);
+ get_range_query (cfun)->range_of_expr (vr, size);
+
if (vr.undefined_p ())
vr.set_varying (TREE_TYPE (size));
vr.intersect (valid_range);
@@ -7857,7 +7855,7 @@ gimple_fold_stmt_to_constant (gimple *stmt, tree (*valueize) (tree))
is not explicitly available, but it is known to be zero
such as 'static const int a;'. */
static tree
-get_base_constructor (tree base, poly_int64_pod *bit_offset,
+get_base_constructor (tree base, poly_int64 *bit_offset,
tree (*valueize)(tree))
{
poly_int64 bit_offset2, size, max_size;
diff --git a/gcc/gimple-lower-bitint.cc b/gcc/gimple-lower-bitint.cc
index d1651a0..6655859 100644
--- a/gcc/gimple-lower-bitint.cc
+++ b/gcc/gimple-lower-bitint.cc
@@ -1932,7 +1932,8 @@ range_to_prec (tree op, gimple *stmt)
unsigned int prec = TYPE_PRECISION (type);
if (!optimize
- || !get_range_query (cfun)->range_of_expr (r, op, stmt))
+ || !get_range_query (cfun)->range_of_expr (r, op, stmt)
+ || r.undefined_p ())
{
if (TYPE_UNSIGNED (type))
return prec;
@@ -2066,6 +2067,9 @@ bitint_large_huge::handle_operand_addr (tree op, gimple *stmt,
}
else if (gimple_code (g) == GIMPLE_NOP)
{
+ *prec = TYPE_UNSIGNED (TREE_TYPE (op)) ? limb_prec : -limb_prec;
+ if (prec_stored)
+ *prec_stored = *prec;
tree var = create_tmp_var (m_limb_type);
TREE_ADDRESSABLE (var) = 1;
ret = build_fold_addr_expr (var);
diff --git a/gcc/gimple-match-head.cc b/gcc/gimple-match-head.cc
index ea6387a..3220723 100644
--- a/gcc/gimple-match-head.cc
+++ b/gcc/gimple-match-head.cc
@@ -274,7 +274,7 @@ gimple_bitwise_equal_p (tree expr1, tree expr2, tree (*valueize) (tree))
bool gimple_bit_not_with_nop (tree, tree *, tree (*) (tree));
bool gimple_maybe_cmp (tree, tree *, tree (*) (tree));
-/* Helper function for bitwise_equal_p macro. */
+/* Helper function for bitwise_inverted_equal_p macro. */
static inline bool
gimple_bitwise_inverted_equal_p (tree expr1, tree expr2, bool &wascmp, tree (*valueize) (tree))
diff --git a/gcc/gimple-range-cache.cc b/gcc/gimple-range-cache.cc
index 3c81993..89c0845 100644
--- a/gcc/gimple-range-cache.cc
+++ b/gcc/gimple-range-cache.cc
@@ -1470,6 +1470,9 @@ ranger_cache::fill_block_cache (tree name, basic_block bb, basic_block def_bb)
{
if (rel != VREL_EQ)
range_cast (equiv_range, type);
+ else
+ adjust_equivalence_range (equiv_range);
+
if (block_result.intersect (equiv_range))
{
if (DEBUG_RANGE_CACHE)
diff --git a/gcc/gimple-range-fold.cc b/gcc/gimple-range-fold.cc
index d1945cc..6e9530c 100644
--- a/gcc/gimple-range-fold.cc
+++ b/gcc/gimple-range-fold.cc
@@ -50,10 +50,8 @@ fur_source::fur_source (range_query *q)
{
if (q)
m_query = q;
- else if (cfun)
- m_query = get_range_query (cfun);
else
- m_query = get_global_range_query ();
+ m_query = get_range_query (cfun);
m_gori = NULL;
}
diff --git a/gcc/gimple-range-gori.cc b/gcc/gimple-range-gori.cc
index 2694e55..887da0f 100644
--- a/gcc/gimple-range-gori.cc
+++ b/gcc/gimple-range-gori.cc
@@ -1146,10 +1146,7 @@ gori_compute::compute_operand1_range (vrange &r,
// If op1 == op2, create a new trio for just this call.
if (op1 == op2 && gimple_range_ssa_p (op1))
- {
- relation_kind k = get_identity_relation (op1, op1_range);
- trio = relation_trio (trio.lhs_op1 (), trio.lhs_op2 (), k);
- }
+ trio = relation_trio (trio.lhs_op1 (), trio.lhs_op2 (), VREL_EQ);
if (!handler.calc_op1 (r, lhs, op2_range, trio))
return false;
}
@@ -1225,10 +1222,7 @@ gori_compute::compute_operand2_range (vrange &r,
// If op1 == op2, create a new trio for this stmt.
if (op1 == op2 && gimple_range_ssa_p (op1))
- {
- relation_kind k = get_identity_relation (op1, op1_range);
- trio = relation_trio (trio.lhs_op1 (), trio.lhs_op2 (), k);
- }
+ trio = relation_trio (trio.lhs_op1 (), trio.lhs_op2 (), VREL_EQ);
// Intersect with range for op2 based on lhs and op1.
if (!handler.calc_op2 (r, lhs, op1_range, trio))
return false;
@@ -1605,3 +1599,216 @@ gori_export_iterator::get_name ()
}
return NULL_TREE;
}
+
+// This is a helper class to set up STMT with a known LHS for further GORI
+// processing.
+
+class gori_stmt_info : public gimple_range_op_handler
+{
+public:
+ gori_stmt_info (vrange &lhs, gimple *stmt, range_query *q);
+ Value_Range op1_range;
+ Value_Range op2_range;
+ tree ssa1;
+ tree ssa2;
+};
+
+
+// Uses query Q to get the known ranges on STMT with a LHS range
+// for op1_range and op2_range and set ssa1 and ssa2 if either or both of
+// those operands are SSA_NAMES.
+
+gori_stmt_info::gori_stmt_info (vrange &lhs, gimple *stmt, range_query *q)
+ : gimple_range_op_handler (stmt)
+{
+ ssa1 = NULL;
+ ssa2 = NULL;
+ // Don't handle switches as yet for vector processing.
+ if (is_a<gswitch *> (stmt))
+ return;
+
+ // No further processing for VARYING or undefined.
+ if (lhs.undefined_p () || lhs.varying_p ())
+ return;
+
+ // If there is no range-op handler, we are also done.
+ if (!*this)
+ return;
+
+ // Only evaluate logical cases if both operands must be the same as the LHS.
+ // Otherwise it becomes exponential in time, as well as more complicated.
+ if (is_gimple_logical_p (stmt))
+ {
+ gcc_checking_assert (range_compatible_p (lhs.type (), boolean_type_node));
+ enum tree_code code = gimple_expr_code (stmt);
+ if (code == TRUTH_OR_EXPR || code == BIT_IOR_EXPR)
+ {
+ // [0, 0] = x || y means both x and y must be zero.
+ if (!lhs.singleton_p () || !lhs.zero_p ())
+ return;
+ }
+ else if (code == TRUTH_AND_EXPR || code == BIT_AND_EXPR)
+ {
+ // [1, 1] = x && y means both x and y must be one.
+ if (!lhs.singleton_p () || lhs.zero_p ())
+ return;
+ }
+ }
+
+ tree op1 = operand1 ();
+ tree op2 = operand2 ();
+ ssa1 = gimple_range_ssa_p (op1);
+ ssa2 = gimple_range_ssa_p (op2);
+ // If both operands are the same, only process one of them.
+ if (ssa1 && ssa1 == ssa2)
+ ssa2 = NULL_TREE;
+
+ // Extract current ranges for the operands.
+ fur_stmt src (stmt, q);
+ if (op1)
+ {
+ op1_range.set_type (TREE_TYPE (op1));
+ src.get_operand (op1_range, op1);
+ }
+
+ // And satisfy the second operand for single op statements.
+ if (op2)
+ {
+ op2_range.set_type (TREE_TYPE (op2));
+ src.get_operand (op2_range, op2);
+ }
+ else if (op1)
+ op2_range = op1_range;
+ return;
+}
+
+
+// Process STMT using LHS as the range of the LHS. Invoke GORI processing
+// to resolve ranges for all SSA_NAMES feeding STMT which may be altered
+// based on LHS. Fill R with the results, and resolve all incoming
+// ranges using range-query Q.
+
+static void
+gori_calc_operands (vrange &lhs, gimple *stmt, ssa_cache &r, range_query *q)
+{
+ struct gori_stmt_info si(lhs, stmt, q);
+ if (!si)
+ return;
+
+ Value_Range tmp;
+ // Now evaluate operand ranges, and set them in the edge cache.
+ // If there was already a range, leave it and do no further evaluation.
+ if (si.ssa1 && !r.has_range (si.ssa1))
+ {
+ tmp.set_type (TREE_TYPE (si.ssa1));
+ if (si.calc_op1 (tmp, lhs, si.op2_range))
+ si.op1_range.intersect (tmp);
+ r.set_range (si.ssa1, si.op1_range);
+ gimple *src = SSA_NAME_DEF_STMT (si.ssa1);
+ // If the definition is in the same basic block, evaluate it.
+ if (src && gimple_bb (src) == gimple_bb (stmt))
+ gori_calc_operands (si.op1_range, src, r, q);
+ }
+
+ if (si.ssa2 && !r.has_range (si.ssa2))
+ {
+ tmp.set_type (TREE_TYPE (si.ssa2));
+ if (si.calc_op2 (tmp, lhs, si.op1_range))
+ si.op2_range.intersect (tmp);
+ r.set_range (si.ssa2, si.op2_range);
+ gimple *src = SSA_NAME_DEF_STMT (si.ssa2);
+ if (src && gimple_bb (src) == gimple_bb (stmt))
+ gori_calc_operands (si.op2_range, src, r, q);
+ }
+}
+
+// Use ssa_cache R as a repository for all outgoing ranges on edge E that
+// can be calculated. Use OGR if present to establish starting edge ranges,
+// and Q to resolve operand values. If Q is NULL use the current range
+// query available to the system.
+
+bool
+gori_on_edge (ssa_cache &r, edge e, range_query *q, gimple_outgoing_range *ogr)
+{
+ // Start with an empty vector
+ r.clear ();
+ int_range_max lhs;
+ // Determine if there is an outgoing edge.
+ gimple *stmt;
+ if (ogr)
+ stmt = ogr->edge_range_p (lhs, e);
+ else
+ {
+ stmt = gimple_outgoing_range_stmt_p (e->src);
+ if (stmt && is_a<gcond *> (stmt))
+ gcond_edge_range (lhs, e);
+ else
+ stmt = NULL;
+ }
+ if (!stmt)
+ return false;
+ gori_calc_operands (lhs, stmt, r, q);
+ return true;
+}
+
+// Helper for GORI_NAME_ON_EDGE which uses query Q to determine if STMT
+// provides a range for NAME, and returns it in R if so. If it does not,
+// continue processing feeding statments until we run out of statements
+// or fine a range for NAME.
+
+bool
+gori_name_helper (vrange &r, tree name, vrange &lhs, gimple *stmt,
+ range_query *q)
+{
+ struct gori_stmt_info si(lhs, stmt, q);
+ if (!si)
+ return false;
+
+ if (si.ssa1 == name)
+ return si.calc_op1 (r, lhs, si.op2_range);
+ if (si.ssa2 == name)
+ return si.calc_op2 (r, lhs, si.op1_range);
+
+ Value_Range tmp;
+ // Now evaluate operand ranges, and set them in the edge cache.
+ // If there was already a range, leave it and do no further evaluation.
+ if (si.ssa1)
+ {
+ tmp.set_type (TREE_TYPE (si.ssa1));
+ if (si.calc_op1 (tmp, lhs, si.op2_range))
+ si.op1_range.intersect (tmp);
+ gimple *src = SSA_NAME_DEF_STMT (si.ssa1);
+ // If the definition is in the same basic block, evaluate it.
+ if (src && gimple_bb (src) == gimple_bb (stmt))
+ if (gori_name_helper (r, name, si.op1_range, src, q))
+ return true;
+ }
+
+ if (si.ssa2)
+ {
+ tmp.set_type (TREE_TYPE (si.ssa2));
+ if (si.calc_op2 (tmp, lhs, si.op1_range))
+ si.op2_range.intersect (tmp);
+ gimple *src = SSA_NAME_DEF_STMT (si.ssa2);
+ if (src && gimple_bb (src) == gimple_bb (stmt))
+ if (gori_name_helper (r, name, si.op2_range, src, q))
+ return true;
+ }
+ return false;
+}
+
+// Check if NAME has an outgoing range on edge E. Use query Q to evaluate
+// the operands. Return TRUE and the range in R if there is an outgoing range.
+// This is like gori_on_edge except it only looks for the single name and
+// does not require an ssa_cache.
+
+bool
+gori_name_on_edge (vrange &r, tree name, edge e, range_query *q)
+{
+ int_range_max lhs;
+ gimple *stmt = gimple_outgoing_range_stmt_p (e->src);
+ if (!stmt || !is_a<gcond *> (stmt))
+ return false;
+ gcond_edge_range (lhs, e);
+ return gori_name_helper (r, name, lhs, stmt, q);
+}
diff --git a/gcc/gimple-range-gori.h b/gcc/gimple-range-gori.h
index b8d97d1..e75ade0 100644
--- a/gcc/gimple-range-gori.h
+++ b/gcc/gimple-range-gori.h
@@ -208,6 +208,21 @@ private:
int m_not_executable_flag;
};
+// These APIs are used to query GORI if there are ranges generated on an edge.
+// GORI_ON_EDGE is used to get all the ranges at once (returned in an
+// ssa_cache structure).
+// GORI_NAME_ON_EDGE is used to simply ask if NAME has a range on edge E
+
+// Fill ssa-cache R with any outgoing ranges on edge E, using OGR and QUERY.
+bool gori_on_edge (class ssa_cache &r, edge e,
+ range_query *query = NULL,
+ gimple_outgoing_range *ogr = NULL);
+
+// Query if NAME has an outgoing range on edge E, and return it in R if so.
+// Note this doesn't use ranger, it's a static GORI analysis of the range in
+// block e->src and is based on any branch at the exit of that block.
+bool gori_name_on_edge (vrange &r, tree name, edge e, range_query *q = NULL);
+
// For each name that is an import into BB's exports..
#define FOR_EACH_GORI_IMPORT_NAME(gori, bb, name) \
for (gori_export_iterator iter ((gori).imports ((bb))); \
diff --git a/gcc/gimple-range.cc b/gcc/gimple-range.cc
index 13c3308..5e9bb39 100644
--- a/gcc/gimple-range.cc
+++ b/gcc/gimple-range.cc
@@ -928,3 +928,303 @@ assume_query::dump (FILE *f)
}
fprintf (f, "------------------------------\n");
}
+
+// ---------------------------------------------------------------------------
+
+
+// Create a DOM based ranger for use by a DOM walk pass.
+
+dom_ranger::dom_ranger () : m_global (), m_out ()
+{
+ m_freelist.create (0);
+ m_freelist.truncate (0);
+ m_e0.create (0);
+ m_e0.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ m_e1.create (0);
+ m_e1.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ m_pop_list = BITMAP_ALLOC (NULL);
+ if (dump_file && (param_ranger_debug & RANGER_DEBUG_TRACE))
+ tracer.enable_trace ();
+}
+
+// Dispose of a DOM ranger.
+
+dom_ranger::~dom_ranger ()
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Non-varying global ranges:\n");
+ fprintf (dump_file, "=========================:\n");
+ m_global.dump (dump_file);
+ }
+ BITMAP_FREE (m_pop_list);
+ m_e1.release ();
+ m_e0.release ();
+ m_freelist.release ();
+}
+
+// Implement range of EXPR on stmt S, and return it in R.
+// Return false if no range can be calculated.
+
+bool
+dom_ranger::range_of_expr (vrange &r, tree expr, gimple *s)
+{
+ unsigned idx;
+ if (!gimple_range_ssa_p (expr))
+ return get_tree_range (r, expr, s);
+
+ if ((idx = tracer.header ("range_of_expr ")))
+ {
+ print_generic_expr (dump_file, expr, TDF_SLIM);
+ if (s)
+ {
+ fprintf (dump_file, " at ");
+ print_gimple_stmt (dump_file, s, 0, TDF_SLIM);
+ }
+ else
+ fprintf (dump_file, "\n");
+ }
+
+ if (s)
+ range_in_bb (r, gimple_bb (s), expr);
+ else
+ m_global.range_of_expr (r, expr, s);
+
+ if (idx)
+ tracer.trailer (idx, " ", true, expr, r);
+ return true;
+}
+
+
+// Return TRUE and the range if edge E has a range set for NAME in
+// block E->src.
+
+bool
+dom_ranger::edge_range (vrange &r, edge e, tree name)
+{
+ bool ret = false;
+ basic_block bb = e->src;
+
+ // Check if BB has any outgoing ranges on edge E.
+ ssa_lazy_cache *out = NULL;
+ if (EDGE_SUCC (bb, 0) == e)
+ out = m_e0[bb->index];
+ else if (EDGE_SUCC (bb, 1) == e)
+ out = m_e1[bb->index];
+
+ // If there is an edge vector and it has a range, pick it up.
+ if (out && out->has_range (name))
+ ret = out->get_range (r, name);
+
+ return ret;
+}
+
+
+// Return the range of EXPR on edge E in R.
+// Return false if no range can be calculated.
+
+bool
+dom_ranger::range_on_edge (vrange &r, edge e, tree expr)
+{
+ basic_block bb = e->src;
+ unsigned idx;
+ if ((idx = tracer.header ("range_on_edge ")))
+ {
+ fprintf (dump_file, "%d->%d for ",e->src->index, e->dest->index);
+ print_generic_expr (dump_file, expr, TDF_SLIM);
+ fputc ('\n',dump_file);
+ }
+
+ if (!gimple_range_ssa_p (expr))
+ return get_tree_range (r, expr, NULL);
+
+ if (!edge_range (r, e, expr))
+ range_in_bb (r, bb, expr);
+
+ if (idx)
+ tracer.trailer (idx, " ", true, expr, r);
+ return true;
+}
+
+// Return the range of NAME as it exists at the end of block BB in R.
+
+void
+dom_ranger::range_in_bb (vrange &r, basic_block bb, tree name)
+{
+ basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (name));
+ // Loop through dominators until we get to the entry block, or we find
+ // either the definition block for NAME, or a single pred edge with a range.
+ while (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ {
+ // If we hit the definition block, pick up the global value.
+ if (bb == def_bb)
+ {
+ m_global.range_of_expr (r, name);
+ return;
+ }
+ // If it's a single pred, check the outgoing range of the edge.
+ if (EDGE_COUNT (bb->preds) == 1
+ && edge_range (r, EDGE_PRED (bb, 0), name))
+ return;
+ // Otherwise move up to the dominator, and check again.
+ bb = get_immediate_dominator (CDI_DOMINATORS, bb);
+ }
+ m_global.range_of_expr (r, name);
+}
+
+
+// Calculate the range of NAME, as the def of stmt S and return it in R.
+// Return FALSE if no range can be calculated.
+// Also set the global range for NAME as this should only be called within
+// the def block during a DOM walk.
+// Outgoing edges were pre-calculated, so when we establish a global definition
+// check if any outgoing edges have ranges that can be combined with the
+// global.
+
+bool
+dom_ranger::range_of_stmt (vrange &r, gimple *s, tree name)
+{
+ unsigned idx;
+ bool ret;
+ if (!name)
+ name = gimple_range_ssa_p (gimple_get_lhs (s));
+
+ gcc_checking_assert (!name || name == gimple_get_lhs (s));
+
+ if ((idx = tracer.header ("range_of_stmt ")))
+ print_gimple_stmt (dump_file, s, 0, TDF_SLIM);
+
+ // It's already been calculated.
+ if (name && m_global.has_range (name))
+ {
+ ret = m_global.range_of_expr (r, name, s);
+ if (idx)
+ tracer.trailer (idx, " Already had value ", ret, name, r);
+ return ret;
+ }
+
+ // If there is a new calculated range and it is not varying, set
+ // a global range.
+ ret = fold_range (r, s, this);
+ if (ret && name && m_global.merge_range (name, r) && !r.varying_p ())
+ {
+ if (set_range_info (name, r) && dump_file)
+ {
+ fprintf (dump_file, "Global Exported: ");
+ print_generic_expr (dump_file, name, TDF_SLIM);
+ fprintf (dump_file, " = ");
+ r.dump (dump_file);
+ fputc ('\n', dump_file);
+ }
+ basic_block bb = gimple_bb (s);
+ unsigned bbi = bb->index;
+ Value_Range vr (TREE_TYPE (name));
+ // If there is a range on edge 0, update it.
+ if (m_e0[bbi] && m_e0[bbi]->has_range (name))
+ {
+ if (m_e0[bbi]->merge_range (name, r) && dump_file
+ && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Outgoing range for ");
+ print_generic_expr (dump_file, name, TDF_SLIM);
+ fprintf (dump_file, " updated on edge %d->%d : ", bbi,
+ EDGE_SUCC (bb, 0)->dest->index);
+ if (m_e0[bbi]->get_range (vr, name))
+ vr.dump (dump_file);
+ fputc ('\n', dump_file);
+ }
+ }
+ // If there is a range on edge 1, update it.
+ if (m_e1[bbi] && m_e1[bbi]->has_range (name))
+ {
+ if (m_e1[bbi]->merge_range (name, r) && dump_file
+ && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Outgoing range for ");
+ print_generic_expr (dump_file, name, TDF_SLIM);
+ fprintf (dump_file, " updated on edge %d->%d : ", bbi,
+ EDGE_SUCC (bb, 1)->dest->index);
+ if (m_e1[bbi]->get_range (vr, name))
+ vr.dump (dump_file);
+ fputc ('\n', dump_file);
+ }
+ }
+ }
+ if (idx)
+ tracer.trailer (idx, " ", ret, name, r);
+ return ret;
+}
+
+// Check if GORI has any ranges on edge E. If there are, store them in
+// either the E0 or E1 vector based on EDGE_0.
+// If there are no ranges, put the empty lazy_cache entry on the freelist
+// for use next time.
+
+void
+dom_ranger::maybe_push_edge (edge e, bool edge_0)
+{
+ ssa_lazy_cache *e_cache;
+ if (!m_freelist.is_empty ())
+ e_cache = m_freelist.pop ();
+ else
+ e_cache = new ssa_lazy_cache;
+ gori_on_edge (*e_cache, e, this, &m_out);
+ if (e_cache->empty_p ())
+ m_freelist.safe_push (e_cache);
+ else
+ {
+ if (edge_0)
+ m_e0[e->src->index] = e_cache;
+ else
+ m_e1[e->src->index] = e_cache;
+ }
+}
+
+// Preprocess block BB. If there are any outgoing edges, precalculate
+// the outgoing ranges and store them. Note these are done before
+// we process the block, so global values have not been set yet.
+// These are "pure" outgoing ranges inflicted by the condition.
+
+void
+dom_ranger::pre_bb (basic_block bb)
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "#FVRP entering BB %d\n", bb->index);
+
+ // Next, see if this block needs outgoing edges calculated.
+ gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
+ if (!gsi_end_p (gsi))
+ {
+ gimple *s = gsi_stmt (gsi);
+ if (is_a<gcond *> (s) && gimple_range_op_handler::supported_p (s))
+ {
+ maybe_push_edge (EDGE_SUCC (bb, 0), true);
+ maybe_push_edge (EDGE_SUCC (bb, 1), false);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ if (m_e0[bb->index])
+ {
+ fprintf (dump_file, "\nEdge ranges BB %d->%d\n",
+ bb->index, EDGE_SUCC (bb, 0)->dest->index);
+ m_e0[bb->index]->dump(dump_file);
+ }
+ if (m_e1[bb->index])
+ {
+ fprintf (dump_file, "\nEdge ranges BB %d->%d\n",
+ bb->index, EDGE_SUCC (bb, 1)->dest->index);
+ m_e1[bb->index]->dump(dump_file);
+ }
+ }
+ }
+ }
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "#FVRP DONE entering BB %d\n", bb->index);
+}
+
+// Perform any post block processing.
+
+void
+dom_ranger::post_bb (basic_block)
+{
+}
diff --git a/gcc/gimple-range.h b/gcc/gimple-range.h
index 6587e49..5807a2b 100644
--- a/gcc/gimple-range.h
+++ b/gcc/gimple-range.h
@@ -101,5 +101,33 @@ protected:
gori_compute m_gori;
};
+// DOM based ranger for fast VRP.
+// This must be processed in DOM order, and does only basic range operations.
+class dom_ranger : public range_query
+{
+public:
+ dom_ranger ();
+ ~dom_ranger ();
+
+ virtual bool range_of_expr (vrange &r, tree expr, gimple *s = NULL) override;
+ virtual bool range_on_edge (vrange &r, edge e, tree expr) override;
+ virtual bool range_of_stmt (vrange &r, gimple *s, tree name = NULL) override;
+
+ bool edge_range (vrange &r, edge e, tree name);
+ void range_in_bb (vrange &r, basic_block bb, tree name);
+
+ void pre_bb (basic_block bb);
+ void post_bb (basic_block bb);
+protected:
+ DISABLE_COPY_AND_ASSIGN (dom_ranger);
+ void maybe_push_edge (edge e, bool edge_0);
+ ssa_cache m_global;
+ gimple_outgoing_range m_out;
+ vec<ssa_lazy_cache *> m_freelist;
+ vec<ssa_lazy_cache *> m_e0;
+ vec<ssa_lazy_cache *> m_e1;
+ bitmap m_pop_list;
+ range_tracer tracer;
+};
#endif // GCC_GIMPLE_RANGE_H
diff --git a/gcc/gimple-ssa-sprintf.cc b/gcc/gimple-ssa-sprintf.cc
index 1897570..b01687e 100644
--- a/gcc/gimple-ssa-sprintf.cc
+++ b/gcc/gimple-ssa-sprintf.cc
@@ -1181,8 +1181,15 @@ adjust_range_for_overflow (tree dirtype, tree *argmin, tree *argmax)
*argmin),
size_int (dirprec)))))
{
- *argmin = force_fit_type (dirtype, wi::to_widest (*argmin), 0, false);
- *argmax = force_fit_type (dirtype, wi::to_widest (*argmax), 0, false);
+ unsigned int maxprec = MAX (argprec, dirprec);
+ *argmin = force_fit_type (dirtype,
+ wide_int::from (wi::to_wide (*argmin), maxprec,
+ TYPE_SIGN (argtype)),
+ 0, false);
+ *argmax = force_fit_type (dirtype,
+ wide_int::from (wi::to_wide (*argmax), maxprec,
+ TYPE_SIGN (argtype)),
+ 0, false);
/* If *ARGMIN is still less than *ARGMAX the conversion above
is safe. Otherwise, it has overflowed and would be unsafe. */
diff --git a/gcc/gimple-ssa-store-merging.cc b/gcc/gimple-ssa-store-merging.cc
index 542958d..fc6ce4b 100644
--- a/gcc/gimple-ssa-store-merging.cc
+++ b/gcc/gimple-ssa-store-merging.cc
@@ -227,7 +227,7 @@ struct symbolic_number {
tree type;
tree base_addr;
tree offset;
- poly_int64_pod bytepos;
+ poly_int64 bytepos;
tree src;
tree alias_set;
tree vuse;
diff --git a/gcc/gimple-ssa-strength-reduction.cc b/gcc/gimple-ssa-strength-reduction.cc
index 65d6c06..bb4a4f5 100644
--- a/gcc/gimple-ssa-strength-reduction.cc
+++ b/gcc/gimple-ssa-strength-reduction.cc
@@ -238,7 +238,7 @@ public:
tree stride;
/* The index constant i. */
- widest_int index;
+ offset_int index;
/* The type of the candidate. This is normally the type of base_expr,
but casts may have occurred when combining feeding instructions.
@@ -333,7 +333,7 @@ class incr_info_d
{
public:
/* The increment that relates a candidate to its basis. */
- widest_int incr;
+ offset_int incr;
/* How many times the increment occurs in the candidate tree. */
unsigned count;
@@ -677,7 +677,7 @@ record_potential_basis (slsr_cand_t c, tree base)
static slsr_cand_t
alloc_cand_and_find_basis (enum cand_kind kind, gimple *gs, tree base,
- const widest_int &index, tree stride, tree ctype,
+ const offset_int &index, tree stride, tree ctype,
tree stype, unsigned savings)
{
slsr_cand_t c = (slsr_cand_t) obstack_alloc (&cand_obstack,
@@ -893,7 +893,7 @@ slsr_process_phi (gphi *phi, bool speed)
int (i * S).
Otherwise, just return double int zero. */
-static widest_int
+static offset_int
backtrace_base_for_ref (tree *pbase)
{
tree base_in = *pbase;
@@ -922,7 +922,7 @@ backtrace_base_for_ref (tree *pbase)
{
/* X = B + (1 * S), S is integer constant. */
*pbase = base_cand->base_expr;
- return wi::to_widest (base_cand->stride);
+ return wi::to_offset (base_cand->stride);
}
else if (base_cand->kind == CAND_ADD
&& TREE_CODE (base_cand->stride) == INTEGER_CST
@@ -966,13 +966,13 @@ backtrace_base_for_ref (tree *pbase)
*PINDEX: C1 + (C2 * C3) + C4 + (C5 * C3) */
static bool
-restructure_reference (tree *pbase, tree *poffset, widest_int *pindex,
+restructure_reference (tree *pbase, tree *poffset, offset_int *pindex,
tree *ptype)
{
tree base = *pbase, offset = *poffset;
- widest_int index = *pindex;
+ offset_int index = *pindex;
tree mult_op0, t1, t2, type;
- widest_int c1, c2, c3, c4, c5;
+ offset_int c1, c2, c3, c4, c5;
offset_int mem_offset;
if (!base
@@ -985,18 +985,18 @@ restructure_reference (tree *pbase, tree *poffset, widest_int *pindex,
return false;
t1 = TREE_OPERAND (base, 0);
- c1 = widest_int::from (mem_offset, SIGNED);
+ c1 = offset_int::from (mem_offset, SIGNED);
type = TREE_TYPE (TREE_OPERAND (base, 1));
mult_op0 = TREE_OPERAND (offset, 0);
- c3 = wi::to_widest (TREE_OPERAND (offset, 1));
+ c3 = wi::to_offset (TREE_OPERAND (offset, 1));
if (TREE_CODE (mult_op0) == PLUS_EXPR)
if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST)
{
t2 = TREE_OPERAND (mult_op0, 0);
- c2 = wi::to_widest (TREE_OPERAND (mult_op0, 1));
+ c2 = wi::to_offset (TREE_OPERAND (mult_op0, 1));
}
else
return false;
@@ -1006,7 +1006,7 @@ restructure_reference (tree *pbase, tree *poffset, widest_int *pindex,
if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST)
{
t2 = TREE_OPERAND (mult_op0, 0);
- c2 = -wi::to_widest (TREE_OPERAND (mult_op0, 1));
+ c2 = -wi::to_offset (TREE_OPERAND (mult_op0, 1));
}
else
return false;
@@ -1057,7 +1057,7 @@ slsr_process_ref (gimple *gs)
HOST_WIDE_INT cbitpos;
if (reversep || !bitpos.is_constant (&cbitpos))
return;
- widest_int index = cbitpos;
+ offset_int index = cbitpos;
if (!restructure_reference (&base, &offset, &index, &type))
return;
@@ -1079,7 +1079,7 @@ create_mul_ssa_cand (gimple *gs, tree base_in, tree stride_in, bool speed)
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
tree stype = NULL_TREE;
- widest_int index;
+ offset_int index;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1112,7 +1112,7 @@ create_mul_ssa_cand (gimple *gs, tree base_in, tree stride_in, bool speed)
============================
X = B + ((i' * S) * Z) */
base = base_cand->base_expr;
- index = base_cand->index * wi::to_widest (base_cand->stride);
+ index = base_cand->index * wi::to_offset (base_cand->stride);
stride = stride_in;
ctype = base_cand->cand_type;
stype = TREE_TYPE (stride_in);
@@ -1149,7 +1149,7 @@ static slsr_cand_t
create_mul_imm_cand (gimple *gs, tree base_in, tree stride_in, bool speed)
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
- widest_int index, temp;
+ offset_int index, temp;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1165,7 +1165,7 @@ create_mul_imm_cand (gimple *gs, tree base_in, tree stride_in, bool speed)
X = Y * c
============================
X = (B + i') * (S * c) */
- temp = wi::to_widest (base_cand->stride) * wi::to_widest (stride_in);
+ temp = wi::to_offset (base_cand->stride) * wi::to_offset (stride_in);
if (wi::fits_to_tree_p (temp, TREE_TYPE (stride_in)))
{
base = base_cand->base_expr;
@@ -1200,7 +1200,7 @@ create_mul_imm_cand (gimple *gs, tree base_in, tree stride_in, bool speed)
===========================
X = (B + S) * c */
base = base_cand->base_expr;
- index = wi::to_widest (base_cand->stride);
+ index = wi::to_offset (base_cand->stride);
stride = stride_in;
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -1281,7 +1281,7 @@ create_add_ssa_cand (gimple *gs, tree base_in, tree addend_in,
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
tree stype = NULL_TREE;
- widest_int index;
+ offset_int index;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1300,7 +1300,7 @@ create_add_ssa_cand (gimple *gs, tree base_in, tree addend_in,
===========================
X = Y + ((+/-1 * S) * B) */
base = base_in;
- index = wi::to_widest (addend_cand->stride);
+ index = wi::to_offset (addend_cand->stride);
if (subtract_p)
index = -index;
stride = addend_cand->base_expr;
@@ -1350,7 +1350,7 @@ create_add_ssa_cand (gimple *gs, tree base_in, tree addend_in,
===========================
Value: X = Y + ((-1 * S) * B) */
base = base_in;
- index = wi::to_widest (subtrahend_cand->stride);
+ index = wi::to_offset (subtrahend_cand->stride);
index = -index;
stride = subtrahend_cand->base_expr;
ctype = TREE_TYPE (base_in);
@@ -1389,13 +1389,13 @@ create_add_ssa_cand (gimple *gs, tree base_in, tree addend_in,
about BASE_IN into the new candidate. Return the new candidate. */
static slsr_cand_t
-create_add_imm_cand (gimple *gs, tree base_in, const widest_int &index_in,
+create_add_imm_cand (gimple *gs, tree base_in, const offset_int &index_in,
bool speed)
{
enum cand_kind kind = CAND_ADD;
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
tree stype = NULL_TREE;
- widest_int index, multiple;
+ offset_int index, multiple;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1405,7 +1405,7 @@ create_add_imm_cand (gimple *gs, tree base_in, const widest_int &index_in,
signop sign = TYPE_SIGN (TREE_TYPE (base_cand->stride));
if (TREE_CODE (base_cand->stride) == INTEGER_CST
- && wi::multiple_of_p (index_in, wi::to_widest (base_cand->stride),
+ && wi::multiple_of_p (index_in, wi::to_offset (base_cand->stride),
sign, &multiple))
{
/* Y = (B + i') * S, S constant, c = kS for some integer k
@@ -1494,7 +1494,7 @@ slsr_process_add (gimple *gs, tree rhs1, tree rhs2, bool speed)
else if (TREE_CODE (rhs2) == INTEGER_CST)
{
/* Record an interpretation for the add-immediate. */
- widest_int index = wi::to_widest (rhs2);
+ offset_int index = wi::to_offset (rhs2);
if (subtract_p)
index = -index;
@@ -2079,7 +2079,7 @@ phi_dependent_cand_p (slsr_cand_t c)
/* Calculate the increment required for candidate C relative to
its basis. */
-static widest_int
+static offset_int
cand_increment (slsr_cand_t c)
{
slsr_cand_t basis;
@@ -2102,10 +2102,10 @@ cand_increment (slsr_cand_t c)
for this candidate, return the absolute value of that increment
instead. */
-static inline widest_int
+static inline offset_int
cand_abs_increment (slsr_cand_t c)
{
- widest_int increment = cand_increment (c);
+ offset_int increment = cand_increment (c);
if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
@@ -2126,7 +2126,7 @@ cand_already_replaced (slsr_cand_t c)
replace_conditional_candidate. */
static void
-replace_mult_candidate (slsr_cand_t c, tree basis_name, widest_int bump)
+replace_mult_candidate (slsr_cand_t c, tree basis_name, offset_int bump)
{
tree target_type = TREE_TYPE (gimple_assign_lhs (c->cand_stmt));
enum tree_code cand_code = gimple_assign_rhs_code (c->cand_stmt);
@@ -2245,7 +2245,7 @@ replace_unconditional_candidate (slsr_cand_t c)
return;
basis = lookup_cand (c->basis);
- widest_int bump = cand_increment (c) * wi::to_widest (c->stride);
+ offset_int bump = cand_increment (c) * wi::to_offset (c->stride);
replace_mult_candidate (c, gimple_assign_lhs (basis->cand_stmt), bump);
}
@@ -2255,7 +2255,7 @@ replace_unconditional_candidate (slsr_cand_t c)
MAX_INCR_VEC_LEN increments have been found. */
static inline int
-incr_vec_index (const widest_int &increment)
+incr_vec_index (const offset_int &increment)
{
unsigned i;
@@ -2275,7 +2275,7 @@ incr_vec_index (const widest_int &increment)
static tree
create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
- widest_int increment, edge e, location_t loc,
+ offset_int increment, edge e, location_t loc,
bool known_stride)
{
tree lhs, basis_type;
@@ -2299,7 +2299,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
{
tree bump_tree;
enum tree_code code = plus_code;
- widest_int bump = increment * wi::to_widest (c->stride);
+ offset_int bump = increment * wi::to_offset (c->stride);
if (wi::neg_p (bump) && !POINTER_TYPE_P (basis_type))
{
code = MINUS_EXPR;
@@ -2427,7 +2427,7 @@ create_phi_basis_1 (slsr_cand_t c, gimple *from_phi, tree basis_name,
feeding_def = gimple_assign_lhs (basis->cand_stmt);
else
{
- widest_int incr = -basis->index;
+ offset_int incr = -basis->index;
feeding_def = create_add_on_incoming_edge (c, basis_name, incr,
e, loc, known_stride);
}
@@ -2444,7 +2444,7 @@ create_phi_basis_1 (slsr_cand_t c, gimple *from_phi, tree basis_name,
else
{
slsr_cand_t arg_cand = base_cand_from_table (arg);
- widest_int diff = arg_cand->index - basis->index;
+ offset_int diff = arg_cand->index - basis->index;
feeding_def = create_add_on_incoming_edge (c, basis_name, diff,
e, loc, known_stride);
}
@@ -2525,7 +2525,7 @@ replace_conditional_candidate (slsr_cand_t c)
basis_name, loc, KNOWN_STRIDE);
/* Replace C with an add of the new basis phi and a constant. */
- widest_int bump = c->index * wi::to_widest (c->stride);
+ offset_int bump = c->index * wi::to_offset (c->stride);
replace_mult_candidate (c, name, bump);
}
@@ -2614,7 +2614,7 @@ replace_uncond_cands_and_profitable_phis (slsr_cand_t c)
{
/* A multiply candidate with a stride of 1 is just an artifice
of a copy or cast; there is no value in replacing it. */
- if (c->kind == CAND_MULT && wi::to_widest (c->stride) != 1)
+ if (c->kind == CAND_MULT && wi::to_offset (c->stride) != 1)
{
/* A candidate dependent upon a phi will replace a multiply by
a constant with an add, and will insert at most one add for
@@ -2681,7 +2681,7 @@ count_candidates (slsr_cand_t c)
candidates with the same increment, also record T_0 for subsequent use. */
static void
-record_increment (slsr_cand_t c, widest_int increment, bool is_phi_adjust)
+record_increment (slsr_cand_t c, offset_int increment, bool is_phi_adjust)
{
bool found = false;
unsigned i;
@@ -2786,7 +2786,7 @@ record_phi_increments_1 (slsr_cand_t basis, gimple *phi)
record_phi_increments_1 (basis, arg_def);
else
{
- widest_int diff;
+ offset_int diff;
if (operand_equal_p (arg, phi_cand->base_expr, 0))
{
@@ -2856,7 +2856,7 @@ record_increments (slsr_cand_t c)
/* Recursive helper function for phi_incr_cost. */
static int
-phi_incr_cost_1 (slsr_cand_t c, const widest_int &incr, gimple *phi,
+phi_incr_cost_1 (slsr_cand_t c, const offset_int &incr, gimple *phi,
int *savings)
{
unsigned i;
@@ -2883,7 +2883,7 @@ phi_incr_cost_1 (slsr_cand_t c, const widest_int &incr, gimple *phi,
}
else
{
- widest_int diff;
+ offset_int diff;
slsr_cand_t arg_cand;
/* When the PHI argument is just a pass-through to the base
@@ -2925,7 +2925,7 @@ phi_incr_cost_1 (slsr_cand_t c, const widest_int &incr, gimple *phi,
uses. */
static int
-phi_incr_cost (slsr_cand_t c, const widest_int &incr, gimple *phi,
+phi_incr_cost (slsr_cand_t c, const offset_int &incr, gimple *phi,
int *savings)
{
int retval = phi_incr_cost_1 (c, incr, phi, savings);
@@ -2981,10 +2981,10 @@ optimize_cands_for_speed_p (slsr_cand_t c)
static int
lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c,
- const widest_int &incr, bool count_phis)
+ const offset_int &incr, bool count_phis)
{
int local_cost, sib_cost, savings = 0;
- widest_int cand_incr = cand_abs_increment (c);
+ offset_int cand_incr = cand_abs_increment (c);
if (cand_already_replaced (c))
local_cost = cost_in;
@@ -3027,11 +3027,11 @@ lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c,
would go dead. */
static int
-total_savings (int repl_savings, slsr_cand_t c, const widest_int &incr,
+total_savings (int repl_savings, slsr_cand_t c, const offset_int &incr,
bool count_phis)
{
int savings = 0;
- widest_int cand_incr = cand_abs_increment (c);
+ offset_int cand_incr = cand_abs_increment (c);
if (incr == cand_incr && !cand_already_replaced (c))
savings += repl_savings + c->dead_savings;
@@ -3239,7 +3239,7 @@ ncd_for_two_cands (basic_block bb1, basic_block bb2,
candidates, return the earliest candidate in the block in *WHERE. */
static basic_block
-ncd_with_phi (slsr_cand_t c, const widest_int &incr, gphi *phi,
+ncd_with_phi (slsr_cand_t c, const offset_int &incr, gphi *phi,
basic_block ncd, slsr_cand_t *where)
{
unsigned i;
@@ -3255,7 +3255,7 @@ ncd_with_phi (slsr_cand_t c, const widest_int &incr, gphi *phi,
ncd = ncd_with_phi (c, incr, as_a <gphi *> (arg_def), ncd, where);
else
{
- widest_int diff;
+ offset_int diff;
if (operand_equal_p (arg, phi_cand->base_expr, 0))
diff = -basis->index;
@@ -3282,7 +3282,7 @@ ncd_with_phi (slsr_cand_t c, const widest_int &incr, gphi *phi,
return the earliest candidate in the block in *WHERE. */
static basic_block
-ncd_of_cand_and_phis (slsr_cand_t c, const widest_int &incr, slsr_cand_t *where)
+ncd_of_cand_and_phis (slsr_cand_t c, const offset_int &incr, slsr_cand_t *where)
{
basic_block ncd = NULL;
@@ -3308,7 +3308,7 @@ ncd_of_cand_and_phis (slsr_cand_t c, const widest_int &incr, slsr_cand_t *where)
*WHERE. */
static basic_block
-nearest_common_dominator_for_cands (slsr_cand_t c, const widest_int &incr,
+nearest_common_dominator_for_cands (slsr_cand_t c, const offset_int &incr,
slsr_cand_t *where)
{
basic_block sib_ncd = NULL, dep_ncd = NULL, this_ncd = NULL, ncd;
@@ -3385,7 +3385,7 @@ insert_initializers (slsr_cand_t c)
gassign *init_stmt;
gassign *cast_stmt = NULL;
tree new_name, incr_tree, init_stride;
- widest_int incr = incr_vec[i].incr;
+ offset_int incr = incr_vec[i].incr;
if (!profitable_increment_p (i)
|| incr == 1
@@ -3550,7 +3550,7 @@ all_phi_incrs_profitable_1 (slsr_cand_t c, gphi *phi, int *spread)
else
{
int j;
- widest_int increment;
+ offset_int increment;
if (operand_equal_p (arg, phi_cand->base_expr, 0))
increment = -basis->index;
@@ -3681,7 +3681,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
tree orig_rhs1, orig_rhs2;
tree rhs2;
enum tree_code orig_code, repl_code;
- widest_int cand_incr;
+ offset_int cand_incr;
orig_code = gimple_assign_rhs_code (c->cand_stmt);
orig_rhs1 = gimple_assign_rhs1 (c->cand_stmt);
@@ -3839,7 +3839,7 @@ replace_profitable_candidates (slsr_cand_t c)
{
if (!cand_already_replaced (c))
{
- widest_int increment = cand_abs_increment (c);
+ offset_int increment = cand_abs_increment (c);
enum tree_code orig_code = gimple_assign_rhs_code (c->cand_stmt);
int i;
diff --git a/gcc/gimple-ssa-warn-access.cc b/gcc/gimple-ssa-warn-access.cc
index ac07a6f..e439d1b 100644
--- a/gcc/gimple-ssa-warn-access.cc
+++ b/gcc/gimple-ssa-warn-access.cc
@@ -332,7 +332,7 @@ check_nul_terminated_array (GimpleOrTree expr, tree src, tree bound)
{
Value_Range r (TREE_TYPE (bound));
- get_global_range_query ()->range_of_expr (r, bound);
+ get_range_query (cfun)->range_of_expr (r, bound);
if (r.undefined_p () || r.varying_p ())
return true;
@@ -2141,7 +2141,7 @@ private:
void check_dangling_uses (tree, tree, bool = false, bool = false);
void check_dangling_uses ();
void check_dangling_stores ();
- void check_dangling_stores (basic_block, hash_set<tree> &, auto_bitmap &);
+ bool check_dangling_stores (basic_block, hash_set<tree> &);
void warn_invalid_pointer (tree, gimple *, gimple *, tree, bool, bool = false);
@@ -4524,17 +4524,13 @@ pass_waccess::check_dangling_uses (tree var, tree decl, bool maybe /* = false */
/* Diagnose stores in BB and (recursively) its predecessors of the addresses
of local variables into nonlocal pointers that are left dangling after
- the function returns. BBS is a bitmap of basic blocks visited. */
+ the function returns. Returns true when we can continue walking
+ the CFG to predecessors. */
-void
+bool
pass_waccess::check_dangling_stores (basic_block bb,
- hash_set<tree> &stores,
- auto_bitmap &bbs)
+ hash_set<tree> &stores)
{
- if (!bitmap_set_bit (bbs, bb->index))
- /* Avoid cycles. */
- return;
-
/* Iterate backwards over the statements looking for a store of
the address of a local variable into a nonlocal pointer. */
for (auto gsi = gsi_last_nondebug_bb (bb); ; gsi_prev_nondebug (&gsi))
@@ -4550,7 +4546,7 @@ pass_waccess::check_dangling_stores (basic_block bb,
&& !(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
/* Avoid looking before nonconst, nonpure calls since those might
use the escaped locals. */
- return;
+ return false;
if (!is_gimple_assign (stmt) || gimple_clobber_p (stmt)
|| !gimple_store_p (stmt))
@@ -4576,7 +4572,7 @@ pass_waccess::check_dangling_stores (basic_block bb,
gimple *def_stmt = SSA_NAME_DEF_STMT (lhs_ref.ref);
if (!gimple_nop_p (def_stmt))
/* Avoid looking at or before stores into unknown objects. */
- return;
+ return false;
lhs_ref.ref = SSA_NAME_VAR (lhs_ref.ref);
}
@@ -4620,13 +4616,7 @@ pass_waccess::check_dangling_stores (basic_block bb,
}
}
- edge e;
- edge_iterator ei;
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- basic_block pred = e->src;
- check_dangling_stores (pred, stores, bbs);
- }
+ return true;
}
/* Diagnose stores of the addresses of local variables into nonlocal
@@ -4635,9 +4625,32 @@ pass_waccess::check_dangling_stores (basic_block bb,
void
pass_waccess::check_dangling_stores ()
{
+ if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (m_func)->preds) == 0)
+ return;
+
auto_bitmap bbs;
hash_set<tree> stores;
- check_dangling_stores (EXIT_BLOCK_PTR_FOR_FN (m_func), stores, bbs);
+ auto_vec<edge_iterator, 8> worklist (n_basic_blocks_for_fn (cfun) + 1);
+ worklist.quick_push (ei_start (EXIT_BLOCK_PTR_FOR_FN (m_func)->preds));
+ do
+ {
+ edge_iterator ei = worklist.last ();
+ basic_block src = ei_edge (ei)->src;
+ if (bitmap_set_bit (bbs, src->index))
+ {
+ if (check_dangling_stores (src, stores)
+ && EDGE_COUNT (src->preds) > 0)
+ worklist.quick_push (ei_start (src->preds));
+ }
+ else
+ {
+ if (ei_one_before_end_p (ei))
+ worklist.pop ();
+ else
+ ei_next (&worklist.last ());
+ }
+ }
+ while (!worklist.is_empty ());
}
/* Check for and diagnose uses of dangling pointers to auto objects
diff --git a/gcc/gimple-ssa-warn-alloca.cc b/gcc/gimple-ssa-warn-alloca.cc
index 2d8ab93..4c97434 100644
--- a/gcc/gimple-ssa-warn-alloca.cc
+++ b/gcc/gimple-ssa-warn-alloca.cc
@@ -310,7 +310,7 @@ pass_walloca::execute (function *fun)
enum opt_code wcode
= is_vla ? OPT_Wvla_larger_than_ : OPT_Walloca_larger_than_;
- char buff[WIDE_INT_MAX_PRECISION / 4 + 4];
+ char buff[WIDE_INT_MAX_INL_PRECISION / 4 + 4];
switch (t.type)
{
case ALLOCA_OK:
@@ -329,6 +329,7 @@ pass_walloca::execute (function *fun)
"large")))
&& t.limit != 0)
{
+ gcc_assert (t.limit.get_len () < WIDE_INT_MAX_INL_ELTS);
print_decu (t.limit, buff);
inform (loc, "limit is %wu bytes, but argument "
"may be as large as %s",
@@ -347,6 +348,7 @@ pass_walloca::execute (function *fun)
: G_("argument to %<alloca%> is too large")))
&& t.limit != 0)
{
+ gcc_assert (t.limit.get_len () < WIDE_INT_MAX_INL_ELTS);
print_decu (t.limit, buff);
inform (loc, "limit is %wu bytes, but argument is %s",
is_vla ? warn_vla_limit : adjusted_alloca_limit,
diff --git a/gcc/gimplify.cc b/gcc/gimplify.cc
index 9f4722f..22ff107 100644
--- a/gcc/gimplify.cc
+++ b/gcc/gimplify.cc
@@ -1405,18 +1405,46 @@ gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
|| alloc == NULL_TREE
|| !integer_onep (alloc)))
{
- tree tmp = build_pointer_type (TREE_TYPE (t));
- tree v = create_tmp_var (tmp, get_name (t));
- DECL_IGNORED_P (v) = 0;
- tmp = remove_attribute ("omp allocate", DECL_ATTRIBUTES (t));
- DECL_ATTRIBUTES (v)
- = tree_cons (get_identifier ("omp allocate var"),
- build_tree_list (NULL_TREE, t), tmp);
- tmp = build_fold_indirect_ref (v);
- TREE_THIS_NOTRAP (tmp) = 1;
- SET_DECL_VALUE_EXPR (t, tmp);
- DECL_HAS_VALUE_EXPR_P (t) = 1;
- tree sz = TYPE_SIZE_UNIT (TREE_TYPE (t));
+ /* Fortran might already use a pointer type internally;
+ use that pointer except for type(C_ptr) and type(C_funptr);
+ note that normal proc pointers are rejected. */
+ tree type = TREE_TYPE (t);
+ tree tmp, v;
+ if (lang_GNU_Fortran ()
+ && POINTER_TYPE_P (type)
+ && TREE_TYPE (type) != void_type_node
+ && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
+ {
+ type = TREE_TYPE (type);
+ v = t;
+ }
+ else
+ {
+ tmp = build_pointer_type (type);
+ v = create_tmp_var (tmp, get_name (t));
+ DECL_IGNORED_P (v) = 0;
+ DECL_ATTRIBUTES (v)
+ = tree_cons (get_identifier ("omp allocate var"),
+ build_tree_list (NULL_TREE, t),
+ remove_attribute ("omp allocate",
+ DECL_ATTRIBUTES (t)));
+ tmp = build_fold_indirect_ref (v);
+ TREE_THIS_NOTRAP (tmp) = 1;
+ SET_DECL_VALUE_EXPR (t, tmp);
+ DECL_HAS_VALUE_EXPR_P (t) = 1;
+ }
+ tree sz = TYPE_SIZE_UNIT (type);
+ /* The size to use in Fortran might not match TYPE_SIZE_UNIT;
+ hence, for some decls, a size variable is saved in the
+ attributes; use it, if available. */
+ if (TREE_CHAIN (TREE_VALUE (attr))
+ && TREE_CHAIN (TREE_CHAIN (TREE_VALUE (attr)))
+ && TREE_PURPOSE (
+ TREE_CHAIN (TREE_CHAIN (TREE_VALUE (attr)))))
+ {
+ sz = TREE_CHAIN (TREE_CHAIN (TREE_VALUE (attr)));
+ sz = TREE_PURPOSE (sz);
+ }
if (alloc == NULL_TREE)
alloc = build_zero_cst (ptr_type_node);
if (align == NULL_TREE)
@@ -1425,28 +1453,98 @@ gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
align = build_int_cst (size_type_node,
MAX (tree_to_uhwi (align),
DECL_ALIGN_UNIT (t)));
+ location_t loc = DECL_SOURCE_LOCATION (t);
tmp = builtin_decl_explicit (BUILT_IN_GOMP_ALLOC);
- tmp = build_call_expr_loc (DECL_SOURCE_LOCATION (t), tmp,
- 3, align, sz, alloc);
- tmp = fold_build2_loc (DECL_SOURCE_LOCATION (t), MODIFY_EXPR,
- TREE_TYPE (v), v,
+ tmp = build_call_expr_loc (loc, tmp, 3, align, sz, alloc);
+ tmp = fold_build2_loc (loc, MODIFY_EXPR, TREE_TYPE (v), v,
fold_convert (TREE_TYPE (v), tmp));
- gcc_assert (BIND_EXPR_BODY (bind_expr) != NULL_TREE
- && (TREE_CODE (BIND_EXPR_BODY (bind_expr))
- == STATEMENT_LIST));
- tree_stmt_iterator e = tsi_start (BIND_EXPR_BODY (bind_expr));
- while (!tsi_end_p (e))
+ gcc_assert (BIND_EXPR_BODY (bind_expr) != NULL_TREE);
+ /* Ensure that either TREE_CHAIN (TREE_VALUE (attr) is set
+ and GOMP_FREE added here or that DECL_HAS_VALUE_EXPR_P (t)
+ is set, using in a condition much further below. */
+ gcc_assert (DECL_HAS_VALUE_EXPR_P (t)
+ || TREE_CHAIN (TREE_VALUE (attr)));
+ if (TREE_CHAIN (TREE_VALUE (attr)))
{
- if ((TREE_CODE (*e) == DECL_EXPR
- && TREE_OPERAND (*e, 0) == t)
- || (TREE_CODE (*e) == CLEANUP_POINT_EXPR
- && TREE_CODE (TREE_OPERAND (*e, 0)) == DECL_EXPR
- && TREE_OPERAND (TREE_OPERAND (*e, 0), 0) == t))
- break;
+ /* Fortran is special as it does not have properly nest
+ declarations in blocks. And as there is no
+ initializer, there is also no expression to look for.
+ Hence, the FE makes the statement list of the
+ try-finally block available. We can put the GOMP_alloc
+ at the top, unless an allocator or size expression
+ requires to put it afterward; note that the size is
+ always later in generated code; for strings, no
+ size expr but still an expr might be available.
+ As LTO does not handle a statement list, 'sl' has
+ to be removed; done so by removing the attribute. */
+ DECL_ATTRIBUTES (t)
+ = remove_attribute ("omp allocate",
+ DECL_ATTRIBUTES (t));
+ tree sl = TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr)));
+ tree_stmt_iterator e = tsi_start (sl);
+ tree needle = NULL_TREE;
+ if (TREE_CHAIN (TREE_CHAIN (TREE_VALUE (attr))))
+ {
+ needle = TREE_CHAIN (TREE_CHAIN (TREE_VALUE (attr)));
+ needle = (TREE_VALUE (needle) ? TREE_VALUE (needle)
+ : sz);
+ }
+ else if (TREE_CHAIN (TREE_CHAIN (TREE_VALUE (attr))))
+ needle = sz;
+ else if (DECL_P (alloc) && DECL_ARTIFICIAL (alloc))
+ needle = alloc;
+
+ if (needle != NULL_TREE)
+ {
+ while (!tsi_end_p (e))
+ {
+ if (*e == needle
+ || (TREE_CODE (*e) == MODIFY_EXPR
+ && TREE_OPERAND (*e, 0) == needle))
+ break;
+ ++e;
+ }
+ gcc_assert (!tsi_end_p (e));
+ }
+ tsi_link_after (&e, tmp, TSI_SAME_STMT);
+
+ /* As the cleanup is in BIND_EXPR_BODY, GOMP_free is added
+ here; for C/C++ it will be added in the 'cleanup'
+ section after gimplification. But Fortran already has
+ a try-finally block. */
+ sl = TREE_VALUE (TREE_CHAIN (TREE_VALUE (attr)));
+ e = tsi_last (sl);
+ tmp = builtin_decl_explicit (BUILT_IN_GOMP_FREE);
+ tmp = build_call_expr_loc (EXPR_LOCATION (*e), tmp, 2, v,
+ build_zero_cst (ptr_type_node));
+ tsi_link_after (&e, tmp, TSI_SAME_STMT);
+ tmp = build_clobber (TREE_TYPE (v), CLOBBER_EOL);
+ tmp = fold_build2_loc (loc, MODIFY_EXPR, TREE_TYPE (v), v,
+ fold_convert (TREE_TYPE (v), tmp));
++e;
+ tsi_link_after (&e, tmp, TSI_SAME_STMT);
}
- gcc_assert (!tsi_end_p (e));
- tsi_link_before (&e, tmp, TSI_SAME_STMT);
+ else
+ {
+ gcc_assert (TREE_CODE (BIND_EXPR_BODY (bind_expr))
+ == STATEMENT_LIST);
+ tree_stmt_iterator e;
+ e = tsi_start (BIND_EXPR_BODY (bind_expr));
+ while (!tsi_end_p (e))
+ {
+ if ((TREE_CODE (*e) == DECL_EXPR
+ && TREE_OPERAND (*e, 0) == t)
+ || (TREE_CODE (*e) == CLEANUP_POINT_EXPR
+ && (TREE_CODE (TREE_OPERAND (*e, 0))
+ == DECL_EXPR)
+ && (TREE_OPERAND (TREE_OPERAND (*e, 0), 0)
+ == t)))
+ break;
+ ++e;
+ }
+ gcc_assert (!tsi_end_p (e));
+ tsi_link_before (&e, tmp, TSI_SAME_STMT);
+ }
}
}
@@ -1544,11 +1642,19 @@ gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
&& TREE_USED (t)
&& lookup_attribute ("omp allocate", DECL_ATTRIBUTES (t)))
{
+ /* For Fortran, TREE_CHAIN (TREE_VALUE (attr)) is set, which
+ causes that the GOMP_free call is already added above;
+ and "omp allocate" is removed from DECL_ATTRIBUTES. */
+ tree v = TREE_OPERAND (DECL_VALUE_EXPR (t), 0);
tree tmp = builtin_decl_explicit (BUILT_IN_GOMP_FREE);
- tmp = build_call_expr_loc (end_locus, tmp, 2,
- TREE_OPERAND (DECL_VALUE_EXPR (t), 0),
+ tmp = build_call_expr_loc (end_locus, tmp, 2, v,
build_zero_cst (ptr_type_node));
gimplify_and_add (tmp, &cleanup);
+ gimple *clobber_stmt;
+ tmp = build_clobber (TREE_TYPE (v), CLOBBER_EOL);
+ clobber_stmt = gimple_build_assign (v, tmp);
+ gimple_set_location (clobber_stmt, end_locus);
+ gimplify_seq_add_stmt (&cleanup, clobber_stmt);
}
if (!DECL_HARD_REGISTER (t)
&& !TREE_THIS_VOLATILE (t)
diff --git a/gcc/godump.cc b/gcc/godump.cc
index bdd2d10..c6f547c 100644
--- a/gcc/godump.cc
+++ b/gcc/godump.cc
@@ -1154,7 +1154,11 @@ go_output_typedef (class godump_container *container, tree decl)
snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_UNSIGNED,
tree_to_uhwi (value));
else
- print_hex (wi::to_wide (element), buf);
+ {
+ wide_int w = wi::to_wide (element);
+ gcc_assert (w.get_len () <= WIDE_INT_MAX_INL_ELTS);
+ print_hex (w, buf);
+ }
mhval->value = xstrdup (buf);
*slot = mhval;
diff --git a/gcc/graphite-isl-ast-to-gimple.cc b/gcc/graphite-isl-ast-to-gimple.cc
index 6c0d6f9..f62b0fe 100644
--- a/gcc/graphite-isl-ast-to-gimple.cc
+++ b/gcc/graphite-isl-ast-to-gimple.cc
@@ -274,7 +274,7 @@ widest_int_from_isl_expr_int (__isl_keep isl_ast_expr *expr)
isl_val *val = isl_ast_expr_get_val (expr);
size_t n = isl_val_n_abs_num_chunks (val, sizeof (HOST_WIDE_INT));
HOST_WIDE_INT *chunks = XALLOCAVEC (HOST_WIDE_INT, n);
- if (n > WIDE_INT_MAX_ELTS
+ if (n > WIDEST_INT_MAX_ELTS
|| isl_val_get_abs_num_chunks (val, sizeof (HOST_WIDE_INT), chunks) == -1)
{
isl_val_free (val);
diff --git a/gcc/graphviz.cc b/gcc/graphviz.cc
index 177fc20..ea7bb9d 100644
--- a/gcc/graphviz.cc
+++ b/gcc/graphviz.cc
@@ -36,13 +36,10 @@ graphviz_out::graphviz_out (pretty_printer *pp)
void
graphviz_out::print (const char *fmt, ...)
{
- text_info text;
va_list ap;
va_start (ap, fmt);
- text.err_no = errno;
- text.args_ptr = &ap;
- text.format_spec = fmt;
+ text_info text (fmt, &ap, errno);
pp_format (m_pp, &text);
pp_output_formatted_text (m_pp);
va_end (ap);
@@ -54,15 +51,12 @@ graphviz_out::print (const char *fmt, ...)
void
graphviz_out::println (const char *fmt, ...)
{
- text_info text;
va_list ap;
write_indent ();
va_start (ap, fmt);
- text.err_no = errno;
- text.args_ptr = &ap;
- text.format_spec = fmt;
+ text_info text (fmt, &ap, errno);
pp_format (m_pp, &text);
pp_output_formatted_text (m_pp);
va_end (ap);
diff --git a/gcc/inchash.h b/gcc/inchash.h
index 41ae153..dc594da 100644
--- a/gcc/inchash.h
+++ b/gcc/inchash.h
@@ -59,7 +59,7 @@ class hash
/* Add polynomial value V, treating each element as an unsigned int. */
template<unsigned int N, typename T>
- void add_poly_int (const poly_int_pod<N, T> &v)
+ void add_poly_int (const poly_int<N, T> &v)
{
for (unsigned int i = 0; i < N; ++i)
add_int (v.coeffs[i]);
@@ -73,7 +73,7 @@ class hash
/* Add polynomial value V, treating each element as a HOST_WIDE_INT. */
template<unsigned int N, typename T>
- void add_poly_hwi (const poly_int_pod<N, T> &v)
+ void add_poly_hwi (const poly_int<N, T> &v)
{
for (unsigned int i = 0; i < N; ++i)
add_hwi (v.coeffs[i]);
diff --git a/gcc/input.cc b/gcc/input.cc
index eaf301e..fd09fcc 100644
--- a/gcc/input.cc
+++ b/gcc/input.cc
@@ -443,7 +443,10 @@ file_cache::evicted_cache_tab_entry (unsigned *highest_use_count)
accessed by caret diagnostic. This cache is added to an array of
cache and can be retrieved by lookup_file_in_cache_tab. This
function returns the created cache. Note that only the last
- num_file_slots files are cached. */
+ num_file_slots files are cached.
+
+ This can return nullptr if the FILE_PATH can't be opened for
+ reading, or if the content can't be converted to the input_charset. */
file_cache_slot*
file_cache::add_file (const char *file_path)
@@ -547,7 +550,10 @@ file_cache::~file_cache ()
/* Lookup the cache used for the content of a given file accessed by
caret diagnostic. If no cached file was found, create a new cache
for this file, add it to the array of cached file and return
- it. */
+ it.
+
+ This can return nullptr on a cache miss if FILE_PATH can't be opened for
+ reading, or if the content can't be converted to the input_charset. */
file_cache_slot*
file_cache::lookup_or_add_file (const char *file_path)
@@ -946,7 +952,7 @@ file_cache_slot::read_line_num (size_t line_num,
If the function fails, a NULL char_span is returned. */
char_span
-location_get_source_line (const char *file_path, int line)
+file_cache::get_source_line (const char *file_path, int line)
{
char *buffer = NULL;
ssize_t len;
@@ -957,9 +963,7 @@ location_get_source_line (const char *file_path, int line)
if (file_path == NULL)
return char_span (NULL, 0);
- diagnostic_file_cache_init ();
-
- file_cache_slot *c = global_dc->m_file_cache->lookup_or_add_file (file_path);
+ file_cache_slot *c = lookup_or_add_file (file_path);
if (c == NULL)
return char_span (NULL, 0);
@@ -970,6 +974,13 @@ location_get_source_line (const char *file_path, int line)
return char_span (buffer, len);
}
+char_span
+location_get_source_line (const char *file_path, int line)
+{
+ diagnostic_file_cache_init ();
+ return global_dc->m_file_cache->get_source_line (file_path, line);
+}
+
/* Return a NUL-terminated copy of the source text between two locations, or
NULL if the arguments are invalid. The caller is responsible for freeing
the return value. */
@@ -1062,6 +1073,17 @@ get_source_text_between (location_t start, location_t end)
return xstrdup (buf);
}
+
+char_span
+file_cache::get_source_file_content (const char *file_path)
+{
+ file_cache_slot *c = lookup_or_add_file (file_path);
+ if (c == nullptr)
+ return char_span (nullptr, 0);
+ return c->get_full_file_content ();
+}
+
+
/* Get a borrowed char_span to the full content of FILE_PATH
as decoded according to the input charset, encoded as UTF-8. */
@@ -1069,9 +1091,7 @@ char_span
get_source_file_content (const char *file_path)
{
diagnostic_file_cache_init ();
-
- file_cache_slot *c = global_dc->m_file_cache->lookup_or_add_file (file_path);
- return c->get_full_file_content ();
+ return global_dc->m_file_cache->get_source_file_content (file_path);
}
/* Determine if FILE_PATH missing a trailing newline on its final line.
@@ -1184,7 +1204,9 @@ expansion_point_location (location_t location)
}
/* Construct a location with caret at CARET, ranging from START to
- finish e.g.
+ FINISH.
+
+ For example, consider:
11111111112
12345678901234567890
@@ -1200,16 +1222,7 @@ expansion_point_location (location_t location)
location_t
make_location (location_t caret, location_t start, location_t finish)
{
- location_t pure_loc = get_pure_location (caret);
- source_range src_range;
- src_range.m_start = get_start (start);
- src_range.m_finish = get_finish (finish);
- location_t combined_loc = COMBINE_LOCATION_DATA (line_table,
- pure_loc,
- src_range,
- NULL,
- 0);
- return combined_loc;
+ return line_table->make_location (caret, start, finish);
}
/* Same as above, but taking a source range rather than two locations. */
@@ -1218,7 +1231,8 @@ location_t
make_location (location_t caret, source_range src_range)
{
location_t pure_loc = get_pure_location (caret);
- return COMBINE_LOCATION_DATA (line_table, pure_loc, src_range, NULL, 0);
+ return line_table->get_or_create_combined_loc (pure_loc, src_range,
+ nullptr, 0);
}
/* An expanded_location stores the column in byte units. This function
@@ -1300,9 +1314,9 @@ dump_line_table_statistics (void)
fprintf (stderr, "Ad-hoc table entries used: " PRsa (5) "\n",
SIZE_AMOUNT (s.adhoc_table_entries_used));
fprintf (stderr, "optimized_ranges: " PRsa (5) "\n",
- SIZE_AMOUNT (line_table->num_optimized_ranges));
+ SIZE_AMOUNT (line_table->m_num_optimized_ranges));
fprintf (stderr, "unoptimized_ranges: " PRsa (5) "\n",
- SIZE_AMOUNT (line_table->num_unoptimized_ranges));
+ SIZE_AMOUNT (line_table->m_num_unoptimized_ranges));
fprintf (stderr, "\n");
}
@@ -1904,7 +1918,8 @@ location_with_discriminator (location_t locus, int discriminator)
if (locus == UNKNOWN_LOCATION)
return locus;
- return COMBINE_LOCATION_DATA (line_table, locus, src_range, block, discriminator);
+ return line_table->get_or_create_combined_loc (locus, src_range, block,
+ discriminator);
}
/* Return TRUE if LOCUS represents a location with a discriminator. */
@@ -2086,10 +2101,10 @@ line_table_test::line_table_test ()
saved_line_table = line_table;
line_table = ggc_alloc<line_maps> ();
linemap_init (line_table, BUILTINS_LOCATION);
- gcc_assert (saved_line_table->reallocator);
- line_table->reallocator = saved_line_table->reallocator;
- gcc_assert (saved_line_table->round_alloc_size);
- line_table->round_alloc_size = saved_line_table->round_alloc_size;
+ gcc_assert (saved_line_table->m_reallocator);
+ line_table->m_reallocator = saved_line_table->m_reallocator;
+ gcc_assert (saved_line_table->m_round_alloc_size);
+ line_table->m_round_alloc_size = saved_line_table->m_round_alloc_size;
line_table->default_range_bits = 0;
}
@@ -2102,10 +2117,10 @@ line_table_test::line_table_test (const line_table_case &case_)
saved_line_table = line_table;
line_table = ggc_alloc<line_maps> ();
linemap_init (line_table, BUILTINS_LOCATION);
- gcc_assert (saved_line_table->reallocator);
- line_table->reallocator = saved_line_table->reallocator;
- gcc_assert (saved_line_table->round_alloc_size);
- line_table->round_alloc_size = saved_line_table->round_alloc_size;
+ gcc_assert (saved_line_table->m_reallocator);
+ line_table->m_reallocator = saved_line_table->m_reallocator;
+ gcc_assert (saved_line_table->m_round_alloc_size);
+ line_table->m_round_alloc_size = saved_line_table->m_round_alloc_size;
line_table->default_range_bits = case_.m_default_range_bits;
if (case_.m_base_location)
{
diff --git a/gcc/input.h b/gcc/input.h
index d1087b7..bfd71df 100644
--- a/gcc/input.h
+++ b/gcc/input.h
@@ -150,6 +150,9 @@ class file_cache
void initialize_input_context (diagnostic_input_charset_callback ccb,
bool should_skip_bom);
+ char_span get_source_file_content (const char *file_path);
+ char_span get_source_line (const char *file_path, int line);
+
private:
file_cache_slot *evicted_cache_tab_entry (unsigned *highest_use_count);
file_cache_slot *add_file (const char *file_path);
diff --git a/gcc/ipa-cp.cc b/gcc/ipa-cp.cc
index 071c607..e056030 100644
--- a/gcc/ipa-cp.cc
+++ b/gcc/ipa-cp.cc
@@ -2749,11 +2749,22 @@ propagate_bits_across_jump_function (cgraph_edge *cs, int idx,
}
}
- if (jfunc->bits)
- return dest_lattice->meet_with (jfunc->bits->value, jfunc->bits->mask,
- precision);
- else
- return dest_lattice->set_to_bottom ();
+ Value_Range vr (parm_type);
+ if (jfunc->m_vr)
+ {
+ jfunc->m_vr->get_vrange (vr);
+ if (!vr.undefined_p () && !vr.varying_p ())
+ {
+ irange &r = as_a <irange> (vr);
+ irange_bitmask bm = r.get_bitmask ();
+ widest_int mask
+ = widest_int::from (bm.mask (), TYPE_SIGN (parm_type));
+ widest_int value
+ = widest_int::from (bm.value (), TYPE_SIGN (parm_type));
+ return dest_lattice->meet_with (value, mask, precision);
+ }
+ }
+ return dest_lattice->set_to_bottom ();
}
/* Propagate value range across jump function JFUNC that is associated with
@@ -6521,89 +6532,8 @@ ipcp_decision_stage (class ipa_topo_info *topo)
}
}
-/* Look up all the bits information that we have discovered and copy it over
- to the transformation summary. */
-
-static void
-ipcp_store_bits_results (void)
-{
- cgraph_node *node;
-
- FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
- {
- ipa_node_params *info = ipa_node_params_sum->get (node);
- bool dumped_sth = false;
- bool found_useful_result = false;
-
- if (!opt_for_fn (node->decl, flag_ipa_bit_cp) || !info)
- {
- if (dump_file)
- fprintf (dump_file, "Not considering %s for ipa bitwise propagation "
- "; -fipa-bit-cp: disabled.\n",
- node->dump_name ());
- continue;
- }
-
- if (info->ipcp_orig_node)
- info = ipa_node_params_sum->get (info->ipcp_orig_node);
- if (!info->lattices)
- /* Newly expanded artificial thunks do not have lattices. */
- continue;
-
- unsigned count = ipa_get_param_count (info);
- for (unsigned i = 0; i < count; i++)
- {
- ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- if (plats->bits_lattice.constant_p ())
- {
- found_useful_result = true;
- break;
- }
- }
-
- if (!found_useful_result)
- continue;
-
- ipcp_transformation_initialize ();
- ipcp_transformation *ts = ipcp_transformation_sum->get_create (node);
- vec_safe_reserve_exact (ts->bits, count);
-
- for (unsigned i = 0; i < count; i++)
- {
- ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- ipa_bits *jfbits;
-
- if (plats->bits_lattice.constant_p ())
- {
- jfbits
- = ipa_get_ipa_bits_for_value (plats->bits_lattice.get_value (),
- plats->bits_lattice.get_mask ());
- if (!dbg_cnt (ipa_cp_bits))
- jfbits = NULL;
- }
- else
- jfbits = NULL;
-
- ts->bits->quick_push (jfbits);
- if (!dump_file || !jfbits)
- continue;
- if (!dumped_sth)
- {
- fprintf (dump_file, "Propagated bits info for function %s:\n",
- node->dump_name ());
- dumped_sth = true;
- }
- fprintf (dump_file, " param %i: value = ", i);
- print_hex (jfbits->value, dump_file);
- fprintf (dump_file, ", mask = ");
- print_hex (jfbits->mask, dump_file);
- fprintf (dump_file, "\n");
- }
- }
-}
-
-/* Look up all VR information that we have discovered and copy it over
- to the transformation summary. */
+/* Look up all VR and bits information that we have discovered and copy it
+ over to the transformation summary. */
static void
ipcp_store_vr_results (void)
@@ -6613,7 +6543,10 @@ ipcp_store_vr_results (void)
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
ipa_node_params *info = ipa_node_params_sum->get (node);
+ bool dumped_sth = false;
bool found_useful_result = false;
+ bool do_vr = true;
+ bool do_bits = true;
if (!info || !opt_for_fn (node->decl, flag_ipa_vrp))
{
@@ -6621,8 +6554,18 @@ ipcp_store_vr_results (void)
fprintf (dump_file, "Not considering %s for VR discovery "
"and propagate; -fipa-ipa-vrp: disabled.\n",
node->dump_name ());
- continue;
+ do_vr = false;
+ }
+ if (!info || !opt_for_fn (node->decl, flag_ipa_bit_cp))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Not considering %s for ipa bitwise "
+ "propagation ; -fipa-bit-cp: disabled.\n",
+ node->dump_name ());
+ do_bits = false;
}
+ if (!do_bits && !do_vr)
+ continue;
if (info->ipcp_orig_node)
info = ipa_node_params_sum->get (info->ipcp_orig_node);
@@ -6634,12 +6577,18 @@ ipcp_store_vr_results (void)
for (unsigned i = 0; i < count; i++)
{
ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- if (!plats->m_value_range.bottom_p ()
+ if (do_vr
+ && !plats->m_value_range.bottom_p ()
&& !plats->m_value_range.top_p ())
{
found_useful_result = true;
break;
}
+ if (do_bits && plats->bits_lattice.constant_p ())
+ {
+ found_useful_result = true;
+ break;
+ }
}
if (!found_useful_result)
continue;
@@ -6651,12 +6600,53 @@ ipcp_store_vr_results (void)
for (unsigned i = 0; i < count; i++)
{
ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ ipcp_bits_lattice *bits = NULL;
+
+ if (do_bits
+ && plats->bits_lattice.constant_p ()
+ && dbg_cnt (ipa_cp_bits))
+ bits = &plats->bits_lattice;
- if (!plats->m_value_range.bottom_p ()
+ if (do_vr
+ && !plats->m_value_range.bottom_p ()
&& !plats->m_value_range.top_p ()
&& dbg_cnt (ipa_cp_vr))
{
- ipa_vr vr (plats->m_value_range.m_vr);
+ if (bits)
+ {
+ Value_Range tmp = plats->m_value_range.m_vr;
+ tree type = ipa_get_type (info, i);
+ irange &r = as_a<irange> (tmp);
+ irange_bitmask bm (wide_int::from (bits->get_value (),
+ TYPE_PRECISION (type),
+ TYPE_SIGN (type)),
+ wide_int::from (bits->get_mask (),
+ TYPE_PRECISION (type),
+ TYPE_SIGN (type)));
+ r.update_bitmask (bm);
+ ipa_vr vr (tmp);
+ ts->m_vr->quick_push (vr);
+ }
+ else
+ {
+ ipa_vr vr (plats->m_value_range.m_vr);
+ ts->m_vr->quick_push (vr);
+ }
+ }
+ else if (bits)
+ {
+ tree type = ipa_get_type (info, i);
+ Value_Range tmp;
+ tmp.set_varying (type);
+ irange &r = as_a<irange> (tmp);
+ irange_bitmask bm (wide_int::from (bits->get_value (),
+ TYPE_PRECISION (type),
+ TYPE_SIGN (type)),
+ wide_int::from (bits->get_mask (),
+ TYPE_PRECISION (type),
+ TYPE_SIGN (type)));
+ r.update_bitmask (bm);
+ ipa_vr vr (tmp);
ts->m_vr->quick_push (vr);
}
else
@@ -6664,6 +6654,21 @@ ipcp_store_vr_results (void)
ipa_vr vr;
ts->m_vr->quick_push (vr);
}
+
+ if (!dump_file || !bits)
+ continue;
+
+ if (!dumped_sth)
+ {
+ fprintf (dump_file, "Propagated bits info for function %s:\n",
+ node->dump_name ());
+ dumped_sth = true;
+ }
+ fprintf (dump_file, " param %i: value = ", i);
+ print_hex (bits->get_value (), dump_file);
+ fprintf (dump_file, ", mask = ");
+ print_hex (bits->get_mask (), dump_file);
+ fprintf (dump_file, "\n");
}
}
}
@@ -6696,9 +6701,7 @@ ipcp_driver (void)
ipcp_propagate_stage (&topo);
/* Decide what constant propagation and cloning should be performed. */
ipcp_decision_stage (&topo);
- /* Store results of bits propagation. */
- ipcp_store_bits_results ();
- /* Store results of value range propagation. */
+ /* Store results of value range and bits propagation. */
ipcp_store_vr_results ();
/* Free all IPCP structures. */
diff --git a/gcc/ipa-fnsummary.cc b/gcc/ipa-fnsummary.cc
index f1244da2..a2495ff 100644
--- a/gcc/ipa-fnsummary.cc
+++ b/gcc/ipa-fnsummary.cc
@@ -679,12 +679,8 @@ evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
if (!vr.undefined_p () && !vr.varying_p ())
{
if (!avals->m_known_value_ranges.length ())
- {
- avals->m_known_value_ranges.safe_grow (count, true);
- for (int i = 0; i < count; ++i)
- new (&avals->m_known_value_ranges[i])
- Value_Range ();
- }
+ avals->m_known_value_ranges.safe_grow_cleared (count,
+ true);
avals->m_known_value_ranges[i] = vr;
}
}
diff --git a/gcc/ipa-modref-tree.cc b/gcc/ipa-modref-tree.cc
index de89d87..36bc803 100644
--- a/gcc/ipa-modref-tree.cc
+++ b/gcc/ipa-modref-tree.cc
@@ -653,17 +653,17 @@ modref_access_node::dump (FILE *out)
if (parm_offset_known)
{
fprintf (out, " param offset:");
- print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
+ print_dec ((poly_int64)parm_offset, out, SIGNED);
}
}
if (range_info_useful_p ())
{
fprintf (out, " offset:");
- print_dec ((poly_int64_pod)offset, out, SIGNED);
+ print_dec ((poly_int64)offset, out, SIGNED);
fprintf (out, " size:");
- print_dec ((poly_int64_pod)size, out, SIGNED);
+ print_dec ((poly_int64)size, out, SIGNED);
fprintf (out, " max_size:");
- print_dec ((poly_int64_pod)max_size, out, SIGNED);
+ print_dec ((poly_int64)max_size, out, SIGNED);
if (adjustments)
fprintf (out, " adjusted %i times", adjustments);
}
diff --git a/gcc/ipa-modref.cc b/gcc/ipa-modref.cc
index 278b2db..fe55621 100644
--- a/gcc/ipa-modref.cc
+++ b/gcc/ipa-modref.cc
@@ -474,7 +474,7 @@ dump_lto_records (modref_records_lto *tt, FILE *out)
FOR_EACH_VEC_SAFE_ELT (tt->bases, i, n)
{
fprintf (out, " Base %i:", (int)i);
- print_generic_expr (dump_file, n->base);
+ print_generic_expr (out, n->base);
fprintf (out, " (alias set %i)\n",
n->base ? get_alias_set (n->base) : 0);
if (n->every_ref)
@@ -487,7 +487,7 @@ dump_lto_records (modref_records_lto *tt, FILE *out)
FOR_EACH_VEC_SAFE_ELT (n->refs, j, r)
{
fprintf (out, " Ref %i:", (int)j);
- print_generic_expr (dump_file, r->ref);
+ print_generic_expr (out, r->ref);
fprintf (out, " (alias set %i)\n",
r->ref ? get_alias_set (r->ref) : 0);
if (r->every_access)
@@ -567,7 +567,7 @@ remove_modref_edge_summaries (cgraph_node *node)
/* Dump summary. */
void
-modref_summary::dump (FILE *out)
+modref_summary::dump (FILE *out) const
{
if (loads)
{
@@ -1331,7 +1331,7 @@ modref_access_analysis::merge_call_side_effects
if (parm_map[i].parm_offset_known)
{
fprintf (dump_file, " offset:");
- print_dec ((poly_int64_pod)parm_map[i].parm_offset,
+ print_dec ((poly_int64)parm_map[i].parm_offset,
dump_file, SIGNED);
}
}
@@ -1347,7 +1347,7 @@ modref_access_analysis::merge_call_side_effects
if (chain_map.parm_offset_known)
{
fprintf (dump_file, " offset:");
- print_dec ((poly_int64_pod)chain_map.parm_offset,
+ print_dec ((poly_int64)chain_map.parm_offset,
dump_file, SIGNED);
}
}
diff --git a/gcc/ipa-modref.h b/gcc/ipa-modref.h
index 2a2d31e..f7dedac 100644
--- a/gcc/ipa-modref.h
+++ b/gcc/ipa-modref.h
@@ -66,7 +66,7 @@ struct GTY(()) modref_summary
modref_summary ();
~modref_summary ();
- void dump (FILE *);
+ void dump (FILE *) const;
bool useful_p (int ecf_flags, bool check_flags = true);
void finalize (tree);
};
diff --git a/gcc/ipa-param-manipulation.cc b/gcc/ipa-param-manipulation.cc
index 4a185dd..ae52f17 100644
--- a/gcc/ipa-param-manipulation.cc
+++ b/gcc/ipa-param-manipulation.cc
@@ -1163,6 +1163,8 @@ ipa_param_body_adjustments::mark_dead_statements (tree dead_param,
stack.safe_push (lhs);
}
}
+ else if (gimple_code (stmt) == GIMPLE_RETURN)
+ gcc_assert (m_adjustments && m_adjustments->m_skip_return);
else
/* IPA-SRA does not analyze other types of statements. */
gcc_unreachable ();
@@ -1182,7 +1184,8 @@ ipa_param_body_adjustments::mark_dead_statements (tree dead_param,
}
/* Put all clobbers of of dereference of default definition of PARAM into
- m_dead_stmts. */
+ m_dead_stmts. If there are returns among uses of the default definition of
+ PARAM, verify they will be stripped off the return value. */
void
ipa_param_body_adjustments::mark_clobbers_dead (tree param)
@@ -1200,6 +1203,8 @@ ipa_param_body_adjustments::mark_clobbers_dead (tree param)
gimple *stmt = USE_STMT (use_p);
if (gimple_clobber_p (stmt))
m_dead_stmts.add (stmt);
+ else if (gimple_code (stmt) == GIMPLE_RETURN)
+ gcc_assert (m_adjustments && m_adjustments->m_skip_return);
}
}
diff --git a/gcc/ipa-prop.cc b/gcc/ipa-prop.cc
index 9efaa5c..9442cdd 100644
--- a/gcc/ipa-prop.cc
+++ b/gcc/ipa-prop.cc
@@ -66,49 +66,6 @@ function_summary <ipcp_transformation *> *ipcp_transformation_sum = NULL;
/* Edge summary for IPA-CP edge information. */
ipa_edge_args_sum_t *ipa_edge_args_sum;
-/* Traits for a hash table for reusing already existing ipa_bits. */
-
-struct ipa_bit_ggc_hash_traits : public ggc_cache_remove <ipa_bits *>
-{
- typedef ipa_bits *value_type;
- typedef ipa_bits *compare_type;
- static hashval_t
- hash (const ipa_bits *p)
- {
- hashval_t t = (hashval_t) p->value.to_shwi ();
- return iterative_hash_host_wide_int (p->mask.to_shwi (), t);
- }
- static bool
- equal (const ipa_bits *a, const ipa_bits *b)
- {
- return a->value == b->value && a->mask == b->mask;
- }
- static const bool empty_zero_p = true;
- static void
- mark_empty (ipa_bits *&p)
- {
- p = NULL;
- }
- static bool
- is_empty (const ipa_bits *p)
- {
- return p == NULL;
- }
- static bool
- is_deleted (const ipa_bits *p)
- {
- return p == reinterpret_cast<const ipa_bits *> (1);
- }
- static void
- mark_deleted (ipa_bits *&p)
- {
- p = reinterpret_cast<ipa_bits *> (1);
- }
-};
-
-/* Hash table for avoid repeated allocations of equal ipa_bits. */
-static GTY ((cache)) hash_table<ipa_bit_ggc_hash_traits> *ipa_bits_hash_table;
-
/* Traits for a hash table for reusing ranges. */
struct ipa_vr_ggc_hash_traits : public ggc_cache_remove <ipa_vr *>
@@ -528,17 +485,6 @@ ipa_print_node_jump_functions_for_edge (FILE *f, struct cgraph_edge *cs)
ctx->dump (dump_file);
}
- if (jump_func->bits)
- {
- fprintf (f, " value: ");
- print_hex (jump_func->bits->value, f);
- fprintf (f, ", mask: ");
- print_hex (jump_func->bits->mask, f);
- fprintf (f, "\n");
- }
- else
- fprintf (f, " Unknown bits\n");
-
if (jump_func->m_vr)
{
jump_func->m_vr->dump (f);
@@ -2267,39 +2213,6 @@ ipa_get_callee_param_type (struct cgraph_edge *e, int i)
return NULL;
}
-/* Return ipa_bits with VALUE and MASK values, which can be either a newly
- allocated structure or a previously existing one shared with other jump
- functions and/or transformation summaries. */
-
-ipa_bits *
-ipa_get_ipa_bits_for_value (const widest_int &value, const widest_int &mask)
-{
- ipa_bits tmp;
- tmp.value = value;
- tmp.mask = mask;
-
- ipa_bits **slot = ipa_bits_hash_table->find_slot (&tmp, INSERT);
- if (*slot)
- return *slot;
-
- ipa_bits *res = ggc_alloc<ipa_bits> ();
- res->value = value;
- res->mask = mask;
- *slot = res;
-
- return res;
-}
-
-/* Assign to JF a pointer to ipa_bits structure with VALUE and MASK. Use hash
- table in order to avoid creating multiple same ipa_bits structures. */
-
-static void
-ipa_set_jfunc_bits (ipa_jump_func *jf, const widest_int &value,
- const widest_int &mask)
-{
- jf->bits = ipa_get_ipa_bits_for_value (value, mask);
-}
-
/* Return a pointer to an ipa_vr just like TMP, but either find it in
ipa_vr_hash_table or allocate it in GC memory. */
@@ -2393,10 +2306,31 @@ ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
addr_nonzero = true;
if (addr_nonzero)
+ vr.set_nonzero (TREE_TYPE (arg));
+
+ unsigned HOST_WIDE_INT bitpos;
+ unsigned align, prec = TYPE_PRECISION (TREE_TYPE (arg));
+
+ get_pointer_alignment_1 (arg, &align, &bitpos);
+
+ if (align > BITS_PER_UNIT
+ && opt_for_fn (cs->caller->decl, flag_ipa_bit_cp))
{
- vr.set_nonzero (TREE_TYPE (arg));
+ wide_int mask
+ = wi::bit_and_not (wi::mask (prec, false, prec),
+ wide_int::from (align / BITS_PER_UNIT - 1,
+ prec, UNSIGNED));
+ wide_int value = wide_int::from (bitpos / BITS_PER_UNIT, prec,
+ UNSIGNED);
+ irange_bitmask bm (value, mask);
+ if (!addr_nonzero)
+ vr.set_varying (TREE_TYPE (arg));
+ irange &r = as_a <irange> (vr);
+ r.update_bitmask (bm);
ipa_set_jfunc_vr (jfunc, vr);
}
+ else if (addr_nonzero)
+ ipa_set_jfunc_vr (jfunc, vr);
else
gcc_assert (!jfunc->m_vr);
}
@@ -2421,30 +2355,6 @@ ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
gcc_assert (!jfunc->m_vr);
}
- if (INTEGRAL_TYPE_P (TREE_TYPE (arg)) && !vr.undefined_p ())
- {
- irange &r = as_a <irange> (vr);
- irange_bitmask bm = r.get_bitmask ();
- signop sign = TYPE_SIGN (TREE_TYPE (arg));
- ipa_set_jfunc_bits (jfunc,
- widest_int::from (bm.value (), sign),
- widest_int::from (bm.mask (), sign));
- }
- else if (POINTER_TYPE_P (TREE_TYPE (arg)))
- {
- unsigned HOST_WIDE_INT bitpos;
- unsigned align;
-
- get_pointer_alignment_1 (arg, &align, &bitpos);
- widest_int mask = wi::bit_and_not
- (wi::mask<widest_int> (TYPE_PRECISION (TREE_TYPE (arg)), false),
- align / BITS_PER_UNIT - 1);
- widest_int value = bitpos / BITS_PER_UNIT;
- ipa_set_jfunc_bits (jfunc, value, mask);
- }
- else
- gcc_assert (!jfunc->bits);
-
if (is_gimple_ip_invariant (arg)
|| (VAR_P (arg)
&& is_global_var (arg)
@@ -4398,8 +4308,6 @@ ipa_check_create_edge_args (void)
ipa_edge_args_sum
= (new (ggc_alloc_no_dtor<ipa_edge_args_sum_t> ())
ipa_edge_args_sum_t (symtab, true));
- if (!ipa_bits_hash_table)
- ipa_bits_hash_table = hash_table<ipa_bit_ggc_hash_traits>::create_ggc (37);
if (!ipa_vr_hash_table)
ipa_vr_hash_table = hash_table<ipa_vr_ggc_hash_traits>::create_ggc (37);
}
@@ -4432,8 +4340,6 @@ ipa_free_all_node_params (void)
void
ipcp_transformation_initialize (void)
{
- if (!ipa_bits_hash_table)
- ipa_bits_hash_table = hash_table<ipa_bit_ggc_hash_traits>::create_ggc (37);
if (!ipa_vr_hash_table)
ipa_vr_hash_table = hash_table<ipa_vr_ggc_hash_traits>::create_ggc (37);
if (ipcp_transformation_sum == NULL)
@@ -4636,7 +4542,6 @@ ipcp_transformation_t::duplicate(cgraph_node *, cgraph_node *dst,
if (dst->inlined_to)
return;
dst_trans->m_agg_values = vec_safe_copy (src_trans->m_agg_values);
- dst_trans->bits = vec_safe_copy (src_trans->bits);
dst_trans->m_vr = vec_safe_copy (src_trans->m_vr);
}
@@ -4859,13 +4764,6 @@ ipa_write_jump_function (struct output_block *ob,
}
bp = bitpack_create (ob->main_stream);
- bp_pack_value (&bp, !!jump_func->bits, 1);
- streamer_write_bitpack (&bp);
- if (jump_func->bits)
- {
- streamer_write_widest_int (ob, jump_func->bits->value);
- streamer_write_widest_int (ob, jump_func->bits->mask);
- }
if (jump_func->m_vr)
jump_func->m_vr->streamer_write (ob);
else
@@ -4992,18 +4890,6 @@ ipa_read_jump_function (class lto_input_block *ib,
jump_func->agg.items->quick_push (item);
}
- struct bitpack_d bp = streamer_read_bitpack (ib);
- bool bits_known = bp_unpack_value (&bp, 1);
- if (bits_known)
- {
- widest_int value = streamer_read_widest_int (ib);
- widest_int mask = streamer_read_widest_int (ib);
- if (prevails)
- ipa_set_jfunc_bits (jump_func, value, mask);
- }
- else
- jump_func->bits = NULL;
-
ipa_vr vr;
vr.streamer_read (ib, data_in);
if (vr.known_p ())
@@ -5387,7 +5273,6 @@ useful_ipcp_transformation_info_p (ipcp_transformation *ts)
if (!ts)
return false;
if (!vec_safe_is_empty (ts->m_agg_values)
- || !vec_safe_is_empty (ts->bits)
|| !vec_safe_is_empty (ts->m_vr))
return true;
return false;
@@ -5420,19 +5305,6 @@ write_ipcp_transformation_info (output_block *ob, cgraph_node *node,
streamer_write_uhwi (ob, vec_safe_length (ts->m_vr));
for (const ipa_vr &parm_vr : ts->m_vr)
parm_vr.streamer_write (ob);
-
- streamer_write_uhwi (ob, vec_safe_length (ts->bits));
- for (const ipa_bits *bits_jfunc : ts->bits)
- {
- struct bitpack_d bp = bitpack_create (ob->main_stream);
- bp_pack_value (&bp, !!bits_jfunc, 1);
- streamer_write_bitpack (&bp);
- if (bits_jfunc)
- {
- streamer_write_widest_int (ob, bits_jfunc->value);
- streamer_write_widest_int (ob, bits_jfunc->mask);
- }
- }
}
/* Stream in the aggregate value replacement chain for NODE from IB. */
@@ -5473,24 +5345,6 @@ read_ipcp_transformation_info (lto_input_block *ib, cgraph_node *node,
parm_vr->streamer_read (ib, data_in);
}
}
- count = streamer_read_uhwi (ib);
- if (count > 0)
- {
- vec_safe_grow_cleared (ts->bits, count, true);
- for (i = 0; i < count; i++)
- {
- struct bitpack_d bp = streamer_read_bitpack (ib);
- bool known = bp_unpack_value (&bp, 1);
- if (known)
- {
- const widest_int value = streamer_read_widest_int (ib);
- const widest_int mask = streamer_read_widest_int (ib);
- ipa_bits *bits
- = ipa_get_ipa_bits_for_value (value, mask);
- (*ts->bits)[i] = bits;
- }
- }
- }
}
/* Write all aggregate replacement for nodes in set. */
@@ -5796,7 +5650,9 @@ ipcp_get_parm_bits (tree parm, tree *value, widest_int *mask)
{
cgraph_node *cnode = cgraph_node::get (current_function_decl);
ipcp_transformation *ts = ipcp_get_transformation_summary (cnode);
- if (!ts || vec_safe_length (ts->bits) == 0)
+ if (!ts
+ || vec_safe_length (ts->m_vr) == 0
+ || !irange::supports_p (TREE_TYPE (parm)))
return false;
int i = ts->get_param_index (current_function_decl, parm);
@@ -5810,120 +5666,20 @@ ipcp_get_parm_bits (tree parm, tree *value, widest_int *mask)
return false;
}
- vec<ipa_bits *, va_gc> &bits = *ts->bits;
- if (!bits[i])
+ vec<ipa_vr, va_gc> &vr = *ts->m_vr;
+ if (!vr[i].known_p ())
+ return false;
+ Value_Range tmp;
+ vr[i].get_vrange (tmp);
+ if (tmp.undefined_p () || tmp.varying_p ())
return false;
- *mask = bits[i]->mask;
- *value = wide_int_to_tree (TREE_TYPE (parm), bits[i]->value);
+ irange &r = as_a <irange> (tmp);
+ irange_bitmask bm = r.get_bitmask ();
+ *mask = widest_int::from (bm.mask (), TYPE_SIGN (TREE_TYPE (parm)));
+ *value = wide_int_to_tree (TREE_TYPE (parm), bm.value ());
return true;
}
-/* Update bits info of formal parameters of NODE as described in TS. */
-
-static void
-ipcp_update_bits (struct cgraph_node *node, ipcp_transformation *ts)
-{
- if (vec_safe_is_empty (ts->bits))
- return;
- vec<ipa_bits *, va_gc> &bits = *ts->bits;
- unsigned count = bits.length ();
- if (!count)
- return;
-
- auto_vec<int, 16> new_indices;
- bool need_remapping = false;
- clone_info *cinfo = clone_info::get (node);
- if (cinfo && cinfo->param_adjustments)
- {
- cinfo->param_adjustments->get_updated_indices (&new_indices);
- need_remapping = true;
- }
- auto_vec <tree, 16> parm_decls;
- push_function_arg_decls (&parm_decls, node->decl);
-
- for (unsigned i = 0; i < count; ++i)
- {
- tree parm;
- if (need_remapping)
- {
- if (i >= new_indices.length ())
- continue;
- int idx = new_indices[i];
- if (idx < 0)
- continue;
- parm = parm_decls[idx];
- }
- else
- parm = parm_decls[i];
- gcc_checking_assert (parm);
-
-
- if (!bits[i]
- || !(INTEGRAL_TYPE_P (TREE_TYPE (parm))
- || POINTER_TYPE_P (TREE_TYPE (parm)))
- || !is_gimple_reg (parm))
- continue;
-
- tree ddef = ssa_default_def (DECL_STRUCT_FUNCTION (node->decl), parm);
- if (!ddef)
- continue;
-
- if (dump_file)
- {
- fprintf (dump_file, "Adjusting mask for param %u to ", i);
- print_hex (bits[i]->mask, dump_file);
- fprintf (dump_file, "\n");
- }
-
- if (INTEGRAL_TYPE_P (TREE_TYPE (ddef)))
- {
- unsigned prec = TYPE_PRECISION (TREE_TYPE (ddef));
- signop sgn = TYPE_SIGN (TREE_TYPE (ddef));
- wide_int mask = wide_int::from (bits[i]->mask, prec, UNSIGNED);
- wide_int value = wide_int::from (bits[i]->value, prec, sgn);
- set_bitmask (ddef, value, mask);
- }
- else
- {
- unsigned tem = bits[i]->mask.to_uhwi ();
- unsigned HOST_WIDE_INT bitpos = bits[i]->value.to_uhwi ();
- unsigned align = tem & -tem;
- unsigned misalign = bitpos & (align - 1);
-
- if (align > 1)
- {
- if (dump_file)
- fprintf (dump_file, "Adjusting align: %u, misalign: %u\n", align, misalign);
-
- unsigned old_align, old_misalign;
- struct ptr_info_def *pi = get_ptr_info (ddef);
- bool old_known = get_ptr_info_alignment (pi, &old_align, &old_misalign);
-
- if (old_known
- && old_align > align)
- {
- if (dump_file)
- {
- fprintf (dump_file, "But alignment was already %u.\n", old_align);
- if ((old_misalign & (align - 1)) != misalign)
- fprintf (dump_file, "old_misalign (%u) and misalign (%u) mismatch\n",
- old_misalign, misalign);
- }
- continue;
- }
-
- if (old_known
- && ((misalign & (old_align - 1)) != old_misalign)
- && dump_file)
- fprintf (dump_file, "old_misalign (%u) and misalign (%u) mismatch\n",
- old_misalign, misalign);
-
- set_ptr_info_alignment (pi, align, misalign);
- }
- }
- }
-}
-
/* Update value range of formal parameters of NODE as described in TS. */
static void
@@ -5985,6 +5741,77 @@ ipcp_update_vr (struct cgraph_node *node, ipcp_transformation *ts)
fprintf (dump_file, "]\n");
}
set_range_info (ddef, tmp);
+
+ if (POINTER_TYPE_P (TREE_TYPE (parm))
+ && opt_for_fn (node->decl, flag_ipa_bit_cp))
+ {
+ irange &r = as_a<irange> (tmp);
+ irange_bitmask bm = r.get_bitmask ();
+ unsigned tem = bm.mask ().to_uhwi ();
+ unsigned HOST_WIDE_INT bitpos = bm.value ().to_uhwi ();
+ unsigned align = tem & -tem;
+ unsigned misalign = bitpos & (align - 1);
+
+ if (align > 1)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "Adjusting mask for param %u to ", i);
+ print_hex (bm.mask (), dump_file);
+ fprintf (dump_file, "\n");
+ }
+
+ if (dump_file)
+ fprintf (dump_file,
+ "Adjusting align: %u, misalign: %u\n",
+ align, misalign);
+
+ unsigned old_align, old_misalign;
+ struct ptr_info_def *pi = get_ptr_info (ddef);
+ bool old_known = get_ptr_info_alignment (pi, &old_align,
+ &old_misalign);
+
+ if (old_known && old_align > align)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "But alignment was already %u.\n",
+ old_align);
+ if ((old_misalign & (align - 1)) != misalign)
+ fprintf (dump_file,
+ "old_misalign (%u) and misalign "
+ "(%u) mismatch\n",
+ old_misalign, misalign);
+ }
+ continue;
+ }
+
+ if (dump_file
+ && old_known
+ && ((misalign & (old_align - 1)) != old_misalign))
+ fprintf (dump_file,
+ "old_misalign (%u) and misalign (%u) "
+ "mismatch\n",
+ old_misalign, misalign);
+
+ set_ptr_info_alignment (pi, align, misalign);
+ }
+ }
+ else if (dump_file && INTEGRAL_TYPE_P (TREE_TYPE (parm)))
+ {
+ irange &r = as_a<irange> (tmp);
+ irange_bitmask bm = r.get_bitmask ();
+ unsigned prec = TYPE_PRECISION (TREE_TYPE (parm));
+ if (wi::ne_p (bm.mask (), wi::shwi (-1, prec)))
+ {
+ fprintf (dump_file,
+ "Adjusting mask for param %u to ", i);
+ print_hex (bm.mask (), dump_file);
+ fprintf (dump_file, "\n");
+ }
+ }
}
}
}
@@ -6008,12 +5835,10 @@ ipcp_transform_function (struct cgraph_node *node)
ipcp_transformation *ts = ipcp_get_transformation_summary (node);
if (!ts
|| (vec_safe_is_empty (ts->m_agg_values)
- && vec_safe_is_empty (ts->bits)
&& vec_safe_is_empty (ts->m_vr)))
return 0;
ts->maybe_create_parm_idx_map (cfun->decl);
- ipcp_update_bits (node, ts);
ipcp_update_vr (node, ts);
if (vec_safe_is_empty (ts->m_agg_values))
return 0;
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index 7e033d2..a7f34a8 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -292,18 +292,6 @@ public:
array_slice<const ipa_argagg_value> m_elts;
};
-/* Information about zero/non-zero bits. */
-class GTY(()) ipa_bits
-{
-public:
- /* The propagated value. */
- widest_int value;
- /* Mask corresponding to the value.
- Similar to ccp_lattice_t, if xth bit of mask is 0,
- implies xth bit of value is constant. */
- widest_int mask;
-};
-
/* Info about value ranges. */
class GTY(()) ipa_vr
@@ -342,11 +330,6 @@ struct GTY (()) ipa_jump_func
and its description. */
struct ipa_agg_jump_function agg;
- /* Information about zero/non-zero bits. The pointed to structure is shared
- betweed different jump functions. Use ipa_set_jfunc_bits to set this
- field. */
- class ipa_bits *bits;
-
/* Information about value range, containing valid data only when vr_known is
true. The pointed to structure is shared betweed different jump
functions. Use ipa_set_jfunc_vr to set this field. */
@@ -940,15 +923,13 @@ struct GTY(()) ipcp_transformation
{
/* Default constructor. */
ipcp_transformation ()
- : m_agg_values (nullptr), bits (nullptr), m_vr (nullptr),
- m_uid_to_idx (nullptr)
+ : m_agg_values (nullptr), m_vr (nullptr), m_uid_to_idx (nullptr)
{ }
/* Default destructor. */
~ipcp_transformation ()
{
vec_free (m_agg_values);
- vec_free (bits);
vec_free (m_vr);
}
@@ -968,8 +949,6 @@ struct GTY(()) ipcp_transformation
/* Known aggregate values. */
vec<ipa_argagg_value, va_gc> *m_agg_values;
- /* Known bits information. */
- vec<ipa_bits *, va_gc> *bits;
/* Value range information. */
vec<ipa_vr, va_gc> *m_vr;
/* If there are many parameters, this is a vector sorted by their DECL_UIDs
@@ -1172,8 +1151,6 @@ tree ipa_get_indirect_edge_target (struct cgraph_edge *ie,
struct cgraph_edge *ipa_make_edge_direct_to_target (struct cgraph_edge *, tree,
bool speculative = false);
tree ipa_impossible_devirt_target (struct cgraph_edge *, tree);
-ipa_bits *ipa_get_ipa_bits_for_value (const widest_int &value,
- const widest_int &mask);
/* Functions related to both. */
diff --git a/gcc/ipa-sra.cc b/gcc/ipa-sra.cc
index edba364..495d7e6 100644
--- a/gcc/ipa-sra.cc
+++ b/gcc/ipa-sra.cc
@@ -185,6 +185,13 @@ struct GTY(()) isra_param_desc
unsigned split_candidate : 1;
/* Is this a parameter passing stuff by reference? */
unsigned by_ref : 1;
+ /* If set, this parameter can only be a candidate for removal if the function
+     is going to lose its return value. */
+ unsigned remove_only_when_retval_removed : 1;
+ /* If set, this parameter can only be a candidate for splitting if the
+     function is going to lose its return value.  Can only be meaningfully set
+ for by_ref parameters. */
+ unsigned split_only_when_retval_removed : 1;
/* Parameter hint set during IPA analysis when there is a caller which does
not construct the argument just to pass it to calls. Only meaningful for
by_ref parameters. */
@@ -206,7 +213,8 @@ struct gensum_param_desc
/* Number of accesses in the access tree rooted in field accesses. */
unsigned access_count;
- /* If the below is non-zero, this is the number of uses as actual arguments. */
+ /* If the below is non-zero, this is the number of uses as actual
+ arguments. */
int call_uses;
/* Number of times this parameter has been directly passed to. */
unsigned ptr_pt_count;
@@ -230,6 +238,13 @@ struct gensum_param_desc
without performing further checks (for example because it is a
REFERENCE_TYPE)? */
bool safe_ref;
+ /* If set, this parameter can only be a candidate for removal if the function
+     is going to lose its return value. */
+ bool remove_only_when_retval_removed;
+ /* If set, this parameter can only be a candidate for splitting if the
+     function is going to lose its return value.  Can only be meaningfully set
+ for by_ref parameters. */
+ bool split_only_when_retval_removed;
/* Only meaningful for by_ref parameters. If set, this parameter can only be
a split candidate if all callers pass pointers that are known to point to
a chunk of memory large enough to contain all accesses. */
@@ -445,6 +460,8 @@ ipa_sra_function_summaries::duplicate (cgraph_node *, cgraph_node *,
d->locally_unused = s->locally_unused;
d->split_candidate = s->split_candidate;
d->by_ref = s->by_ref;
+ d->remove_only_when_retval_removed = s->remove_only_when_retval_removed;
+ d->split_only_when_retval_removed = s->split_only_when_retval_removed;
d->not_specially_constructed = s->not_specially_constructed;
d->conditionally_dereferenceable = s->conditionally_dereferenceable;
d->safe_size_set = s->safe_size_set;
@@ -732,17 +749,21 @@ static void
dump_gensum_param_descriptor (FILE *f, gensum_param_desc *desc)
{
if (desc->locally_unused)
- fprintf (f, " unused with %i call_uses\n", desc->call_uses);
+ fprintf (f, " unused with %i call_uses%s\n", desc->call_uses,
+ desc->remove_only_when_retval_removed ?
+ " remove_only_when_retval_removed" : "");
if (!desc->split_candidate)
{
fprintf (f, " not a candidate\n");
return;
}
if (desc->by_ref)
- fprintf (f, " %s%s by_ref with %u pass throughs\n",
+ fprintf (f, " %s%s%s by_ref with %u pass throughs\n",
desc->safe_ref ? "safe" : "unsafe",
desc->conditionally_dereferenceable
- ? " conditionally_dereferenceable" : " ok",
+ ? " conditionally_dereferenceable" : "",
+ desc->split_only_when_retval_removed
+ ? " split_only_when_retval_removed" : "",
desc->ptr_pt_count);
for (gensum_param_access *acc = desc->accesses; acc; acc = acc->next_sibling)
@@ -790,6 +811,10 @@ dump_isra_param_descriptor (FILE *f, isra_param_desc *desc, bool hints)
fprintf (f, " param_size_limit: %u, size_reached: %u%s",
desc->param_size_limit, desc->size_reached,
desc->by_ref ? ", by_ref" : "");
+ if (desc->remove_only_when_retval_removed)
+ fprintf (f, ", remove_only_when_retval_removed");
+ if (desc->split_only_when_retval_removed)
+ fprintf (f, ", split_only_when_retval_removed");
if (desc->by_ref && desc->conditionally_dereferenceable)
fprintf (f, ", conditionally_dereferenceable");
if (hints)
@@ -881,16 +906,18 @@ get_single_param_flow_source (const isra_param_flow *param_flow)
/* Inspect all uses of NAME and simple arithmetic calculations involving NAME
in FUN represented with NODE and return a negative number if any of them is
- used for something else than either an actual call argument, simple
- arithmetic operation or debug statement. If there are no such uses, return
- the number of actual arguments that this parameter eventually feeds to (or
- zero if there is none). For any such parameter, mark PARM_NUM as one of its
- sources. ANALYZED is a bitmap that tracks which SSA names we have already
- started investigating. */
+ used for something else than either an actual call argument, simple return,
+ simple arithmetic operation or debug statement. If there are no such uses,
+ return the number of actual arguments that this parameter eventually feeds
+ to (or zero if there is none). If there are any simple return uses, set
+ DESC->remove_only_when_retval_removed. For any such parameter, mark
+ PARM_NUM as one of its sources. ANALYZED is a bitmap that tracks which SSA
+ names we have already started investigating. */
static int
isra_track_scalar_value_uses (function *fun, cgraph_node *node, tree name,
- int parm_num, bitmap analyzed)
+ int parm_num, bitmap analyzed,
+ gensum_param_desc *desc)
{
int res = 0;
imm_use_iterator imm_iter;
@@ -964,7 +991,7 @@ isra_track_scalar_value_uses (function *fun, cgraph_node *node, tree name,
if (bitmap_set_bit (analyzed, SSA_NAME_VERSION (lhs)))
{
int tmp = isra_track_scalar_value_uses (fun, node, lhs, parm_num,
- analyzed);
+ analyzed, desc);
if (tmp < 0)
{
res = tmp;
@@ -973,6 +1000,16 @@ isra_track_scalar_value_uses (function *fun, cgraph_node *node, tree name,
res += tmp;
}
}
+ else if (greturn *gr = dyn_cast<greturn *>(stmt))
+ {
+ tree rv = gimple_return_retval (gr);
+ if (rv != name)
+ {
+ res = -1;
+ break;
+ }
+ desc->remove_only_when_retval_removed = true;
+ }
else
{
res = -1;
@@ -985,11 +1022,12 @@ isra_track_scalar_value_uses (function *fun, cgraph_node *node, tree name,
/* Inspect all uses of PARM, which must be a gimple register, in FUN (which is
also described by NODE) and simple arithmetic calculations involving PARM
and return false if any of them is used for something else than either an
- actual call argument, simple arithmetic operation or debug statement. If
- there are no such uses, return true and store the number of actual arguments
- that this parameter eventually feeds to (or zero if there is none) to
- *CALL_USES_P. For any such parameter, mark PARM_NUM as one of its
- sources.
+ actual call argument, simple return, simple arithmetic operation or debug
+ statement. If there are no such uses, return true and store the number of
+ actual arguments that this parameter eventually feeds to (or zero if there
+ is none) to DESC->call_uses and set DESC->remove_only_when_retval_removed if
+ there are any uses in return statements. For any such parameter, mark
+ PARM_NUM as one of its sources.
This function is similar to ptr_parm_has_nonarg_uses but its results are
meant for unused parameter removal, as opposed to splitting of parameters
@@ -997,14 +1035,14 @@ isra_track_scalar_value_uses (function *fun, cgraph_node *node, tree name,
static bool
isra_track_scalar_param_local_uses (function *fun, cgraph_node *node, tree parm,
- int parm_num, int *call_uses_p)
+ int parm_num, gensum_param_desc *desc)
{
gcc_checking_assert (is_gimple_reg (parm));
tree name = ssa_default_def (fun, parm);
if (!name || has_zero_uses (name))
{
- *call_uses_p = 0;
+ desc->call_uses = 0;
return false;
}
@@ -1014,11 +1052,11 @@ isra_track_scalar_param_local_uses (function *fun, cgraph_node *node, tree parm,
bitmap analyzed = BITMAP_ALLOC (NULL);
int call_uses = isra_track_scalar_value_uses (fun, node, name, parm_num,
- analyzed);
+ analyzed, desc);
BITMAP_FREE (analyzed);
if (call_uses < 0)
return true;
- *call_uses_p = call_uses;
+ desc->call_uses = call_uses;
return false;
}
@@ -1026,9 +1064,11 @@ isra_track_scalar_param_local_uses (function *fun, cgraph_node *node, tree parm,
examine whether there are any nonarg uses that are not actual arguments or
otherwise infeasible uses. If so, return true, otherwise return false.
Create pass-through IPA flow records for any direct uses as argument calls
- and if returning false, store their number into *PT_COUNT_P. NODE and FUN
- must represent the function that is currently analyzed, PARM_NUM must be the
- index of the analyzed parameter.
+ and if returning false, store their number into DESC->ptr_pt_count. If
+ removal of return value would still allow splitting, return true but set
+ DESC->split_only_when_retval_removed. NODE and FUN must represent the
+ function that is currently analyzed, PARM_NUM must be the index of the
+ analyzed parameter.
This function is similar to isra_track_scalar_param_local_uses but its
results are meant for splitting of parameters passed by reference or turning
@@ -1037,7 +1077,7 @@ isra_track_scalar_param_local_uses (function *fun, cgraph_node *node, tree parm,
static bool
ptr_parm_has_nonarg_uses (cgraph_node *node, function *fun, tree parm,
- int parm_num, unsigned *pt_count_p)
+ int parm_num, gensum_param_desc *desc)
{
imm_use_iterator ui;
gimple *stmt;
@@ -1121,6 +1161,19 @@ ptr_parm_has_nonarg_uses (cgraph_node *node, function *fun, tree parm,
}
}
}
+ else if (greturn *gr = dyn_cast<greturn *>(stmt))
+ {
+ tree rv = gimple_return_retval (gr);
+ if (rv == name)
+ {
+ uses_ok++;
+ /* Analysis for feasibility of removal must have already reached
+ the conclusion that the flag must be set if it completed. */
+ gcc_assert (!desc->locally_unused
+ || desc->remove_only_when_retval_removed);
+ desc->split_only_when_retval_removed = true;
+ }
+ }
/* If the number of valid uses does not match the number of
uses in this stmt there is an unhandled use. */
@@ -1136,7 +1189,7 @@ ptr_parm_has_nonarg_uses (cgraph_node *node, function *fun, tree parm,
}
}
- *pt_count_p = pt_count;
+ desc->ptr_pt_count = pt_count;
return ret;
}
@@ -1166,7 +1219,6 @@ create_parameter_descriptors (cgraph_node *node,
if (dump_file && (dump_flags & TDF_DETAILS))
print_generic_expr (dump_file, parm, TDF_UID);
- int scalar_call_uses;
tree type = TREE_TYPE (parm);
if (TREE_THIS_VOLATILE (parm))
{
@@ -1194,15 +1246,15 @@ create_parameter_descriptors (cgraph_node *node,
}
if (is_gimple_reg (parm)
- && !isra_track_scalar_param_local_uses (fun, node, parm, num,
- &scalar_call_uses))
+ && !isra_track_scalar_param_local_uses (fun, node, parm, num, desc))
{
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " is a scalar with only %i call uses\n",
- scalar_call_uses);
-
desc->locally_unused = true;
- desc->call_uses = scalar_call_uses;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " is a scalar with only %i call uses%s\n",
+ desc->call_uses,
+ desc->remove_only_when_retval_removed
+ ? " and return uses": "");
}
if (POINTER_TYPE_P (type))
@@ -1253,8 +1305,7 @@ create_parameter_descriptors (cgraph_node *node,
"a va list\n");
continue;
}
- if (ptr_parm_has_nonarg_uses (node, fun, parm, num,
- &desc->ptr_pt_count))
+ if (ptr_parm_has_nonarg_uses (node, fun, parm, num, desc))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " not a candidate, reference has "
@@ -2628,6 +2679,8 @@ process_scan_results (cgraph_node *node, struct function *fun,
d->locally_unused = s->locally_unused;
d->split_candidate = s->split_candidate;
d->by_ref = s->by_ref;
+ d->remove_only_when_retval_removed = s->remove_only_when_retval_removed;
+ d->split_only_when_retval_removed = s->split_only_when_retval_removed;
d->conditionally_dereferenceable = s->conditionally_dereferenceable;
for (gensum_param_access *acc = s->accesses;
@@ -2789,6 +2842,8 @@ isra_write_node_summary (output_block *ob, cgraph_node *node)
bp_pack_value (&bp, desc->split_candidate, 1);
bp_pack_value (&bp, desc->by_ref, 1);
gcc_assert (!desc->not_specially_constructed);
+ bp_pack_value (&bp, desc->remove_only_when_retval_removed, 1);
+ bp_pack_value (&bp, desc->split_only_when_retval_removed, 1);
bp_pack_value (&bp, desc->conditionally_dereferenceable, 1);
gcc_assert (!desc->safe_size_set);
streamer_write_bitpack (&bp);
@@ -2913,6 +2968,8 @@ isra_read_node_info (struct lto_input_block *ib, cgraph_node *node,
desc->split_candidate = bp_unpack_value (&bp, 1);
desc->by_ref = bp_unpack_value (&bp, 1);
desc->not_specially_constructed = 0;
+ desc->remove_only_when_retval_removed = bp_unpack_value (&bp, 1);
+ desc->split_only_when_retval_removed = bp_unpack_value (&bp, 1);
desc->conditionally_dereferenceable = bp_unpack_value (&bp, 1);
desc->safe_size_set = 0;
}
@@ -4077,22 +4134,8 @@ zap_useless_ipcp_results (const isra_func_summary *ifs, ipcp_transformation *ts)
else if (removed_item)
ts->m_agg_values->truncate (dst_index);
- bool useful_bits = false;
- unsigned count = vec_safe_length (ts->bits);
- for (unsigned i = 0; i < count; i++)
- if ((*ts->bits)[i])
- {
- const isra_param_desc *desc = &(*ifs->m_parameters)[i];
- if (desc->locally_unused)
- (*ts->bits)[i] = NULL;
- else
- useful_bits = true;
- }
- if (!useful_bits)
- ts->bits = NULL;
-
bool useful_vr = false;
- count = vec_safe_length (ts->m_vr);
+ unsigned count = vec_safe_length (ts->m_vr);
for (unsigned i = 0; i < count; i++)
if ((*ts->m_vr)[i].known_p ())
{
@@ -4256,8 +4299,32 @@ adjust_parameter_descriptions (cgraph_node *node, isra_func_summary *ifs)
{
desc->locally_unused = false;
desc->split_candidate = false;
+ continue;
}
- else if (check_surviving
+
+ if (desc->split_only_when_retval_removed
+ && !ifs->m_return_ignored)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS)
+ && (desc->locally_unused || desc->split_candidate))
+ dump_bad_cond_indices.safe_push (i);
+
+ gcc_checking_assert (!desc->locally_unused
+ || desc->remove_only_when_retval_removed);
+ desc->locally_unused = false;
+ desc->split_candidate = false;
+ continue;
+ }
+ if (desc->remove_only_when_retval_removed
+ && !ifs->m_return_ignored)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS)
+ && (desc->locally_unused || desc->split_candidate))
+ dump_bad_cond_indices.safe_push (i);
+
+ desc->locally_unused = false;
+ }
+ if (check_surviving
&& (i >= surviving_params.length ()
|| !surviving_params[i]))
{
@@ -4269,67 +4336,65 @@ adjust_parameter_descriptions (cgraph_node *node, isra_func_summary *ifs)
if (dump_file && (dump_flags & TDF_DETAILS))
dump_dead_indices.safe_push (i);
}
- else
+
+ if (desc->split_candidate && desc->conditionally_dereferenceable)
{
- if (desc->split_candidate && desc->conditionally_dereferenceable)
- {
- gcc_assert (desc->safe_size_set);
- for (param_access *pa : *desc->accesses)
- if ((pa->unit_offset + pa->unit_size) > desc->safe_size)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- dump_bad_cond_indices.safe_push (i);
- desc->split_candidate = false;
- break;
- }
- }
+ gcc_assert (desc->safe_size_set);
+ for (param_access *pa : *desc->accesses)
+ if ((pa->unit_offset + pa->unit_size) > desc->safe_size)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ dump_bad_cond_indices.safe_push (i);
+ desc->split_candidate = false;
+ break;
+ }
+ }
- if (desc->split_candidate)
+ if (desc->split_candidate)
+ {
+ if (desc->by_ref && !desc->not_specially_constructed)
{
- if (desc->by_ref && !desc->not_specially_constructed)
- {
- int extra_factor
- = opt_for_fn (node->decl,
- param_ipa_sra_ptrwrap_growth_factor);
- desc->param_size_limit = extra_factor * desc->param_size_limit;
- }
- if (size_would_violate_limit_p (desc, desc->size_reached))
- desc->split_candidate = false;
+ int extra_factor
+ = opt_for_fn (node->decl,
+ param_ipa_sra_ptrwrap_growth_factor);
+ desc->param_size_limit = extra_factor * desc->param_size_limit;
}
+ if (size_would_violate_limit_p (desc, desc->size_reached))
+ desc->split_candidate = false;
+ }
- /* Avoid ICEs on size-mismatched VIEW_CONVERT_EXPRs when callers and
- callees don't agree on types in aggregates and we try to do both
- IPA-CP and IPA-SRA. */
- if (ipcp_ts && desc->split_candidate)
+ /* Avoid ICEs on size-mismatched VIEW_CONVERT_EXPRs when callers and
+ callees don't agree on types in aggregates and we try to do both
+ IPA-CP and IPA-SRA. */
+ if (ipcp_ts && desc->split_candidate)
+ {
+ ipa_argagg_value_list avl (ipcp_ts);
+ for (const param_access *pa : desc->accesses)
{
- ipa_argagg_value_list avl (ipcp_ts);
- for (const param_access *pa : desc->accesses)
+ if (!pa->certain)
+ continue;
+ tree value = avl.get_value (i, pa->unit_offset);
+ if (value
+ && ((tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)))
+ / BITS_PER_UNIT)
+ != pa->unit_size))
{
- if (!pa->certain)
- continue;
- tree value = avl.get_value (i, pa->unit_offset);
- if (value
- && ((tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)))
- / BITS_PER_UNIT)
- != pa->unit_size))
- {
- desc->split_candidate = false;
- if (dump_file && (dump_flags & TDF_DETAILS))
- dump_dead_indices.safe_push (i);
- break;
- }
+ desc->split_candidate = false;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ dump_dead_indices.safe_push (i);
+ break;
}
}
-
- if (desc->locally_unused || desc->split_candidate)
- ret = false;
}
+
+ if (desc->locally_unused || desc->split_candidate)
+ ret = false;
}
dump_list_of_param_indices (node, "are dead on arrival or have a type "
"mismatch with IPA-CP", dump_dead_indices);
- dump_list_of_param_indices (node, "are not safe to dereference in all "
- "callers", dump_bad_cond_indices);
+ dump_list_of_param_indices (node, "fail additional requirements ",
+ dump_bad_cond_indices);
return ret;
}
diff --git a/gcc/ipa-utils.cc b/gcc/ipa-utils.cc
index 956c629..6024ac6 100644
--- a/gcc/ipa-utils.cc
+++ b/gcc/ipa-utils.cc
@@ -651,13 +651,14 @@ ipa_merge_profiles (struct cgraph_node *dst,
{
edge srce = EDGE_SUCC (srcbb, i);
edge dste = EDGE_SUCC (dstbb, i);
- dste->probability =
- dste->probability * dstbb->count.ipa ().probability_in
- (dstbb->count.ipa ()
- + srccount.ipa ())
- + srce->probability * srcbb->count.ipa ().probability_in
- (dstbb->count.ipa ()
- + srccount.ipa ());
+ profile_count sum =
+ dstbb->count.ipa () + srccount.ipa ();
+ if (sum.nonzero_p ())
+ dste->probability =
+ dste->probability * dstbb->count.ipa ().probability_in
+ (sum)
+ + srce->probability * srcbb->count.ipa ().probability_in
+ (sum);
}
dstbb->count = dstbb->count.ipa () + srccount.ipa ();
}
diff --git a/gcc/ira-costs.cc b/gcc/ira-costs.cc
index 8c93ace..d9e700e 100644
--- a/gcc/ira-costs.cc
+++ b/gcc/ira-costs.cc
@@ -1947,8 +1947,15 @@ find_costs_and_classes (FILE *dump_file)
}
if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
i_mem_cost = 0;
- else
- i_mem_cost -= equiv_savings;
+ else if (equiv_savings < 0)
+ i_mem_cost = -equiv_savings;
+ else if (equiv_savings > 0)
+ {
+ i_mem_cost = 0;
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ i_costs[k] += equiv_savings;
+ }
+
best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
best = ALL_REGS;
alt_class = NO_REGS;
diff --git a/gcc/ira-int.h b/gcc/ira-int.h
index e7460cf..0685e1f 100644
--- a/gcc/ira-int.h
+++ b/gcc/ira-int.h
@@ -619,7 +619,7 @@ public:
/* RTL representation of the stack slot. */
rtx mem;
/* Size of the stack slot. */
- poly_uint64_pod width;
+ poly_uint64 width;
};
/* The number of elements in the following array. */
diff --git a/gcc/ira.cc b/gcc/ira.cc
index 0b0d460..d7530f0 100644
--- a/gcc/ira.cc
+++ b/gcc/ira.cc
@@ -5542,6 +5542,9 @@ bool ira_conflicts_p;
/* Saved between IRA and reload. */
static int saved_flag_ira_share_spill_slots;
+/* Set to true while in IRA. */
+bool ira_in_progress = false;
+
/* This is the main entry of IRA. */
static void
ira (FILE *f)
@@ -6110,7 +6113,9 @@ public:
}
unsigned int execute (function *) final override
{
+ ira_in_progress = true;
ira (dump_file);
+ ira_in_progress = false;
return 0;
}
diff --git a/gcc/jit/ChangeLog b/gcc/jit/ChangeLog
index 1e60eae..df2e3b8 100644
--- a/gcc/jit/ChangeLog
+++ b/gcc/jit/ChangeLog
@@ -1,3 +1,8 @@
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * dummy-frontend.cc (jit_langhook_init): Update for change to
+ diagnostic_context callbacks.
+
2023-08-31 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
* jit-playback.cc: Change spelling to macOS.
diff --git a/gcc/jit/dummy-frontend.cc b/gcc/jit/dummy-frontend.cc
index 8b7294e..a729086 100644
--- a/gcc/jit/dummy-frontend.cc
+++ b/gcc/jit/dummy-frontend.cc
@@ -589,8 +589,8 @@ jit_langhook_init (void)
}
gcc_assert (global_dc);
- global_dc->begin_diagnostic = jit_begin_diagnostic;
- global_dc->end_diagnostic = jit_end_diagnostic;
+ diagnostic_starter (global_dc) = jit_begin_diagnostic;
+ diagnostic_finalizer (global_dc) = jit_end_diagnostic;
build_common_tree_nodes (false);
diff --git a/gcc/lra-constraints.cc b/gcc/lra-constraints.cc
index 76a1393..d10a2a3 100644
--- a/gcc/lra-constraints.cc
+++ b/gcc/lra-constraints.cc
@@ -3399,12 +3399,12 @@ base_plus_disp_to_reg (struct address_info *ad, rtx disp)
/* Make reload of index part of address AD. Return the new
pseudo. */
static rtx
-index_part_to_reg (struct address_info *ad)
+index_part_to_reg (struct address_info *ad, enum reg_class index_class)
{
rtx new_reg;
new_reg = lra_create_new_reg (GET_MODE (*ad->index), NULL_RTX,
- INDEX_REG_CLASS, NULL, "index term");
+ index_class, NULL, "index term");
expand_mult (GET_MODE (*ad->index), *ad->index_term,
GEN_INT (get_index_scale (ad)), new_reg, 1);
return new_reg;
@@ -3659,13 +3659,14 @@ process_address_1 (int nop, bool check_only_p,
/* If INDEX_REG_CLASS is assigned to base_term already and isn't to
index_term, swap them so to avoid assigning INDEX_REG_CLASS to both
when INDEX_REG_CLASS is a single register class. */
+ enum reg_class index_cl = index_reg_class (curr_insn);
if (ad.base_term != NULL
&& ad.index_term != NULL
- && ira_class_hard_regs_num[INDEX_REG_CLASS] == 1
+ && ira_class_hard_regs_num[index_cl] == 1
&& REG_P (*ad.base_term)
&& REG_P (*ad.index_term)
- && in_class_p (*ad.base_term, INDEX_REG_CLASS, NULL)
- && ! in_class_p (*ad.index_term, INDEX_REG_CLASS, NULL))
+ && in_class_p (*ad.base_term, index_cl, NULL)
+ && ! in_class_p (*ad.index_term, index_cl, NULL))
{
std::swap (ad.base, ad.index);
std::swap (ad.base_term, ad.index_term);
@@ -3681,7 +3682,7 @@ process_address_1 (int nop, bool check_only_p,
REGNO (*ad.base_term)) != NULL_RTX)
? after : NULL),
base_reg_class (ad.mode, ad.as, ad.base_outer_code,
- get_index_code (&ad)))))
+ get_index_code (&ad), curr_insn))))
{
change_p = true;
if (ad.base_term2 != NULL)
@@ -3689,7 +3690,7 @@ process_address_1 (int nop, bool check_only_p,
}
if (ad.index_term != NULL
&& process_addr_reg (ad.index_term, check_only_p,
- before, NULL, INDEX_REG_CLASS))
+ before, NULL, index_cl))
change_p = true;
/* Target hooks sometimes don't treat extra-constraint addresses as
@@ -3731,7 +3732,8 @@ process_address_1 (int nop, bool check_only_p,
rtx_insn *last = get_last_insn ();
int code = -1;
enum reg_class cl = base_reg_class (ad.mode, ad.as,
- SCRATCH, SCRATCH);
+ SCRATCH, SCRATCH,
+ curr_insn);
rtx addr = *ad.inner;
new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr");
@@ -3794,9 +3796,10 @@ process_address_1 (int nop, bool check_only_p,
/* index * scale + disp => new base + index * scale,
case (1) above. */
enum reg_class cl = base_reg_class (ad.mode, ad.as, PLUS,
- GET_CODE (*ad.index));
+ GET_CODE (*ad.index),
+ curr_insn);
- lra_assert (INDEX_REG_CLASS != NO_REGS);
+ lra_assert (index_cl != NO_REGS);
new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "disp");
lra_emit_move (new_reg, *ad.disp);
*ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
@@ -3855,7 +3858,7 @@ process_address_1 (int nop, bool check_only_p,
*ad.base_term = XEXP (SET_SRC (set), 0);
*ad.disp_term = XEXP (SET_SRC (set), 1);
cl = base_reg_class (ad.mode, ad.as, ad.base_outer_code,
- get_index_code (&ad));
+ get_index_code (&ad), curr_insn);
regno = REGNO (*ad.base_term);
if (regno >= FIRST_PSEUDO_REGISTER
&& cl != lra_get_allocno_class (regno))
@@ -3892,14 +3895,15 @@ process_address_1 (int nop, bool check_only_p,
changed pseudo on the equivalent memory and a subreg of the
pseudo onto the memory of different mode for which the scale is
prohibitted. */
- new_reg = index_part_to_reg (&ad);
+ new_reg = index_part_to_reg (&ad, index_cl);
*ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
*ad.base_term, new_reg);
}
else
{
enum reg_class cl = base_reg_class (ad.mode, ad.as,
- SCRATCH, SCRATCH);
+ SCRATCH, SCRATCH,
+ curr_insn);
rtx addr = *ad.inner;
new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr");
@@ -4649,7 +4653,7 @@ curr_insn_transform (bool check_only_p)
push_to_sequence (before);
rclass = base_reg_class (GET_MODE (op), MEM_ADDR_SPACE (op),
- MEM, SCRATCH);
+ MEM, SCRATCH, curr_insn);
if (GET_RTX_CLASS (code) == RTX_AUTOINC)
new_reg = emit_inc (rclass, *loc, *loc,
/* This value does not matter for MODIFY. */
diff --git a/gcc/lra-eliminations.cc b/gcc/lra-eliminations.cc
index 4daaff1..f3b75e0 100644
--- a/gcc/lra-eliminations.cc
+++ b/gcc/lra-eliminations.cc
@@ -166,7 +166,7 @@ static class lra_elim_table self_elim_table;
/* Offsets should be used to restore original offsets for eliminable
hard register which just became not eliminable. Zero,
otherwise. */
-static poly_int64_pod self_elim_offsets[FIRST_PSEUDO_REGISTER];
+static poly_int64 self_elim_offsets[FIRST_PSEUDO_REGISTER];
/* Map: hard regno -> RTL presentation. RTL presentations of all
potentially eliminable hard registers are stored in the map. */
@@ -1294,14 +1294,14 @@ init_elim_table (void)
will cause, e.g., gen_rtx_REG (Pmode, STACK_POINTER_REGNUM) to
equal stack_pointer_rtx. We depend on this. Threfore we switch
off that we are in LRA temporarily. */
- lra_in_progress = 0;
+ lra_in_progress = false;
for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
{
ep->from_rtx = gen_rtx_REG (Pmode, ep->from);
ep->to_rtx = gen_rtx_REG (Pmode, ep->to);
eliminable_reg_rtx[ep->from] = ep->from_rtx;
}
- lra_in_progress = 1;
+ lra_in_progress = true;
}
/* Function for initialization of elimination once per function. It
diff --git a/gcc/lra.cc b/gcc/lra.cc
index 361f84f..bcc00ff 100644
--- a/gcc/lra.cc
+++ b/gcc/lra.cc
@@ -2262,8 +2262,8 @@ update_inc_notes (void)
}
}
-/* Set to 1 while in lra. */
-int lra_in_progress;
+/* Set to true while in LRA. */
+bool lra_in_progress = false;
/* Start of pseudo regnos before the LRA. */
int lra_new_regno_start;
@@ -2360,7 +2360,7 @@ lra (FILE *f)
if (flag_checking)
check_rtl (false);
- lra_in_progress = 1;
+ lra_in_progress = true;
lra_live_range_iter = lra_coalesce_iter = lra_constraint_iter = 0;
lra_assignment_iter = lra_assignment_iter_after_spill = 0;
@@ -2552,7 +2552,7 @@ lra (FILE *f)
ira_restore_scratches (lra_dump_file);
lra_eliminate (true, false);
lra_final_code_change ();
- lra_in_progress = 0;
+ lra_in_progress = false;
if (live_p)
lra_clear_live_ranges ();
lra_live_ranges_finish ();
diff --git a/gcc/lto-streamer-in.cc b/gcc/lto-streamer-in.cc
index d3128fc..b257c13 100644
--- a/gcc/lto-streamer-in.cc
+++ b/gcc/lto-streamer-in.cc
@@ -1122,13 +1122,16 @@ input_cfg (class lto_input_block *ib, class data_in *data_in,
loop->estimate_state = streamer_read_enum (ib, loop_estimation, EST_LAST);
loop->any_upper_bound = streamer_read_hwi (ib);
if (loop->any_upper_bound)
- loop->nb_iterations_upper_bound = streamer_read_widest_int (ib);
+ loop->nb_iterations_upper_bound
+ = bound_wide_int::from (streamer_read_widest_int (ib), SIGNED);
loop->any_likely_upper_bound = streamer_read_hwi (ib);
if (loop->any_likely_upper_bound)
- loop->nb_iterations_likely_upper_bound = streamer_read_widest_int (ib);
+ loop->nb_iterations_likely_upper_bound
+ = bound_wide_int::from (streamer_read_widest_int (ib), SIGNED);
loop->any_estimate = streamer_read_hwi (ib);
if (loop->any_estimate)
- loop->nb_iterations_estimate = streamer_read_widest_int (ib);
+ loop->nb_iterations_estimate
+ = bound_wide_int::from (streamer_read_widest_int (ib), SIGNED);
/* Read OMP SIMD related info. */
loop->safelen = streamer_read_hwi (ib);
@@ -1888,13 +1891,17 @@ lto_input_tree_1 (class lto_input_block *ib, class data_in *data_in,
tree type = stream_read_tree_ref (ib, data_in);
unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib);
unsigned HOST_WIDE_INT i;
- HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+ HOST_WIDE_INT abuf[WIDE_INT_MAX_INL_ELTS], *a = abuf;
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ a = XALLOCAVEC (HOST_WIDE_INT, len);
for (i = 0; i < len; i++)
a[i] = streamer_read_hwi (ib);
gcc_assert (TYPE_PRECISION (type) <= WIDE_INT_MAX_PRECISION);
- result = wide_int_to_tree (type, wide_int::from_array
- (a, len, TYPE_PRECISION (type)));
+ result
+ = wide_int_to_tree (type,
+ wide_int::from_array (a, len,
+ TYPE_PRECISION (type)));
streamer_tree_cache_append (data_in->reader_cache, result, hash);
}
else if (tag == LTO_tree_scc || tag == LTO_trees)
diff --git a/gcc/lto-streamer-out.cc b/gcc/lto-streamer-out.cc
index 5ffa895..a1bbea8 100644
--- a/gcc/lto-streamer-out.cc
+++ b/gcc/lto-streamer-out.cc
@@ -2173,13 +2173,26 @@ output_cfg (struct output_block *ob, struct function *fn)
loop_estimation, EST_LAST, loop->estimate_state);
streamer_write_hwi (ob, loop->any_upper_bound);
if (loop->any_upper_bound)
- streamer_write_widest_int (ob, loop->nb_iterations_upper_bound);
+ {
+ widest_int w = widest_int::from (loop->nb_iterations_upper_bound,
+ SIGNED);
+ streamer_write_widest_int (ob, w);
+ }
streamer_write_hwi (ob, loop->any_likely_upper_bound);
if (loop->any_likely_upper_bound)
- streamer_write_widest_int (ob, loop->nb_iterations_likely_upper_bound);
+ {
+ widest_int w
+ = widest_int::from (loop->nb_iterations_likely_upper_bound,
+ SIGNED);
+ streamer_write_widest_int (ob, w);
+ }
streamer_write_hwi (ob, loop->any_estimate);
if (loop->any_estimate)
- streamer_write_widest_int (ob, loop->nb_iterations_estimate);
+ {
+ widest_int w = widest_int::from (loop->nb_iterations_estimate,
+ SIGNED);
+ streamer_write_widest_int (ob, w);
+ }
/* Write OMP SIMD related info. */
streamer_write_hwi (ob, loop->safelen);
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index d98880e..ca61d04 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,7 @@
+2023-09-30 Eugene Rozenfeld <erozen@microsoft.com>
+
+ * Make-lang.in: Make create_fdas_for_lto1 target not .PHONY.
+
2023-09-19 Thomas Schwinge <thomas@codesourcery.com>
Pan Li <pan2.li@intel.com>
diff --git a/gcc/lto/Make-lang.in b/gcc/lto/Make-lang.in
index 98aa9f4..7dc0a9f 100644
--- a/gcc/lto/Make-lang.in
+++ b/gcc/lto/Make-lang.in
@@ -108,8 +108,6 @@ lto/lto-dump.o: $(LTO_OBJS)
components_in_prev = "bfd opcodes binutils fixincludes gas gcc gmp mpfr mpc isl gold intl ld libbacktrace libcpp libcody libdecnumber libiberty libiberty-linker-plugin libiconv zlib lto-plugin libctf libsframe"
components_in_prev_target = "libstdc++-v3 libsanitizer libvtv libgcc libbacktrace libphobos zlib libgomp libatomic"
-.PHONY: create_fdas_for_lto1
-
lto1.fda: create_fdas_for_lto1
$(PROFILE_MERGER) $(shell ls -ha lto1_*.fda) --output_file lto1.fda -gcov_version 2
@@ -134,6 +132,8 @@ create_fdas_for_lto1: ../stage1-gcc/lto1$(exeext) ../prev-gcc/$(PERF_DATA)
fi; \
done;
+ $(STAMP) $@
+
# LTO testing is done as part of C/C++/Fortran etc. testing.
check-lto:
diff --git a/gcc/m2/ChangeLog b/gcc/m2/ChangeLog
index cd18211..d690ec9 100644
--- a/gcc/m2/ChangeLog
+++ b/gcc/m2/ChangeLog
@@ -1,3 +1,232 @@
+2023-10-17 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/111756
+ * Make-lang.in (CM2DEP): New define conditionally set if
+ ($(CXXDEPMODE),depmode=gcc3).
+ (GM2_1): Use $(CM2DEP).
+ (m2/gm2-gcc/%.o): Ensure $(@D)/$(DEPDIR) is created.
+ Add $(CM2DEP) to the $(COMPILER) command and use $(POSTCOMPILE).
+ (m2/gm2-gcc/m2configure.o): Ditto.
+ (m2/gm2-lang.o): Ditto.
+ (m2/m2pp.o): Ditto.
+ (m2/gm2-gcc/rtegraph.o): Ditto.
+ (m2/mc-boot/$(SRC_PREFIX)%.o): Ditto.
+ (m2/mc-boot-ch/$(SRC_PREFIX)%.o): Ditto.
+ (m2/mc-boot-ch/$(SRC_PREFIX)%.o): Ditto.
+ (m2/mc-boot/main.o): Ditto.
+ (mcflex.o): Ditto.
+ (m2/gm2-libs-boot/M2RTS.o): Ditto.
+ (m2/gm2-libs-boot/%.o): Ditto.
+ (m2/gm2-libs-boot/%.o): Ditto.
+ (m2/gm2-libs-boot/RTcodummy.o): Ditto.
+ (m2/gm2-libs-boot/RTintdummy.o): Ditto.
+ (m2/gm2-libs-boot/wrapc.o): Ditto.
+ (m2/gm2-libs-boot/UnixArgs.o): Ditto.
+ (m2/gm2-libs-boot/choosetemp.o): Ditto.
+ (m2/gm2-libs-boot/errno.o): Ditto.
+ (m2/gm2-libs-boot/dtoa.o): Ditto.
+ (m2/gm2-libs-boot/ldtoa.o): Ditto.
+ (m2/gm2-libs-boot/termios.o): Ditto.
+ (m2/gm2-libs-boot/SysExceptions.o): Ditto.
+ (m2/gm2-libs-boot/SysStorage.o): Ditto.
+ (m2/gm2-compiler-boot/M2GCCDeclare.o): Ditto.
+ (m2/gm2-compiler-boot/M2Error.o): Ditto.
+ (m2/gm2-compiler-boot/%.o): Ditto.
+ (m2/gm2-compiler-boot/%.o): Ditto.
+ (m2/gm2-compiler-boot/m2flex.o): Ditto.
+ (m2/gm2-compiler/%.o): Ditto.
+ (m2/gm2-compiler/m2flex.o): Ditto.
+ (m2/gm2-libs-iso/%.o): Ditto.
+ (m2/gm2-libs/%.o): Ditto.
+ (m2/gm2-libs/%.o): Ditto.
+ (m2/gm2-libs/choosetemp.o): Ditto.
+ (m2/boot-bin/mklink$(exeext)): Ditto.
+ (m2/pge-boot/%.o): Ditto.
+ (m2/pge-boot/%.o): Ditto.
+ (m2/gm2-compiler/%.o): Ensure $(@D)/$(DEPDIR) is created and use
+ $(POSTCOMPILE).
+ (m2/gm2-compiler/%.o): Ditto.
+ (m2/gm2-libs-iso/%.o): Ditto.
+ (m2/gm2-libs/%.o): Ditto.
+ * README: Purge out of date info.
+ * gm2-compiler/M2Comp.mod (MakeSaveTempsFileNameExt): Import.
+ (OnExitDelete): Import.
+ (GetModuleDefImportStatementList): Import.
+ (GetModuleModImportStatementList): Import.
+ (GetImportModule): Import.
+ (IsImportStatement): Import.
+ (IsImport): Import.
+ (GetImportStatementList): Import.
+ (File): Import.
+ (Close): Import.
+ (EOF): Import.
+ (IsNoError): Import.
+ (WriteLine): Import.
+ (WriteChar): Import.
+ (FlushOutErr): Import.
+ (WriteS): Import.
+ (OpenToRead): Import.
+ (OpenToWrite): Import.
+ (ReadS): Import.
+ (WriteS): Import.
+ (GetM): Import.
+ (GetMM): Import.
+ (GetDepTarget): Import.
+ (GetMF): Import.
+ (GetMP): Import.
+ (GetObj): Import.
+ (GetMD): Import.
+ (GetMMD): Import.
+ (GenerateDefDependency): New procedure.
+ (GenerateDependenciesFromImport): New procedure.
+ (GenerateDependenciesFromList): New procedure.
+ (GenerateDependencies): New procedure.
+ (Compile): Re-write.
+ (compile): Re-format.
+ (CreateFileStem): New procedure function.
+ (DoPass0): Re-write.
+ (IsLibrary): New procedure function.
+ (IsUnique): New procedure function.
+ (Append): New procedure.
+ (MergeDep): New procedure.
+ (GetRuleTarget): New procedure function.
+ (ReadDepContents): New procedure function.
+ (WriteDep): New procedure.
+ (WritePhonyDep): New procedure.
+ (WriteDepContents): New procedure.
+ (CreateDepFilename): New procedure function.
+ (Pass0CheckDef): New procedure function.
+ (Pass0CheckMod): New procedure function.
+ (DoPass0): Re-write.
+ (DepContent): New variable.
+ (DepOutput): New variable.
+ (BaseName): New procedure function.
+ * gm2-compiler/M2GCCDeclare.mod (PrintTerse): Handle IsImport.
+ Replace IsGnuAsmVolatile with IsGnuAsm.
+ * gm2-compiler/M2Options.def (EXPORT QUALIFIED): Remove list.
+ (SetM): New procedure.
+ (GetM): New procedure function.
+ (SetMM): New procedure.
+ (GetMM): New procedure function.
+ (SetMF): New procedure.
+ (GetMF): New procedure function.
+ (SetPPOnly): New procedure.
+ (GetB): New procedure function.
+ (SetMD): New procedure.
+ (GetMD): New procedure function.
+ (SetMMD): New procedure.
+ (GetMMD): New procedure function.
+ (SetMQ): New procedure.
+ (SetMT): New procedure.
+ (GetMT): New procedure function.
+ (GetDepTarget): New procedure function.
+ (SetMP): New procedure.
+ (GetMP): New procedure function.
+ (SetObj): New procedure.
+ (SetSaveTempsDir): New procedure.
+ * gm2-compiler/M2Options.mod (SetM): New procedure.
+ (GetM): New procedure function.
+ (SetMM): New procedure.
+ (GetMM): New procedure function.
+ (SetMF): New procedure.
+ (GetMF): New procedure function.
+ (SetPPOnly): New procedure.
+ (GetB): New procedure function.
+ (SetMD): New procedure.
+ (GetMD): New procedure function.
+ (SetMMD): New procedure.
+ (GetMMD): New procedure function.
+ (SetMQ): New procedure.
+ (SetMT): New procedure.
+ (GetMT): New procedure function.
+ (GetDepTarget): New procedure function.
+ (SetMP): New procedure.
+ (GetMP): New procedure function.
+ (SetObj): New procedure.
+ (SetSaveTempsDir): New procedure.
+ * gm2-compiler/M2Preprocess.def (PreprocessModule): New parameters
+ topSource and outputDep. Re-write.
+ (MakeSaveTempsFileNameExt): New procedure function.
+ (OnExitDelete): New procedure function.
+ * gm2-compiler/M2Preprocess.mod (GetM): Import.
+ (GetMM): Import.
+ (OnExitDelete): Add debugging message.
+ (RemoveFile): Add debugging message.
+ (BaseName): Remove.
+ (BuildCommandLineExecute): New procedure function.
+ * gm2-compiler/M2Search.def (SetDefExtension): Remove unnecessary
+ spacing.
+ * gm2-compiler/SymbolTable.mod (GetSymName): Handle ImportSym and
+ ImportStatementSym.
+ * gm2-gcc/m2options.h (M2Options_SetMD): New function.
+ (M2Options_GetMD): New function.
+ (M2Options_SetMMD): New function.
+ (M2Options_GetMMD): New function.
+ (M2Options_SetM): New function.
+ (M2Options_GetM): New function.
+ (M2Options_SetMM): New function.
+ (M2Options_GetMM): New function.
+ (M2Options_GetMQ): New function.
+ (M2Options_SetMF): New function.
+ (M2Options_GetMF): New function.
+ (M2Options_SetMT): New function.
+ (M2Options_SetMP): New function.
+ (M2Options_GetMP): New function.
+ (M2Options_GetDepTarget): New function.
+ * gm2-lang.cc (gm2_langhook_init): Correct comment case.
+ (gm2_langhook_init_options): Add case OPT_M and
+ OPT_MM.
+ (gm2_langhook_post_options): Add case OPT_MF, OPT_MT,
+ OPT_MD and OPT_MMD.
+ * lang-specs.h (M2CPP): Pass though MF option.
+ (MDMMD): New define. Add MDMMD to "@modula-2".
+
+2023-10-15 Gaius Mulley <gaiusmod2@gmail.com>
+
+ * Make-lang.in (m2.tags): New rule.
+
+2023-10-11 Gaius Mulley <gaiusmod2@gmail.com>
+
+ * gm2-compiler/M2GCCDeclare.mod (DeclareSubrange): Check
+ the base type of the subrange against the ZTYPE and call
+ DeclareSubrangeNarrow if necessary.
+ (DeclareSubrangeNarrow): New procedure function.
+
+2023-10-11 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/111675
+ * gm2-compiler/M2CaseList.mod (appendTree): Replace
+ InitStringCharStar with InitString.
+ * gm2-compiler/M2GCCDeclare.mod: Import AreConstantsEqual.
+ (DeclareSubrange): Add zero alignment test and call
+ BuildSmallestTypeRange if necessary.
+ (WalkSubrangeDependants): Walk the align expression.
+ (IsSubrangeDependants): Test the align expression.
+ * gm2-compiler/M2Quads.mod (BuildStringAdrParam): Correct end name.
+ * gm2-compiler/P2SymBuild.mod (BuildTypeAlignment): Allow subranges
+ to be zero aligned (packed).
+ * gm2-compiler/SymbolTable.mod (Subrange): Add Align field.
+ (MakeSubrange): Set Align to NulSym.
+ (PutAlignment): Assign Subrange.Align to align.
+ (GetAlignment): Return Subrange.Align.
+ * gm2-gcc/m2expr.cc (noBitsRequired): Rewrite.
+ (calcNbits): Rename ...
+ (m2expr_calcNbits): ... to this and test for negative values.
+ (m2expr_BuildTBitSize): Replace calcNBits with m2expr_calcNbits.
+ * gm2-gcc/m2expr.def (calcNbits): Export.
+ * gm2-gcc/m2expr.h (m2expr_calcNbits): New prototype.
+ * gm2-gcc/m2type.cc (noBitsRequired): Remove.
+ (m2type_BuildSmallestTypeRange): Call m2expr_calcNbits.
+ (m2type_BuildSubrangeType): Create range_type from
+ build_range_type (type, lowval, highval).
+
+2023-09-29 Gaius Mulley <gaiusmod2@gmail.com>
+
+ * gm2-compiler/M2Quads.mod (EndBuildFor): Improve
+ block comments.
+ * gm2-libs-iso/SysClock.mod (ExtractDate): Replace
+ testDays with yearOfDays. New local variable monthOfDays.
+
2023-09-26 Gaius Mulley <gaiusmod2@gmail.com>
PR modula2/111510
diff --git a/gcc/m2/Make-lang.in b/gcc/m2/Make-lang.in
index a541518..d826d4c 100644
--- a/gcc/m2/Make-lang.in
+++ b/gcc/m2/Make-lang.in
@@ -27,7 +27,14 @@ GM2_CROSS_NAME = `echo gm2|sed '$(program_transform_cross_name)'`
M2_MAINTAINER = no
-GM2_1 = ./gm2 -B./m2/stage1 -g -fm2-g
+# CM2DEP must match the COMPILE and POSTCOMPILE defines in gcc/Make-lang.in
+ifeq ($(CXXDEPMODE),depmode=gcc3)
+CM2DEP=-MT $@ -MMD -MP -MF $(@D)/$(DEPDIR)/$(*F).TPo
+else
+CM2DEP=
+endif
+
+GM2_1 = ./gm2 -B./m2/stage1 -g -fm2-g $(CM2DEP)
GM2_FOR_TARGET = $(STAGE_CC_WRAPPER) ./gm2 -B./ -B$(build_tooldir)/bin/ -L$(objdir)/../ld $(TFLAGS)
@@ -62,6 +69,12 @@ m2 modula-2 modula2: gm2$(exeext) xgcc$(exeext) cc1gm2$(exeext) \
$(GCC_PASSES) $(GCC_PARTS)
m2.serial = cc1gm2$(exeext)
+m2.tags: force
+ cd $(srcdir)/m2; $(ETAGS) -o TAGS.sub \
+ *.cc *.h gm2-gcc/*.cc gm2-gcc/*.h \
+ gm2-libs-ch/*.h gm2-libs-ch/*.c gm2-libs-ch/*.cc; \
+ $(ETAGS) --include TAGS.sub --include ../TAGS.sub
+
m2.srcinfo: doc/m2.info
-cp -p $^ $(srcdir)/doc
@@ -576,35 +589,40 @@ GCC_HEADER_DEPENDENCIES_FOR_M2 = $(BUILD-BOOT-H) $(TIMEVAR_H) m2/gm2config.h $(C
$(generated_files) insn-attr-common.h
m2/gm2-gcc/%.o: $(srcdir)/m2/gm2-gcc/%.cc $(GCC_HEADER_DEPENDENCIES_FOR_M2)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(COMPILER) -c -g $(ALL_COMPILERFLAGS) \
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(COMPILER) $(CM2DEP) -c -g $(ALL_COMPILERFLAGS) \
$(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+ $(POSTCOMPILE)
m2/gm2-gcc/m2configure.o: $(srcdir)/m2/gm2-gcc/m2configure.cc \
$(SYSTEM_H) $(GCC_H) $(CONFIG_H) \
m2/gm2config.h $(TARGET_H) $(PLUGIN_HEADERS) \
$(generated_files) $(C_TREE_H) insn-attr-common.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(COMPILER) $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(COMPILER) $(CM2DEP) $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(DRIVER_DEFINES) \
-DLIBSUBDIR=\"$(libsubdir)\" \
-DPREFIX=\"$(prefix)\" \
-c $(srcdir)/m2/gm2-gcc/m2configure.cc $(OUTPUT_OPTION)
+ $(POSTCOMPILE)
m2/gm2-lang.o: $(srcdir)/m2/gm2-lang.cc gt-m2-gm2-lang.h $(GCC_HEADER_DEPENDENCIES_FOR_M2)
- $(COMPILER) -c -g $(GM2GCC) $(ALL_COMPILERFLAGS) \
+ $(COMPILER) $(CM2DEP) -c -g $(GM2GCC) $(ALL_COMPILERFLAGS) \
-DLIBSUBDIR=\"$(libsubdir)\" \
$(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+ $(POSTCOMPILE)
m2/m2pp.o : $(srcdir)/m2/m2pp.cc $(GCC_HEADER_DEPENDENCIES_FOR_M2)
- $(COMPILER) -c -g -DGM2 $(ALL_COMPILERFLAGS) \
+ $(COMPILER) $(CM2DEP) -c -g -DGM2 $(ALL_COMPILERFLAGS) \
$(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+ $(POSTCOMPILE)
m2/gm2-gcc/rtegraph.o: $(srcdir)/m2/gm2-gcc/rtegraph.cc $(GCC_HEADER_DEPENDENCIES_FOR_M2) \
gt-m2-rtegraph.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(COMPILER) -c -g $(GM2GCC) $(ALL_COMPILERFLAGS) \
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(COMPILER) $(CM2DEP) -c -g $(GM2GCC) $(ALL_COMPILERFLAGS) \
$(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+ $(POSTCOMPILE)
m2/gm2-gcc/$(SRC_PREFIX)%.h: $(srcdir)/m2/gm2-gcc/%.def $(MCDEPS)
-test -d $(@D) || $(mkinstalldirs) $(@D)
@@ -1382,126 +1400,150 @@ m2/boot-bin/mc$(exeext): $(BUILD-MC-BOOT-O) $(BUILD-MC-INTERFACE-O) \
mcflex.o m2/gm2-libs-boot/RTcodummy.o -lm
m2/mc-boot/$(SRC_PREFIX)%.o: m2/mc-boot/$(SRC_PREFIX)%.cc m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) $(CXXFLAGS) $(GM2_PICFLAGS) -g -c -I. -I$(srcdir)/m2/mc-boot-ch -I$(srcdir)/m2/mc-boot -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) $(CXXFLAGS) $(GM2_PICFLAGS) -g -c -I. -I$(srcdir)/m2/mc-boot-ch -I$(srcdir)/m2/mc-boot -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/mc-boot-ch/$(SRC_PREFIX)%.o: m2/mc-boot-ch/$(SRC_PREFIX)%.c m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) $(CXXFLAGS) $(GM2_PICFLAGS) -DHAVE_CONFIG_H -g -c -I. -Im2/gm2-libs -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) -Im2/gm2-libs $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) $(CXXFLAGS) $(GM2_PICFLAGS) -DHAVE_CONFIG_H -g -c -I. -Im2/gm2-libs -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) -Im2/gm2-libs $< -o $@
+ $(POSTCOMPILE)
m2/mc-boot-ch/$(SRC_PREFIX)%.o: m2/mc-boot-ch/$(SRC_PREFIX)%.cc m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) $(CXXFLAGS) $(GM2_PICFLAGS) -DHAVE_CONFIG_H -g -c -I. -Im2/gm2-libs -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) -Im2/gm2-libs $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) $(CXXFLAGS) $(GM2_PICFLAGS) -DHAVE_CONFIG_H -g -c -I. -Im2/gm2-libs -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) -Im2/gm2-libs $< -o $@
+ $(POSTCOMPILE)
m2/mc-boot/main.o: $(M2LINK) $(srcdir)/m2/init/mcinit
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
unset CC ; $(M2LINK) -s --langc++ --exit --name m2/mc-boot/main.cc $(srcdir)/m2/init/mcinit
- $(CXX) $(CXXFLAGS) $(GM2_PICFLAGS) -g -c -I. -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) m2/mc-boot/main.cc -o $@
+ $(CXX) $(CM2DEP) $(CXXFLAGS) $(GM2_PICFLAGS) -g -c -I. -I$(srcdir)/../include -I$(srcdir) $(INCLUDES) m2/mc-boot/main.cc -o $@
+ $(POSTCOMPILE)
mcflex.o: mcflex.c m2/gm2-libs/gm2-libs-host.h
- $(CC) $(CFLAGS) $(GM2_PICFLAGS) -I$(srcdir)/m2/mc -g -c $< -o $@ # remember that mcReserved.h is copied into m2/mc
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CC) $(CM2DEP) $(CFLAGS) $(GM2_PICFLAGS) -I$(srcdir)/m2/mc -g -c $< -o $@ # remember that mcReserved.h is copied into m2/mc
+ $(POSTCOMPILE)
mcflex.c: $(srcdir)/m2/mc/mc.flex
flex -t $< > $@
m2/gm2-libs-boot/M2RTS.o: $(srcdir)/m2/gm2-libs/M2RTS.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) --suppress-noreturn -o=m2/gm2-libs-boot/M2RTS.c $(srcdir)/m2/gm2-libs/M2RTS.mod
- $(COMPILER) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(MCINCLUDES) $(INCLUDES) m2/gm2-libs-boot/M2RTS.c -o $@
+ $(COMPILER) $(CM2DEP) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(MCINCLUDES) $(INCLUDES) m2/gm2-libs-boot/M2RTS.c -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/%.o: $(srcdir)/m2/gm2-libs-boot/%.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) -o=m2/gm2-libs-boot/$*.c $(srcdir)/m2/gm2-libs-boot/$*.mod
- $(COMPILER) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) $(MCINCLUDES) m2/gm2-libs-boot/$*.c -o $@
+ $(COMPILER) $(CM2DEP) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) $(MCINCLUDES) m2/gm2-libs-boot/$*.c -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/%.o: $(srcdir)/m2/gm2-libs/%.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) -o=m2/gm2-libs-boot/$*.c $(srcdir)/m2/gm2-libs/$*.mod
- $(COMPILER) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(MCINCLUDES) $(INCLUDES) m2/gm2-libs-boot/$*.c -o $@
+ $(COMPILER) $(CM2DEP) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(MCINCLUDES) $(INCLUDES) m2/gm2-libs-boot/$*.c -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/$(SRC_PREFIX)%.h: $(srcdir)/m2/gm2-libs/%.def $(MCDEPS)
-test -d $(@D) || $(mkinstalldirs) $(@D)
$(MC) -o=$@ $(srcdir)/m2/gm2-libs/$*.def
m2/gm2-libs-boot/RTcodummy.o: $(srcdir)/m2/gm2-libs-ch/RTcodummy.c m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/RTintdummy.o: $(srcdir)/m2/gm2-libs-ch/RTintdummy.c m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/wrapc.o: $(srcdir)/m2/gm2-libs-ch/wrapc.c m2/gm2-libs-boot/$(SRC_PREFIX)wrapc.h m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c -DHAVE_CONFIG_H $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot -Im2/gm2-libs $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c -DHAVE_CONFIG_H $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot -Im2/gm2-libs $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/UnixArgs.o: $(srcdir)/m2/gm2-libs-ch/UnixArgs.cc m2/gm2-libs-boot/$(SRC_PREFIX)UnixArgs.h m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c -DIN_GCC $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/choosetemp.o: m2/gm2-libs-ch/choosetemp.c m2/gm2-libiberty/Gchoosetemp.h m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot -Im2/gm2-libiberty -I$(srcdir)/m2/gm2-libiberty/ $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot -Im2/gm2-libiberty -I$(srcdir)/m2/gm2-libiberty/ $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/errno.o: $(srcdir)/m2/gm2-libs-ch/errno.c m2/gm2-libs-boot/$(SRC_PREFIX)errno.h m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/dtoa.o: $(srcdir)/m2/gm2-libs-ch/dtoa.cc m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/ldtoa.o: $(srcdir)/m2/gm2-libs-ch/ldtoa.cc m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/termios.o: $(srcdir)/m2/gm2-libs-ch/termios.c $(BUILD-LIBS-BOOT-H) m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/SysExceptions.o: $(srcdir)/m2/gm2-libs-ch/SysExceptions.c \
m2/gm2-libs-boot/$(SRC_PREFIX)SysExceptions.h m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/SysStorage.o: $(srcdir)/m2/gm2-libs/SysStorage.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) -o=m2/gm2-libs-boot/SysStorage.c $(srcdir)/m2/gm2-libs/SysStorage.mod
- $(COMPILER) -DIN_GCC -c $(CFLAGS) $(GM2_PICFLAGS) \
+ $(COMPILER) $(CM2DEP) -DIN_GCC -c $(CFLAGS) $(GM2_PICFLAGS) \
-I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(MCINCLUDES) $(INCLUDES) \
m2/gm2-libs-boot/SysStorage.c -o m2/gm2-libs-boot/SysStorage.o
+ $(POSTCOMPILE)
m2/gm2-compiler-boot/M2GCCDeclare.o: $(srcdir)/m2/gm2-compiler/M2GCCDeclare.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) --extended-opaque -o=m2/gm2-compiler-boot/M2GCCDeclare.c $<
- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
+ $(COMPILER) $(CM2DEP) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
-I. -I$(srcdir)/../include -I$(srcdir) \
-I. -Im2/gm2-libs-boot -Im2/gm2-compiler-boot \
-I$(srcdir)/m2/gm2-libiberty $(MCINCLUDES) $(INCLUDES) m2/gm2-compiler-boot/M2GCCDeclare.c -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler-boot/M2Error.o: $(srcdir)/m2/gm2-compiler/M2Error.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) --extended-opaque -o=m2/gm2-compiler-boot/M2Error.c $<
- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
+ $(COMPILER) $(CM2DEP) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
-I. -I$(srcdir)/../include -I$(srcdir) \
-I. -Im2/gm2-libs-boot -Im2/gm2-compiler-boot \
-I$(srcdir)/m2/gm2-libiberty $(MCINCLUDES) $(INCLUDES) m2/gm2-compiler-boot/M2Error.c -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler-boot/%.o: $(srcdir)/m2/gm2-compiler/%.mod $(BUILD-BOOT-H) $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) -o=m2/gm2-compiler-boot/$*.c $(srcdir)/m2/gm2-compiler/$*.mod
- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
+ $(COMPILER) $(CM2DEP) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
-I. -I$(srcdir)/../include -I$(srcdir) \
-I. -Im2/gm2-libs-boot -Im2/gm2-compiler-boot -Im2/gm2-libiberty \
-I$(srcdir)/m2/gm2-libiberty $(MCINCLUDES) $(INCLUDES) m2/gm2-compiler-boot/$*.c -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler-boot/%.o: m2/gm2-compiler-boot/%.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(MC) -o=m2/gm2-compiler-boot/$*.c m2/gm2-compiler-boot/$*.mod
- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
+ $(COMPILER) $(CM2DEP) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(GM2GCC) \
-I. -I$(srcdir)/../include -I$(srcdir) \
-I. -Im2/gm2-libs-boot -Im2/gm2-compiler-boot \
-I$(srcdir)/m2/gm2-libiberty $(MCINCLUDES) $(INCLUDES) m2/gm2-compiler-boot/$*.c -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler-boot/$(SRC_PREFIX)%.h: $(srcdir)/m2/gm2-compiler/%.def $(MCDEPS)
-test -d $(@D) || $(mkinstalldirs) $(@D)
@@ -1510,10 +1552,11 @@ m2/gm2-compiler-boot/$(SRC_PREFIX)%.h: $(srcdir)/m2/gm2-compiler/%.def $(MCDEPS)
m2/gm2-compiler-boot/m2flex.o: m2/gm2-compiler/m2flex.c $(BUILD-BOOT-H) $(TIMEVAR_H) \
$(BUILD-LIBS-BOOT-H) m2/gm2-compiler-boot/$(SRC_PREFIX)NameKey.h \
$(CONFIG_H) m2/gm2config.h $(TARGET_H) $(PLUGIN_HEADERS)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(COMPILER) -c -g $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(COMPILER) $(CM2DEP) -c -g $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(GM2GCC) $(INCLUDES) -I$(srcdir)/m2 \
-Im2 -Im2/gm2-compiler-boot -Im2/gm2-libs-boot $< -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler/m2flex.c: $(srcdir)/m2/m2.flex $(TIMEVAR_H) insn-attr-common.h
-test -d $(@D) || $(mkinstalldirs) $(@D)
@@ -1526,25 +1569,30 @@ m2/gm2-libiberty/$(SRC_PREFIX)%.h: $(srcdir)/m2/gm2-libiberty/%.def $(MCDEPS)
# The rules to build objects in gm2-compiler and gm2-libs directories.
m2/gm2-compiler/%.o: $(srcdir)/m2/gm2-compiler/%.mod
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(GM2_1) $(GM2_FLAGS) -c -I$(srcdir)/m2/gm2-compiler -I$(srcdir)/m2/gm2-libs -I$(srcdir)/m2/gm2-gcc -I$(srcdir)/m2/gm2-libiberty $< -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler/m2flex.o: m2/gm2-compiler/m2flex.c m2/gm2-libs/gm2-libs-host.h $(TIMEVAR_H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(COMPILER) -c -g $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(COMPILER) $(CM2DEP) -c -g $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(GM2GCC) -Im2/gm2-compiler-boot -Im2/gm2-libs-boot $< -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler/%.o: m2/gm2-compiler/%.mod
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(GM2_1) $(GM2_FLAGS) -c -I$(srcdir)/m2/gm2-compiler -I$(srcdir)/m2/gm2-libs -I$(srcdir)/m2/gm2-gcc -I$(srcdir)/m2/gm2-libiberty $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-iso/%.o: $(srcdir)/m2/gm2-libs-iso/%.c m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -DBUILD_GM2_LIBS_TARGET -DBUILD_GM2_LIBS -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -DBUILD_GM2_LIBS_TARGET -DBUILD_GM2_LIBS -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-iso/%.o: $(srcdir)/m2/gm2-libs-iso/%.mod
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(GM2_1) $(GM2_ISO_FLAGS) -c -B./ -Im2/gm2-libs-iso:$(srcdir)/m2/gm2-libs-iso -I$(srcdir)/m2/gm2-libs $< -o $@
+ $(POSTCOMPILE)
# We build the cc1gm2$(exeext) from the boot stage and then proceed to build it
@@ -1637,20 +1685,24 @@ m2/gm2-libs/libgm2.a: build-compiler gm2$(exeext) $(BUILD-LIBS)
$(RANLIB) $@
m2/gm2-libs/%.o: $(srcdir)/m2/gm2-libs/%.mod $(MCDEPS) $(BUILD-BOOT-H)
- -test -d $(@D) || $(mkinstalldirs) $(@D)
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
$(GM2_1) -c $(GM2_FLAGS) -Im2/gm2-libs -I$(srcdir)/m2/gm2-libs -I$(srcdir)/m2/gm2-libs-iso $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs/%.o: $(srcdir)/m2/gm2-libs-ch/%.c m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -DBUILD_GM2_LIBS -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -DBUILD_GM2_LIBS -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs/%.o: $(srcdir)/m2/gm2-libs-ch/%.cc m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs/choosetemp.o: m2/gm2-libs-ch/choosetemp.c m2/gm2-libiberty/Gchoosetemp.h m2/gm2-libs/gm2-libs-host.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot -Im2/gm2-libiberty -I$(srcdir)/m2/gm2-libiberty/ $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) -c $(CFLAGS) $(GM2_PICFLAGS) -Im2/gm2-libs -I$(srcdir)/m2 -Im2 -I. -Im2/gm2-libs-boot -Im2/gm2-libiberty -I$(srcdir)/m2/gm2-libiberty/ $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-libs-boot/libgm2.a: m2/boot-bin/mc$(exeext) $(BUILD-LIBS-BOOT)
-test -d $(@D) || $(mkinstalldirs) $(@D)
@@ -1669,8 +1721,9 @@ m2/gm2-compiler-boot/gm2.a: m2/boot-bin/mc$(exeext) m2/boot-bin/mklink$(exeext)
m2/gm2-compiler-boot/gm2.a: m2/boot-bin/mc$(exeext)
m2/boot-bin/mklink$(exeext): $(srcdir)/m2/tools-src/mklink.c
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) $(CFLAGS) $(LDFLAGS) -I$(srcdir)/m2 -Im2/gm2-libs-boot -Im2/gm2-compiler-boot -I$(srcdir)/m2/mc-boot-ch $(INCLUDES) $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) $(CFLAGS) $(LDFLAGS) -I$(srcdir)/m2 -Im2/gm2-libs-boot -Im2/gm2-compiler-boot -I$(srcdir)/m2/mc-boot-ch $(INCLUDES) $< -o $@
+ $(POSTCOMPILE)
m2/gm2-compiler-boot/$(SRC_PREFIX)%.h: $(srcdir)/m2/gm2-compiler-boot/%.def $(MCDEPS)
-test -d $(@D) || $(mkinstalldirs) $(@D)
@@ -1749,12 +1802,14 @@ ifeq ($(M2_MAINTAINER),yes)
include m2/Make-maintainer
else
m2/pge-boot/%.o: m2/pge-boot/%.c m2/gm2-libs/gm2-libs-host.h m2/gm2config.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) $(CFLAGS) $(GM2_PICFLAGS) $(INCLUDES) -I$(srcdir)/m2/pge-boot -Im2/gm2-libs -g -c $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) $(CFLAGS) $(GM2_PICFLAGS) $(INCLUDES) -I$(srcdir)/m2/pge-boot -Im2/gm2-libs -g -c $< -o $@
+ $(POSTCOMPILE)
m2/pge-boot/%.o: m2/pge-boot/%.cc m2/gm2-libs/gm2-libs-host.h m2/gm2config.h
- -test -d $(@D) || $(mkinstalldirs) $(@D)
- $(CXX) $(CXXFLAGS) $(GM2_PICFLAGS) $(INCLUDES) -I$(srcdir)/m2/pge-boot -Im2/gm2-libs -g -c $< -o $@
+ -test -d $(@D)/$(DEPDIR) || $(mkinstalldirs) $(@D)/$(DEPDIR)
+ $(CXX) $(CM2DEP) $(CXXFLAGS) $(GM2_PICFLAGS) $(INCLUDES) -I$(srcdir)/m2/pge-boot -Im2/gm2-libs -g -c $< -o $@
+ $(POSTCOMPILE)
$(PGE): $(BUILD-PGE-O)
+$(LINKER) $(ALL_LINKERFLAGS) $(LDFLAGS) -o $@ $(BUILD-PGE-O) -lm
diff --git a/gcc/m2/README b/gcc/m2/README
index 9de9e4f..9cb8772 100644
--- a/gcc/m2/README
+++ b/gcc/m2/README
@@ -1,25 +1,4 @@
-Building GNU Modula-2
-=====================
-
-Please see the GCC documentation (gcc.texi) and section
-(Installing GCC).
-
-Regression testing GM2
-======================
-
- cd host-build ; make check-m2
-
-runs all regression tests.
-
-Stress testing GM2
-==================
-
- cd host-build/gcc ; make gm2.paranoid
-
-builds gm2 using itself and then compiles each module with both
-versions of gm2 comparing the emitted assembler code.
-
Contributing to GNU Modula-2
============================
diff --git a/gcc/m2/gm2-compiler/M2CaseList.mod b/gcc/m2/gm2-compiler/M2CaseList.mod
index b7155e3..9a5dab4 100644
--- a/gcc/m2/gm2-compiler/M2CaseList.mod
+++ b/gcc/m2/gm2-compiler/M2CaseList.mod
@@ -975,7 +975,7 @@ BEGIN
appendString (InitStringChar ("'"))
END
ELSE
- appendString (InitStringCharStar ('CHR (')) ;
+ appendString (InitString ('CHR (')) ;
appendString (InitStringCharStar (CSTIntToString (value))) ;
appendString (InitStringChar (')'))
END
diff --git a/gcc/m2/gm2-compiler/M2Comp.mod b/gcc/m2/gm2-compiler/M2Comp.mod
index e33363e..0ce8510 100644
--- a/gcc/m2/gm2-compiler/M2Comp.mod
+++ b/gcc/m2/gm2-compiler/M2Comp.mod
@@ -22,26 +22,29 @@ along with GNU Modula-2; see the file COPYING3. If not see
IMPLEMENTATION MODULE M2Comp ;
-FROM M2Options IMPORT PPonly, Statistics, Quiet, WholeProgram,
- ExtendedOpaque, GenModuleList ;
-
-FROM M2Pass IMPORT SetPassToPass0, SetPassToPass1, SetPassToPass2, SetPassToPassC, SetPassToPass3,
- SetPassToNoPass, SetPassToPassHidden ;
+FROM M2Pass IMPORT SetPassToPass0, SetPassToPass1, SetPassToPass2, SetPassToPassC,
+ SetPassToPass3, SetPassToNoPass, SetPassToPassHidden ;
FROM M2Reserved IMPORT toktype ;
FROM M2Search IMPORT FindSourceDefFile, FindSourceModFile ;
FROM M2Code IMPORT Code ;
-FROM M2LexBuf IMPORT OpenSource, CloseSource, ResetForNewPass, currenttoken, GetToken, ReInitialize, currentstring, GetTokenNo ;
+
+FROM M2LexBuf IMPORT OpenSource, CloseSource, ResetForNewPass, currenttoken, GetToken,
+ ReInitialize, currentstring, GetTokenNo ;
+
FROM M2FileName IMPORT CalculateFileName ;
-FROM M2Preprocess IMPORT PreprocessModule ;
+FROM M2Preprocess IMPORT PreprocessModule, MakeSaveTempsFileNameExt, OnExitDelete ;
FROM libc IMPORT exit ;
FROM M2Error IMPORT ErrorStringAt, ErrorStringAt2, ErrorStringsAt2,
WriteFormat0, FlushErrors, FlushWarnings, ResetErrorScope ;
-FROM M2MetaError IMPORT MetaErrorString0, MetaErrorString1, MetaError0, MetaError1, MetaString0 ;
+FROM M2MetaError IMPORT MetaErrorString0, MetaErrorString1, MetaError0, MetaError1,
+ MetaString0 ;
+
FROM FormatStrings IMPORT Sprintf1 ;
FROM P0SymBuild IMPORT P0Init, P1Init ;
+FROM M2Debug IMPORT Assert ;
IMPORT m2flex ;
IMPORT P0SyntaxCheck ;
@@ -51,6 +54,7 @@ IMPORT PCBuild ;
IMPORT P3Build ;
IMPORT PHBuild ;
IMPORT PCSymBuild ;
+IMPORT DynamicStrings ;
FROM M2Batch IMPORT GetSource, GetModuleNo, GetDefinitionModuleFile, GetModuleFile,
AssociateModule, AssociateDefinition, MakeImplementationSource,
@@ -59,16 +63,32 @@ FROM M2Batch IMPORT GetSource, GetModuleNo, GetDefinitionModuleFile, GetModuleFi
FROM SymbolTable IMPORT GetSymName, IsDefImp, NulSym,
IsHiddenTypeDeclared, GetFirstUsed, GetMainModule, SetMainModule,
ResolveConstructorTypes, SanityCheckConstants, IsDefinitionForC,
- IsBuiltinInModule, PutModLink, IsDefLink, IsModLink,
- PutLibName ;
+ IsBuiltinInModule, PutModLink, IsDefLink, IsModLink, PutLibName,
+ GetModuleDefImportStatementList, GetModuleModImportStatementList,
+ GetImportModule, IsImportStatement, IsImport,
+ GetImportStatementList ;
+
+FROM M2Search IMPORT FindSourceDefFile ;
-FROM FIO IMPORT StdErr, StdOut ;
+FROM FIO IMPORT File, StdErr, StdOut, Close, EOF, IsNoError, WriteLine,
+ WriteChar, FlushOutErr ;
+
+FROM SFIO IMPORT WriteS, OpenToRead, OpenToWrite, ReadS, WriteS ;
FROM NameKey IMPORT Name, GetKey, KeyToCharStar, makekey ;
-FROM M2Printf IMPORT fprintf1 ;
+FROM M2Printf IMPORT fprintf0, fprintf1 ;
FROM M2Quiet IMPORT qprintf0, qprintf1, qprintf2 ;
-FROM DynamicStrings IMPORT String, InitString, KillString, InitStringCharStar, Dup, Mark, EqualArray, string ;
-FROM M2Options IMPORT Verbose, GetM2Prefix ;
+
+FROM M2Options IMPORT Verbose, GetM2Prefix, GetM, GetMM, GetDepTarget, GetMF, GetMP,
+ GetObj, PPonly, Statistics, Quiet, WholeProgram, GetMD, GetMMD,
+ ExtendedOpaque, GenModuleList ;
+
FROM PathName IMPORT DumpPathName ;
+FROM Lists IMPORT List, NoOfItemsInList, GetItemFromList ;
+FROM Indexing IMPORT Index, InitIndex, KillIndex, GetIndice, PutIndice, HighIndice ;
+
+FROM DynamicStrings IMPORT String, InitString, KillString, InitStringCharStar,
+ Dup, Mark, EqualArray, string, Length, ConCat, ConCatChar,
+ InitStringChar, RIndex, Slice, Equal, RemoveWhitePrefix ;
CONST
@@ -76,6 +96,8 @@ CONST
VAR
ModuleType : (None, Definition, Implementation, Program) ;
+ DepContent : Index ;
+ DepOutput : String ;
(*
@@ -124,36 +146,131 @@ END NeedToParseImplementation ;
(*
+ GenerateDefDependency - generate a single dependency for the definition module
+                            provided that it can be found and is not blocked by -MM.
+*)
+
+PROCEDURE GenerateDefDependency (module: CARDINAL) ;
+VAR
+ stem,
+ fullpath,
+ named : String ;
+BEGIN
+ stem := InitStringCharStar (KeyToCharStar (GetSymName (module))) ;
+ named := NIL ;
+ IF FindSourceDefFile (stem, fullpath, named)
+ THEN
+ IF EqualArray (named, '') OR (NOT GetMM ())
+ THEN
+ MergeDep (DepContent, fullpath)
+ ELSE
+ fullpath := KillString (fullpath)
+ END
+ END ;
+ stem := KillString (stem) ;
+ named := KillString (named)
+END GenerateDefDependency ;
+
+
+(*
+ GenerateDependenciesFromImport - lookup the module associated with the import
+ and call GenerateDefDependency.
+*)
+
+PROCEDURE GenerateDependenciesFromImport (import: CARDINAL) ;
+VAR
+ module : CARDINAL ;
+BEGIN
+ Assert (IsImport (import)) ;
+ module := GetImportModule (import) ;
+ GenerateDefDependency (module)
+END GenerateDependenciesFromImport ;
+
+
+(*
+   GenerateDependenciesFromList - iterate over the import lists and for
+ each module issue a dependency.
+*)
+
+PROCEDURE GenerateDependenciesFromList (dep: List) ;
+VAR
+ importList: List ;
+ import : CARDINAL ;
+ i, n, j, m: CARDINAL ;
+BEGIN
+ n := NoOfItemsInList (dep) ;
+ i := 1 ;
+ WHILE i <= n DO
+ import := GetItemFromList (dep, i) ;
+ IF IsImportStatement (import)
+ THEN
+ importList := GetImportStatementList (import) ;
+ j := 1 ;
+ m := NoOfItemsInList (importList) ;
+ WHILE j <= m DO
+ import := GetItemFromList (importList, j) ;
+ GenerateDependenciesFromImport (import) ;
+ INC (j)
+ END
+ ELSE
+ GenerateDependenciesFromImport (import)
+ END ;
+ INC (i)
+ END
+END GenerateDependenciesFromList ;
+
+
+(*
+ GenerateDependencies - generate a list of dependencies for the main module where
+ the source code is found in sourcefile.
+*)
+
+PROCEDURE GenerateDependencies (sourcefile: String) ;
+BEGIN
+ IF IsDefImp (GetMainModule ())
+ THEN
+ GenerateDependenciesFromList (GetModuleDefImportStatementList (GetMainModule ())) ;
+ GenerateDefDependency (GetMainModule ())
+ END ;
+ GenerateDependenciesFromList (GetModuleModImportStatementList (GetMainModule ())) ;
+ WriteDepContents (DepOutput, DepContent)
+END GenerateDependencies ;
+
+
+(*
Compile - compile file, s, using a 5 pass technique.
*)
PROCEDURE Compile (s: String) ;
BEGIN
- DoPass0(s) ;
+ DoPass0 (s) ;
FlushWarnings ; FlushErrors ;
- IF PPonly
- THEN
- RETURN
- END;
ResetForNewPass ; ResetErrorScope ;
qprintf0('Pass 1: scopes, enumerated types, imports and exports\n') ;
DoPass1 ;
FlushWarnings ; FlushErrors ;
- qprintf0('Pass 2: constants and types\n') ;
- ResetForNewPass ; ResetErrorScope ;
- DoPass2 ;
- FlushWarnings ; FlushErrors ;
- qprintf0('Pass C: aggregate constants\n') ;
- ResetForNewPass ; ResetErrorScope ;
- DoPassC ;
- FlushWarnings ; FlushErrors ;
- qprintf0('Pass 3: quadruple generation\n') ;
- ResetForNewPass ; ResetErrorScope ;
- DoPass3 ;
- FlushWarnings ; FlushErrors ;
- qprintf0('Pass 4: gcc tree generation\n') ;
- Code ;
- FlushWarnings ; FlushErrors
+ IF GetM () OR GetMM ()
+ THEN
+ GenerateDependencies (s)
+ END ;
+ IF NOT PPonly
+ THEN
+ qprintf0('Pass 2: constants and types\n') ;
+ ResetForNewPass ; ResetErrorScope ;
+ DoPass2 ;
+ FlushWarnings ; FlushErrors ;
+ qprintf0('Pass C: aggregate constants\n') ;
+ ResetForNewPass ; ResetErrorScope ;
+ DoPassC ;
+ FlushWarnings ; FlushErrors ;
+ qprintf0('Pass 3: quadruple generation\n') ;
+ ResetForNewPass ; ResetErrorScope ;
+ DoPass3 ;
+ FlushWarnings ; FlushErrors ;
+ qprintf0('Pass 4: gcc tree generation\n') ;
+ Code ;
+ FlushWarnings ; FlushErrors
+ END
END Compile ;
@@ -165,9 +282,9 @@ PROCEDURE compile (filename: ADDRESS) ;
VAR
f: String ;
BEGIN
- f := InitStringCharStar(filename) ;
- Compile(f) ;
- f := KillString(f) ;
+ f := InitStringCharStar (filename) ;
+ Compile (f) ;
+ f := KillString (f)
END compile ;
@@ -229,7 +346,7 @@ END PeepInto ;
(*
- qprintLibName - print the libname
+ qprintLibName - print the libname.
*)
PROCEDURE qprintLibName (LibName: String) ;
@@ -242,165 +359,594 @@ END qprintLibName ;
(*
- DoPass0 -
+ CreateFileStem - create a stem using the template LibName_ModuleName.
+*)
+
+PROCEDURE CreateFileStem (SymName, LibName: String) : String ;
+BEGIN
+ IF Length (LibName) > 0
+ THEN
+ RETURN ConCat (Dup (LibName), ConCat (InitStringChar ('_'), SymName))
+ ELSE
+ RETURN SymName
+ END
+END CreateFileStem ;
+
+
+(*
+ Return basename of path. CutExt determines whether the .extension
+ should be removed.
*)
-PROCEDURE DoPass0 (s: String) ;
+PROCEDURE BaseName (Path: String; CutExt: BOOLEAN) : String ;
VAR
- Main,
- Sym : CARDINAL ;
- i : CARDINAL ;
- SymName,
- FileName,
- LibName,
- PPSource: String ;
+ ext,
+ basename: INTEGER ;
BEGIN
- P0Init ;
- SetPassToPass0 ;
- (* Maybe preprocess the main file. *)
- PPSource := PreprocessModule(s, TRUE);
- IF PPonly
+ basename := RIndex (Path, '/', 0) ;
+ IF basename = -1
THEN
- RETURN
- END;
- PeepInto (PPSource) ;
- Main := GetMainModule() ;
- i := 1 ;
- Sym := GetModuleNo(i) ;
- qprintf1('Compiling: %s\n', PPSource) ;
- IF Debugging
+ basename := 0
+ ELSE
+ basename := basename + 1
+ END ;
+ IF CutExt
THEN
- DumpPathName ('DoPass0')
+ ext := RIndex (Path, '.', 0) ;
+ IF ext=-1
+ THEN
+ ext := 0
+ END
+ ELSE
+ ext := 0
END ;
- IF Verbose
+ RETURN Slice (Path, basename, ext)
+END BaseName ;
+
+
+(*
+ IsLibrary - return TRUE if line contains a library module.
+*)
+
+PROCEDURE IsLibrary (line: String) : BOOLEAN ;
+VAR
+ moduleName,
+ libname, filename: String ;
+ result : BOOLEAN ;
+BEGIN
+ result := FALSE ;
+ moduleName := BaseName (line, TRUE) ;
+ filename := NIL ;
+ libname := NIL ;
+ IF FindSourceDefFile (moduleName, filename, libname)
THEN
- fprintf1 (StdOut, 'Compiling: %s\n', PPSource)
+ moduleName := KillString (moduleName) ;
+ IF Length (libname) > 0
+ THEN
+ moduleName := BaseName (line, FALSE) ;
+ line := BaseName (line, FALSE) ;
+ result := Equal (line, moduleName) ;
+ line := KillString (line) ;
+ END
END ;
- qprintf0('Pass 0: lexical analysis, parsing, modules and associated filenames\n') ;
- WHILE Sym#NulSym DO
- SymName := InitStringCharStar (KeyToCharStar (GetSymName (Sym))) ;
- IF IsDefImp (Sym)
+ libname := KillString (libname) ;
+ filename := KillString (filename) ;
+ moduleName := KillString (moduleName) ;
+ RETURN result
+END IsLibrary ;
+
+
+(*
+ IsUnique - return TRUE if line is unique in array content.
+*)
+
+PROCEDURE IsUnique (content: Index; line: String) : BOOLEAN ;
+VAR
+ high, i: CARDINAL ;
+BEGIN
+ high := HighIndice (content) ;
+ i := 1 ;
+ WHILE i <= high DO
+ IF Equal (line, GetIndice (content, i))
THEN
- LibName := NIL ;
- IF FindSourceDefFile (SymName, FileName, LibName)
+ RETURN FALSE
+ END ;
+ INC (i)
+ END ;
+ RETURN TRUE
+END IsUnique ;
+
+
+(*
+ Append - append line to array content.
+*)
+
+PROCEDURE Append (content: Index; line: String) ;
+VAR
+ high: CARDINAL ;
+BEGIN
+ high := HighIndice (content) ;
+ PutIndice (content, high+1, line)
+END Append ;
+
+
+(*
+ MergeDep - if line is unique in array content then append.
+ Check to see (and ignore) if line is a library module and -MM
+ is present.
+*)
+
+PROCEDURE MergeDep (content: Index; line: String) ;
+BEGIN
+ line := RemoveWhitePrefix (line) ;
+ IF (NOT EqualArray (line, "\")) AND (Length (line) > 0)
+ THEN
+ (* Ignore if -MM and is a library module. *)
+ IF NOT (GetMM () AND IsLibrary (line))
+ THEN
+ IF IsUnique (content, line)
THEN
- ModuleType := Definition ;
- IF OpenSource (AssociateDefinition (PreprocessModule (FileName, FALSE), Sym))
+ Append (content, line)
+ END
+ END
+ END
+END MergeDep ;
+
+
+(*
+ splitLine - split a line into words separated by spaces
+ and call MergeDep on each word.
+*)
+
+PROCEDURE splitLine (content: Index; line: String) ;
+VAR
+ word : String ;
+ space: INTEGER ;
+BEGIN
+ REPEAT
+ line := RemoveWhitePrefix (line) ;
+ space := DynamicStrings.Index (line, ' ', 0) ;
+ IF space > 0
+ THEN
+ word := Slice (line, 0, space) ;
+ word := RemoveWhitePrefix (word) ;
+ IF Length (word) > 0
+ THEN
+ MergeDep (content, word)
+ END ;
+ line := Slice (line, space, 0) ;
+ ELSIF space < 0
+ THEN
+ MergeDep (content, line)
+ END
+ UNTIL space <= 0
+END splitLine ;
+
+
+(*
+ MergeDeps - foreach dependency in ChildDep do
+ add dependency to ChildDep if not already present.
+ ignore all ChildDep if -MM and libname # "".
+*)
+
+PROCEDURE MergeDeps (content: Index; ChildDep, LibName: String) ;
+VAR
+ line: String ;
+ in : File ;
+BEGIN
+ IF (content # NIL) AND (NOT (GetMM () AND (Length (LibName) > 0)))
+ THEN
+ in := OpenToRead (ChildDep) ;
+ IF IsNoError (in)
+ THEN
+ line := ReadS (in) ; (* Skip over first line containing the module object. *)
+ WHILE NOT EOF (in) DO
+ line := ReadS (in) ;
+ splitLine (content, line)
+ END
+ END ;
+ Close (in)
+ END
+END MergeDeps ;
+
+
+(*
+ GetRuleTarget - return the rule target which is derived from the -MT arg
+ or -o arg or filename.mod.
+*)
+
+PROCEDURE GetRuleTarget (filename: String) : String ;
+BEGIN
+ IF GetDepTarget () # NIL
+ THEN
+ RETURN InitStringCharStar (GetDepTarget ())
+ ELSIF GetMF () # NIL
+ THEN
+ RETURN InitStringCharStar (GetMF ())
+ ELSE
+ RETURN ConCat (BaseName (filename, TRUE), InitString ('.o'))
+ END
+END GetRuleTarget ;
+
+
+(*
+ ReadDepContents - reads the contents of file dep into a dynamic array
+ and return the array. The file will be split into words
+ and each word stored as an entry in the array.
+*)
+
+PROCEDURE ReadDepContents (filename, dep: String) : Index ;
+VAR
+ content: Index ;
+ line : String ;
+ in : File ;
+BEGIN
+ content := NIL ;
+ IF GetM () OR GetMM ()
+ THEN
+ in := OpenToRead (dep) ;
+ (* The file might not be created (if -MD or -MMD is used as these options
+ operate without preprocessing) in which case we create an dynamic
+ array with the source filename and target. *)
+ content := InitIndex (1) ;
+ IF GetMD () OR GetMMD () OR (NOT IsNoError (in))
+ THEN
+ (* No preprocessing done therefore create first two lines using
+ target and source. *)
+ PutIndice (content, 1, ConCatChar (GetRuleTarget (filename), ':')) ;
+ PutIndice (content, 2, Dup (filename))
+ ELSE
+ (* Preprocessing (using cc1) has created one for us, so we read it. *)
+ WHILE NOT EOF (in) DO
+ line := ReadS (in) ;
+ splitLine (content, line)
+ END
+ END ;
+ Close (in)
+ END ;
+ RETURN content
+END ReadDepContents ;
+
+
+(*
+ WriteDep - write the dependencies and target to file out.
+*)
+
+PROCEDURE WriteDep (dep: String; contents: Index; out: File) ;
+VAR
+ i, h: CARDINAL ;
+ line: String ;
+BEGIN
+ i := 1 ;
+ h := HighIndice (contents) ;
+ WHILE i <= h DO
+ line := GetIndice (contents, i) ;
+ line := RemoveWhitePrefix (line) ;
+ IF Length (line) > 0
+ THEN
+ IF i = 1
+ THEN
+ (* First line is always the target. *)
+ IF GetDepTarget () # NIL
THEN
- IF NOT P0SyntaxCheck.CompilationUnit ()
- THEN
- WriteFormat0 ('compilation failed') ;
- CloseSource ;
- RETURN
- END ;
- qprintf2 (' Module %-20s : %s', SymName, FileName) ;
- qprintLibName (LibName) ;
- PutLibName (Sym, makekey (string (LibName))) ;
- IF IsDefinitionForC (Sym)
- THEN
- qprintf0 (' (for C)')
- END ;
- IF IsDefLink (Sym)
- THEN
- qprintf0 (' (linking)')
- END ;
- qprintf0 ('\n') ;
- CloseSource
- ELSE
- (* Unrecoverable error. *)
- MetaErrorString1 (Sprintf1 (InitString ('file {%%1EUAF%s} containing module {%%1a} cannot be found'),
- FileName), Sym)
+ line := ConCatChar (InitStringCharStar (GetDepTarget ()), ':')
END
+ ELSIF i > 1
+ THEN
+ WriteChar (out, ' ')
+ END ;
+ line := WriteS (out, line) ;
+ IF i < h
+ THEN
+ WriteChar (out, ' ') ;
+ WriteChar (out, '\')
+ END ;
+ WriteLine (out)
+ END ;
+ INC (i)
+ END
+END WriteDep ;
+
+
+(*
+ WritePhonyDep - write the dependencies and target to file out.
+*)
+
+PROCEDURE WritePhonyDep (dep: String; contents: Index; out: File) ;
+VAR
+ i, h: CARDINAL ;
+ line: String ;
+BEGIN
+ (* The first line is always the target and the second line is always
+ the top level source file. *)
+ i := 3 ;
+ h := HighIndice (contents) ;
+ WHILE i <= h DO
+ line := GetIndice (contents, i) ;
+ line := RemoveWhitePrefix (line) ;
+ IF Length (line) > 0
+ THEN
+ line := WriteS (out, line) ;
+ WriteChar (out, ':') ;
+ WriteLine (out)
+ END ;
+ INC (i)
+ END
+END WritePhonyDep ;
+
+
+(*
+ WriteDepContents - write the dynamic array to filename dep (or StdOut) if
+ the GetMF file is NIL.
+*)
+
+PROCEDURE WriteDepContents (dep: String; contents: Index) ;
+VAR
+ out: File ;
+BEGIN
+ IF (contents # NIL) AND (GetM () OR GetMM ())
+ THEN
+ IF GetMF () = NIL
+ THEN
+ out := StdOut ;
+ dep := OnExitDelete (dep)
+ ELSE
+ out := OpenToWrite (dep)
+ END ;
+ IF IsNoError (out)
+ THEN
+ WriteDep (dep, contents, out) ;
+ IF GetMP ()
+ THEN
+ WritePhonyDep (dep, contents, out)
+ END
+ END ;
+ IF GetMF () = NIL
+ THEN
+ FlushOutErr
+ ELSE
+ Close (out) ;
+ END ;
+ contents := KillIndex (contents)
+ END
+END WriteDepContents ;
+
+
+(*
+ CreateDepFilename - return a dependency filename associated with filename or use GetMF.
+*)
+
+PROCEDURE CreateDepFilename (filename: String) : String ;
+VAR
+ depfile: String ;
+BEGIN
+ IF GetMF () = NIL
+ THEN
+ depfile := MakeSaveTempsFileNameExt (filename, InitString ('.d')) ;
+ RETURN OnExitDelete (depfile)
+ ELSE
+ RETURN InitStringCharStar (GetMF ())
+ END
+END CreateDepFilename ;
+
+
+(*
+ Pass0CheckDef -
+*)
+
+PROCEDURE Pass0CheckDef (sym: CARDINAL) : BOOLEAN ;
+VAR
+ ChildDep,
+ SymName,
+ FileName,
+ LibName : String ;
+BEGIN
+ LibName := NIL ;
+ FileName := NIL ;
+ SymName := InitStringCharStar (KeyToCharStar (GetSymName (sym))) ;
+ IF IsDefImp (sym)
+ THEN
+ IF FindSourceDefFile (SymName, FileName, LibName)
+ THEN
+ ModuleType := Definition ;
+ ChildDep := MakeSaveTempsFileNameExt (CreateFileStem (SymName, LibName), InitString ('.def.d')) ;
+ IF OpenSource (AssociateDefinition (PreprocessModule (FileName, FALSE, TRUE,
+ ChildDep), sym))
+ THEN
+ IF NOT P0SyntaxCheck.CompilationUnit ()
+ THEN
+ WriteFormat0 ('compilation failed') ;
+ CloseSource ;
+ SymName := KillString (SymName) ;
+ FileName := KillString (FileName) ;
+ LibName := KillString (LibName) ;
+ RETURN FALSE
+ END ;
+ qprintf2 (' Module %-20s : %s', SymName, FileName) ;
+ qprintLibName (LibName) ;
+ PutLibName (sym, makekey (string (LibName))) ;
+ IF IsDefinitionForC (sym)
+ THEN
+ qprintf0 (' (for C)')
+ END ;
+ IF IsDefLink (sym)
+ THEN
+ qprintf0 (' (linking)')
+ END ;
+ qprintf0 ('\n') ;
+ CloseSource ;
+ MergeDeps (DepContent, ChildDep, LibName)
ELSE
(* Unrecoverable error. *)
- MetaError1 ('the file containing the definition module {%1EMAa} cannot be found', Sym)
- END ;
- ModuleType := Implementation
+ MetaErrorString1 (Sprintf1 (InitString ('file {%%1EUAF%s} containing module {%%1a} cannot be found'),
+ FileName), sym)
+ END
ELSE
- ModuleType := Program
+ (* Unrecoverable error. *)
+ MetaError1 ('the file containing the definition module {%1EMAa} cannot be found', sym)
END ;
- IF (Main=Sym) OR NeedToParseImplementation (Sym)
+ ModuleType := Implementation
+ ELSE
+ ModuleType := Program
+ END ;
+ SymName := KillString (SymName) ;
+ FileName := KillString (FileName) ;
+ LibName := KillString (LibName) ;
+ RETURN TRUE
+END Pass0CheckDef ;
+
+
+(*
+ Pass0CheckMod -
+*)
+
+PROCEDURE Pass0CheckMod (sym: CARDINAL; PPSource: String) : BOOLEAN ;
+VAR
+ Main : CARDINAL ;
+ ChildDep,
+ SymName,
+ FileName,
+ LibName : String ;
+BEGIN
+ SymName := InitStringCharStar (KeyToCharStar (GetSymName (sym))) ;
+ FileName := NIL ;
+ LibName := NIL ;
+ Main := GetMainModule () ;
+ IF (Main = sym) OR NeedToParseImplementation (sym)
+ THEN
+ (* Only need to read implementation module if hidden types are
+ declared or it is the main module. *)
+ IF Main = sym
THEN
- (* Only need to read implementation module if hidden types are declared or it is the main module *)
- LibName := NIL ;
- IF Main=Sym
+ FileName := Dup (PPSource) ;
+ LibName := InitStringCharStar (GetM2Prefix ()) ;
+ PutLibName (sym, makekey (string (LibName)))
+ ELSE
+ IF FindSourceModFile (SymName, FileName, LibName)
THEN
- FileName := Dup (PPSource) ;
- LibName := InitStringCharStar (GetM2Prefix ()) ;
- PutLibName (Sym, makekey (string (LibName)))
+ ChildDep := MakeSaveTempsFileNameExt (CreateFileStem (SymName, LibName), InitString ('.mod.d')) ;
+ FileName := PreprocessModule (FileName, FALSE, TRUE, ChildDep) ;
+ PutLibName (sym, makekey (string (LibName))) ;
+ MergeDeps (DepContent, ChildDep, LibName)
ELSE
- IF FindSourceModFile (SymName, FileName, LibName)
+ qprintf1 (' Module %-20s : implementation source file not found\n', SymName)
+ END
+ END ;
+
+ IF FileName # NIL
+ THEN
+ IF OpenSource (AssociateModule (Dup (FileName), sym))
+ THEN
+ IF NOT P0SyntaxCheck.CompilationUnit ()
THEN
- FileName := PreprocessModule (FileName, FALSE) ;
- PutLibName (Sym, makekey (string (LibName)))
- ELSE
- qprintf1 (' Module %-20s : implementation source file not found\n', SymName)
+ WriteFormat0 ('compilation failed') ;
+ CloseSource ;
+ SymName := KillString (SymName) ;
+ FileName := KillString (FileName) ;
+ LibName := KillString (LibName) ;
+ RETURN FALSE
+ END ;
+ qprintf2 (' Module %-20s : %s', SymName, FileName) ;
+ qprintLibName (LibName) ;
+ IF IsModLink (sym)
+ THEN
+ qprintf0 (' (linking)')
+ END ;
+ qprintf0 ('\n') ;
+ CloseSource
+ ELSE
+ (* It is quite legitimate to implement a module in C (and pretend it was a M2
+ implementation) providing that it is not the main program module and the
+ definition module does not declare a hidden type when -fextended-opaque
+ is used. *)
+ IF (NOT WholeProgram) OR (sym = Main) OR IsHiddenTypeDeclared (sym)
+ THEN
+ (* Unrecoverable error. *)
+ MetaErrorString1 (Sprintf1 (InitString ('file {%%1EUAF%s} containing module {%%1a} cannot be found'),
+ FileName), sym) ;
END
- END ;
-
- IF FileName#NIL
+ END
+ END
+ ELSIF GenModuleList
+ THEN
+ IF NOT IsDefinitionForC (sym)
+ THEN
+ (* The implementation module is only useful if -fgen-module-list= is
+ used (to gather all dependencies) although we do not insist upon finding the
+ implementation module. *)
+ LibName := NIL ;
+ IF FindSourceModFile (SymName, FileName, LibName)
THEN
- IF OpenSource (AssociateModule (Dup (FileName), Sym))
+ PutLibName (sym, makekey (string (LibName))) ;
+ qprintf2 (' Module %-20s : %s' , SymName, FileName) ;
+ qprintLibName (LibName) ;
+ qprintf0 (' (linking)\n') ;
+ ChildDep := MakeSaveTempsFileNameExt (CreateFileStem (SymName, LibName), InitString ('.mod.d')) ;
+ IF OpenSource (AssociateModule (PreprocessModule (FileName, FALSE, TRUE, ChildDep), sym))
THEN
+ PutModLink (sym, TRUE) ; (* This source is only used to determine link time info. *)
IF NOT P0SyntaxCheck.CompilationUnit ()
THEN
WriteFormat0 ('compilation failed') ;
CloseSource ;
- RETURN
- END ;
- qprintf2 (' Module %-20s : %s', SymName, FileName) ;
- qprintLibName (LibName) ;
- IF IsModLink (Sym)
- THEN
- qprintf0 (' (linking)')
+ SymName := KillString (SymName) ;
+ FileName := KillString (FileName) ;
+ LibName := KillString (LibName) ;
+ RETURN FALSE
END ;
- qprintf0 ('\n') ;
- CloseSource
- ELSE
- (* It is quite legitimate to implement a module in C (and pretend it was a M2
- implementation) providing that it is not the main program module and the
- definition module does not declare a hidden type when -fextended-opaque
- is used. *)
- IF (NOT WholeProgram) OR (Sym=Main) OR IsHiddenTypeDeclared (Sym)
- THEN
- (* Unrecoverable error. *)
- MetaErrorString1 (Sprintf1 (InitString ('file {%%1EUAF%s} containing module {%%1a} cannot be found'),
- FileName), Sym) ;
- END
+ CloseSource ;
+ MergeDeps (DepContent, ChildDep, LibName)
END
END
- ELSIF GenModuleList
+ END
+ END ;
+ SymName := KillString (SymName) ;
+ FileName := KillString (FileName) ;
+ LibName := KillString (LibName) ;
+ RETURN TRUE
+END Pass0CheckMod ;
+
+
+(*
+ DoPass0 -
+*)
+
+PROCEDURE DoPass0 (filename: String) ;
+VAR
+ sym : CARDINAL ;
+ i : CARDINAL ;
+ PPSource : String ;
+BEGIN
+ P0Init ;
+ SetPassToPass0 ;
+ (* Maybe preprocess the main file. *)
+ DepOutput := CreateDepFilename (filename) ;
+ PPSource := PreprocessModule (filename, TRUE, FALSE, DepOutput) ;
+ DepContent := ReadDepContents (filename, DepOutput) ;
+ PeepInto (PPSource) ;
+ i := 1 ;
+ sym := GetModuleNo (i) ;
+ qprintf1 ('Compiling: %s\n', PPSource) ;
+ IF Debugging
+ THEN
+ DumpPathName ('DoPass0')
+ END ;
+ IF Verbose
+ THEN
+ fprintf1 (StdOut, 'Compiling: %s\n', PPSource)
+ END ;
+ qprintf0 ('Pass 0: lexical analysis, parsing, modules and associated filenames\n') ;
+ WHILE sym # NulSym DO
+ IF NOT Pass0CheckDef (sym)
THEN
- IF NOT IsDefinitionForC (Sym)
- THEN
- (* The implementation is only useful if -fgen-module-list= is
- used and we do not insist upon it. *)
- LibName := NIL ;
- IF FindSourceModFile (SymName, FileName, LibName)
- THEN
- PutLibName (Sym, makekey (string (LibName))) ;
- qprintf2 (' Module %-20s : %s' , SymName, FileName) ;
- qprintLibName (LibName) ;
- qprintf0 (' (linking)\n') ;
- IF OpenSource (AssociateModule (PreprocessModule (FileName, FALSE), Sym))
- THEN
- PutModLink (Sym, TRUE) ; (* This source is only used to determine link time info. *)
- IF NOT P0SyntaxCheck.CompilationUnit ()
- THEN
- WriteFormat0 ('compilation failed') ;
- CloseSource ;
- RETURN
- END ;
- CloseSource
- END
- END
- END
+ RETURN
+ END ;
+ IF NOT Pass0CheckMod (sym, PPSource)
+ THEN
+ RETURN
END ;
- SymName := KillString (SymName) ;
- FileName := KillString (FileName) ;
- LibName := KillString (LibName) ;
INC (i) ;
- Sym := GetModuleNo (i)
+ sym := GetModuleNo (i)
END ;
SetPassToNoPass
END DoPass0 ;
@@ -706,5 +1252,7 @@ END DoPass3 ;
BEGIN
- ModuleType := None
+ ModuleType := None ;
+ DepContent := NIL ;
+ DepOutput := NIL
END M2Comp.
diff --git a/gcc/m2/gm2-compiler/M2GCCDeclare.mod b/gcc/m2/gm2-compiler/M2GCCDeclare.mod
index 87ca0da..2e5f60f 100644
--- a/gcc/m2/gm2-compiler/M2GCCDeclare.mod
+++ b/gcc/m2/gm2-compiler/M2GCCDeclare.mod
@@ -97,6 +97,7 @@ FROM SymbolTable IMPORT NulSym,
IsGnuAsm, IsGnuAsmVolatile, IsObject, IsTuple,
IsError, IsHiddenType, IsVarHeap,
IsComponent, IsPublic, IsExtern, IsCtor,
+ IsImport, IsImportStatement,
GetMainModule, GetBaseModule, GetModule, GetLocalSym,
PutModuleFinallyFunction,
GetProcedureScope, GetProcedureQuads,
@@ -156,8 +157,6 @@ FROM m2decl IMPORT BuildIntegerConstant, BuildStringConstant, BuildCStringConsta
BuildStartFunctionDeclaration,
BuildParameterDeclaration, BuildEndFunctionDeclaration,
DeclareKnownVariable, GetBitsPerBitset, BuildPtrToTypeString ;
-(* DeclareM2linkStaticInitialization,
- DeclareM2linkForcedModuleInitOrder ; *)
FROM m2type IMPORT MarkFunctionReferenced, BuildStartRecord, BuildStartVarient, BuildStartFunctionType,
BuildStartFieldVarient, BuildStartVarient, BuildStartType, BuildStartArrayType,
@@ -181,12 +180,13 @@ FROM m2type IMPORT MarkFunctionReferenced, BuildStartRecord, BuildStartVarient,
BuildEndFieldVarient, BuildArrayIndexType, BuildEndFunctionType,
BuildSetType, BuildEndVarient, BuildEndArrayType, InitFunctionTypeParameters,
BuildProcTypeParameterDeclaration, DeclareKnownType,
- ValueOutOfTypeRange, ExceedsTypeRange ;
+ ValueOutOfTypeRange, ExceedsTypeRange,
+ GetMaxFrom, GetMinFrom ;
FROM m2convert IMPORT BuildConvert ;
FROM m2expr IMPORT BuildSub, BuildLSL, BuildTBitSize, BuildAdd, BuildDivTrunc, BuildModTrunc,
- BuildSize, TreeOverflow,
+ BuildSize, TreeOverflow, AreConstantsEqual, CompareTrees,
GetPointerZero, GetIntegerZero, GetIntegerOne ;
FROM m2block IMPORT RememberType, pushGlobalScope, popGlobalScope, pushFunctionScope, popFunctionScope,
@@ -3511,6 +3511,44 @@ END DeclareEnumeration ;
(*
+ DeclareSubrangeNarrow - will return cardinal, integer, or type depending on whether
+ low..high fits in the C data type.
+*)
+
+PROCEDURE DeclareSubrangeNarrow (location: location_t;
+ high, low: CARDINAL; type: Tree) : Tree ;
+VAR
+ m2low, m2high,
+ lowtree,
+ hightree : Tree ;
+BEGIN
+ (* No zero alignment, therefore the front end will prioritize subranges to match
+ unsigned int, int, or ZTYPE assuming the low..high range fits. *)
+ lowtree := Mod2Gcc (low) ;
+ hightree := Mod2Gcc (high) ;
+ IF CompareTrees (lowtree, GetIntegerZero (location)) >= 0
+ THEN
+ (* low..high is always positive, can we use unsigned int? *)
+ m2high := GetMaxFrom (location, GetM2CardinalType ()) ;
+ IF CompareTrees (hightree, m2high) <= 0
+ THEN
+ RETURN GetM2CardinalType ()
+ END
+ ELSE
+ (* Must be a signed subrange base, can we use int? *)
+ m2high := GetMaxFrom (location, GetM2IntegerType ()) ;
+ m2low := GetMinFrom (location, GetM2IntegerType ()) ;
+ IF (CompareTrees (lowtree, m2low) >= 0) AND (CompareTrees (hightree, m2high) <= 0)
+ THEN
+ RETURN GetM2IntegerType ()
+ END
+ END ;
+ (* Fall back to the ZType. *)
+ RETURN type
+END DeclareSubrangeNarrow ;
+
+
+(*
DeclareSubrange - declare a subrange type.
*)
@@ -3518,15 +3556,30 @@ PROCEDURE DeclareSubrange (sym: CARDINAL) : Tree ;
VAR
type,
gccsym : Tree ;
+ align,
high, low: CARDINAL ;
location: location_t ;
BEGIN
location := TokenToLocation (GetDeclaredMod (sym)) ;
GetSubrange (sym, high, low) ;
- (* type := BuildSmallestTypeRange (location, Mod2Gcc(low), Mod2Gcc(high)) ; *)
+ align := GetAlignment (sym) ;
type := Mod2Gcc (GetSType (sym)) ;
+ IF align # NulSym
+ THEN
+ IF AreConstantsEqual (GetIntegerZero (location), Mod2Gcc (align))
+ THEN
+ type := BuildSmallestTypeRange (location, Mod2Gcc (low), Mod2Gcc (high))
+ ELSE
+ MetaError1 ('a non-zero alignment in a subrange type {%1Wa} is currently not implemented and will be ignored',
+ sym)
+ END
+ ELSIF GetSType (sym) = ZType
+ THEN
+ (* Can we narrow the ZType subrange to CARDINAL or INTEGER? *)
+ type := DeclareSubrangeNarrow (location, high, low, type)
+ END ;
gccsym := BuildSubrangeType (location,
- KeyToCharStar (GetFullSymName(sym)),
+ KeyToCharStar (GetFullSymName (sym)),
type, Mod2Gcc (low), Mod2Gcc (high)) ;
RETURN gccsym
END DeclareSubrange ;
@@ -3540,18 +3593,18 @@ PROCEDURE IncludeGetNth (l: List; sym: CARDINAL) ;
VAR
i: CARDINAL ;
BEGIN
- printf0(' ListOfSons [') ;
+ printf0 (' ListOfSons [') ;
i := 1 ;
- WHILE GetNth(sym, i)#NulSym DO
+ WHILE GetNth (sym, i) # NulSym DO
IF i>1
THEN
- printf0(', ') ;
+ printf0 (', ')
END ;
- IncludeItemIntoList(l, GetNth(sym, i)) ;
- PrintTerse(GetNth(sym, i)) ;
- INC(i)
+ IncludeItemIntoList (l, GetNth(sym, i)) ;
+ PrintTerse (GetNth (sym, i)) ;
+ INC (i)
END ;
- printf0(']')
+ printf0 (']')
END IncludeGetNth ;
@@ -4216,9 +4269,15 @@ BEGIN
ELSIF IsAModula2Type(sym)
THEN
printf2('sym %d IsAModula2Type (%a)', sym, n)
- ELSIF IsGnuAsmVolatile(sym)
+ ELSIF IsGnuAsm(sym)
+ THEN
+ printf2('sym %d IsGnuAsm (%a)', sym, n)
+ ELSIF IsImport (sym)
+ THEN
+ printf1('sym %d IsImport', sym)
+ ELSIF IsImportStatement (sym)
THEN
- printf2('sym %d IsGnuAsmVolatile (%a)', sym, n)
+ printf1('sym %d IsImportStatement', sym)
END ;
IF IsHiddenType(sym)
@@ -5314,8 +5373,8 @@ END WalkEnumerationDependants ;
PROCEDURE WalkSubrangeDependants (sym: CARDINAL; p: WalkAction) ;
VAR
- type,
- high, low: CARDINAL ;
+ type, align,
+ high, low : CARDINAL ;
BEGIN
GetSubrange(sym, high, low) ;
CheckResolveSubrange (sym) ;
@@ -5326,7 +5385,12 @@ BEGIN
END ;
(* low and high are not types but constants and they are resolved by M2GenGCC *)
p(low) ;
- p(high)
+ p(high) ;
+ align := GetAlignment (sym) ;
+ IF align # NulSym
+ THEN
+ p(align)
+ END
END WalkSubrangeDependants ;
@@ -5338,6 +5402,7 @@ END WalkSubrangeDependants ;
PROCEDURE IsSubrangeDependants (sym: CARDINAL; q: IsAction) : BOOLEAN ;
VAR
result : BOOLEAN ;
+ align,
type,
high, low: CARDINAL ;
BEGIN
@@ -5358,6 +5423,11 @@ BEGIN
THEN
result := FALSE
END ;
+ align := GetAlignment(sym) ;
+ IF (align#NulSym) AND (NOT q(align))
+ THEN
+ result := FALSE
+ END ;
RETURN( result )
END IsSubrangeDependants ;
diff --git a/gcc/m2/gm2-compiler/M2Options.def b/gcc/m2/gm2-compiler/M2Options.def
index b70cd8f..65ab8e8 100644
--- a/gcc/m2/gm2-compiler/M2Options.def
+++ b/gcc/m2/gm2-compiler/M2Options.def
@@ -34,72 +34,6 @@ FROM SYSTEM IMPORT ADDRESS ;
FROM DynamicStrings IMPORT String ;
FROM m2linemap IMPORT location_t ;
-EXPORT QUALIFIED SetReturnCheck, SetNilCheck, SetCaseCheck,
- SetCheckAll, SetVerboseUnbounded, SetQuiet, SetCpp, GetCpp,
- (* SetMakeall, SetMakeall0, SetIncludePath, *) SetAutoInit,
- SetUnboundedByReference,
- SetSearchPath, SetISO, SetPIM, SetPIM2, SetPIM3, SetPIM4,
- SetPositiveModFloor, SetCompilerDebugging, SetExceptions,
- SetStyle, SetPedantic, SetPedanticParamNames, SetPedanticCast,
- SetExtendedOpaque, SetXCode, SetQuadDebugging, SetSources,
- SetDumpSystemExports,
- SetSwig, SetOptimizing, SetForcedLocation,
- SetCC1Quiet, SetWholeProgram, SetDebugTraceQuad, SetDebugTraceAPI,
- SetVerbose, SetM2g, GetM2g,
- GetISO, GetPIM, GetPIM2, GetPIM3, GetPIM4,
- GetPositiveModFloor,
- SetFloatValueCheck, GetFloatValueCheck,
- SetWholeValueCheck, GetWholeValueCheck,
- SetLowerCaseKeywords,
- SetIndex, SetRange, SetWholeDiv, SetStrictTypeChecking,
- Setc, Getc, SetPPOnly, GetPPOnly,
- SetUselist, GetUselist, GetUselistFilename,
- SetShared,
-
- Iso, Pim, Pim2, Pim3, Pim4,
- PPonly, cflag,
- PositiveModFloorDiv,
- Pedantic, Verbose, Statistics,
- UnboundedByReference, VerboseUnbounded,
- Profiling, Coding, Optimizing,
- OptimizeBasicBlock, OptimizeUncalledProcedures,
- OptimizeCommonSubExpressions,
- StyleChecking, WholeProgram,
- NilChecking,
- WholeDivChecking, WholeValueChecking,
- IndexChecking, RangeChecking,
- ReturnChecking, CaseElseChecking,
- AutoInit,
- VariantValueChecking, CaseEnumChecking,
- UnusedVariableChecking, UnusedParameterChecking,
- UninitVariableChecking, SetUninitVariableChecking,
- UninitVariableConditionalChecking,
- SetUnusedVariableChecking, SetUnusedParameterChecking,
- Quiet, LineDirectives, StrictTypeChecking,
- CPreProcessor, Xcode, ExtendedOpaque,
- LowerCaseKeywords,
- PedanticParamNames, PedanticCast,
- DisplayQuadruples, DebugTraceQuad, DebugTraceAPI,
- CompilerDebugging, GenerateDebugging, GenerateLineDebug,
- DumpSystemExports, GenerateSwig, Exceptions,
- OverrideLocation, FinaliseOptions,
- DebugBuiltins, setdefextension, setmodextension,
- SetStatistics, SetWall,
- SetSaveTemps, SetSaveTempsDir, SaveTemps, GetSaveTempsDir,
- SetDumpDir, GetDumpDir, GenModuleList,
- CppArg, CppCommandLine, CppRemember,
- SetDebugFunctionLineNumbers, DebugFunctionLineNumbers,
- SetGenerateStatementNote, GenerateStatementNote,
- ScaffoldDynamic, ScaffoldStatic,
- SetScaffoldDynamic, SetScaffoldStatic,
- SetScaffoldMain, ScaffoldMain,
- SetRuntimeModuleOverride, GetRuntimeModuleOverride,
- SetGenModuleList, GetGenModuleFilename, SharedFlag,
- SetB, GetB, SetMD, GetMD, SetMMD, GetMMD, SetObj, GetObj,
- GetMQ, SetMQ, SetM2Prefix, GetM2Prefix,
- SetM2PathName, GetM2PathName, SetCaseEnumChecking,
- SetDebugBuiltins ;
-
VAR
PPonly, (* -E/M/MM present? - preprocessing only *)
@@ -191,6 +125,48 @@ VAR
(*
+ SetM - set the MFlag.
+*)
+
+PROCEDURE SetM (value: BOOLEAN) ;
+
+
+(*
+ GetM - set the MFlag.
+*)
+
+PROCEDURE GetM () : BOOLEAN ;
+
+
+(*
+ SetMM - set the MMFlag.
+*)
+
+PROCEDURE SetMM (value: BOOLEAN) ;
+
+
+(*
+ GetMM - set the MMFlag.
+*)
+
+PROCEDURE GetMM () : BOOLEAN ;
+
+
+(*
+ SetMF - assigns MFarg to the filename from arg.
+*)
+
+PROCEDURE SetMF (arg: ADDRESS) ;
+
+
+(*
+ GetMF - returns MFarg or NIL if never set.
+*)
+
+PROCEDURE GetMF () : ADDRESS ;
+
+
+(*
SetM2Prefix - assign arg to M2Prefix.
*)
@@ -231,6 +207,7 @@ PROCEDURE SetPPOnly (value: BOOLEAN) ;
PROCEDURE GetPPOnly () : BOOLEAN ;
+
(*
Setc - set the cflag (compile only flag -c) to value.
*)
@@ -251,53 +228,91 @@ PROCEDURE Getc () : BOOLEAN ;
PROCEDURE SetB (arg: ADDRESS) ;
+
(*
GetB - returns argument to the -B option as a string or NIL if it were never set.
*)
PROCEDURE GetB () : ADDRESS ;
+
(*
- SetMD - assigns MD file to arg.
+ SetMD - set the MDFlag to value.
*)
-PROCEDURE SetMD (arg: ADDRESS) ;
+PROCEDURE SetMD (value: BOOLEAN) ;
+
(*
- GetMD - returns the filename set for MD or NIL if it was never set.
+ GetMD - return the MDFlag.
*)
-PROCEDURE GetMD () : ADDRESS ;
+PROCEDURE GetMD () : BOOLEAN ;
(*
- SetMMD - assigns MMD file to arg.
+ SetMMD - set the MMDFlag to value.
*)
-PROCEDURE SetMMD (arg: ADDRESS) ;
+PROCEDURE SetMMD (value: BOOLEAN) ;
+
(*
- GetMMD - returns the filename set for MMD or NIL if it was never set.
+ GetMMD - return the MMDFlag.
*)
-PROCEDURE GetMMD () : ADDRESS ;
+PROCEDURE GetMMD () : BOOLEAN ;
+
(*
- SetMQ - assigns MQ file to arg.
+ SetMQ - adds a quoted target arg to the DepTarget sentence.
*)
PROCEDURE SetMQ (arg: ADDRESS) ;
+
(*
- GetMQ - returns the filename set for MQ or NIL if it was never set.
+ GetMQ - returns a C string containing all the -MQ arg values.
*)
PROCEDURE GetMQ () : ADDRESS ;
+
(*
- SetScaffoldDynamic - set the -fscaffold-dynamic flag.
+ SetMT - adds a target arg to the DepTarget sentence.
+*)
+
+PROCEDURE SetMT (arg: ADDRESS) ;
+
+
+(*
+ GetMT - returns a C string containing all the -MT arg values.
+*)
+
+PROCEDURE GetMT () : ADDRESS ;
+
+
+(*
+ GetDepTarget - returns the DepTarget as a C string.
+*)
+
+PROCEDURE GetDepTarget () : ADDRESS ;
+
+
+(*
+ SetMP - set the MPflag to value.
+*)
+
+PROCEDURE SetMP (value: BOOLEAN) ;
+
+
+(*
+ GetMP - get the MPflag.
*)
+PROCEDURE GetMP () : BOOLEAN ;
+
+
(*
SetObj - assigns given object file to arg.
*)
@@ -310,6 +325,7 @@ PROCEDURE SetObj (arg: ADDRESS) ;
PROCEDURE GetObj () : ADDRESS ;
+
(*
SetScaffoldDynamic - set the -fscaffold-dynamic flag.
*)
@@ -893,6 +909,7 @@ PROCEDURE SetSaveTempsDir (arg: ADDRESS) ;
PROCEDURE GetSaveTempsDir () : String ;
+
(*
SetDumpDir - Specify dump dir.
*)
diff --git a/gcc/m2/gm2-compiler/M2Options.mod b/gcc/m2/gm2-compiler/M2Options.mod
index 9d72a10..ece2d91 100644
--- a/gcc/m2/gm2-compiler/M2Options.mod
+++ b/gcc/m2/gm2-compiler/M2Options.mod
@@ -36,7 +36,7 @@ FROM m2configure IMPORT FullPathCPP ;
FROM DynamicStrings IMPORT String, Length, InitString, Mark, Slice, EqualArray,
InitStringCharStar, ConCatChar, ConCat, KillString,
- Dup, string,
+ Dup, string, char,
PushAllocation, PopAllocationExemption,
InitStringDB, InitStringCharStarDB,
InitStringCharDB, MultDB, DupDB, SliceDB ;
@@ -58,9 +58,10 @@ VAR
M2Prefix,
M2PathName,
Barg,
- MDarg,
- MMDarg,
- MQarg,
+ MFarg,
+ MTFlag,
+ MQFlag,
+ DepTarget,
CmdLineObj,
SaveTempsDir,
DumpDir,
@@ -68,6 +69,11 @@ VAR
UselistFilename,
RuntimeModuleOverride,
CppArgs : String ;
+ MFlag,
+ MMFlag,
+ MPFlag,
+ MDFlag,
+ MMDFlag,
UselistFlag,
CC1Quiet,
SeenSources : BOOLEAN ;
@@ -185,73 +191,228 @@ END GetB ;
(*
- SetMD - assigns MDarg to the filename from arg.
- This overrides any previous MMD.
+ SetM - set the MFlag.
*)
-PROCEDURE SetMD (arg: ADDRESS) ;
+PROCEDURE SetM (value: BOOLEAN) ;
BEGIN
- MMDarg := KillString (MMDarg) ;
- MDarg := KillString (MDarg) ;
- MDarg := InitStringCharStar (arg)
+ MFlag := value
+END SetM ;
+
+
+(*
+ GetM - return the MFlag.
+*)
+
+PROCEDURE GetM () : BOOLEAN ;
+BEGIN
+ RETURN MFlag
+END GetM ;
+
+
+(*
+ SetMM - set the MMFlag.
+*)
+
+PROCEDURE SetMM (value: BOOLEAN) ;
+BEGIN
+ MMFlag := value
+END SetMM ;
+
+
+(*
+ GetMM - return the MMFlag.
+*)
+
+PROCEDURE GetMM () : BOOLEAN ;
+BEGIN
+ RETURN MMFlag
+END GetMM ;
+
+
+(*
+ SetMD - set the MDFlag to value.
+*)
+
+PROCEDURE SetMD (value: BOOLEAN) ;
+BEGIN
+ MDFlag := value
END SetMD ;
(*
- GetMD - returns MDarg filename as a c-string or NIL if it was never set.
+ GetMD - return the MDFlag.
*)
-PROCEDURE GetMD () : ADDRESS ;
+PROCEDURE GetMD () : BOOLEAN ;
BEGIN
- RETURN string (MDarg)
+ RETURN MDFlag
END GetMD ;
(*
- SetMMD - assigns MMDarg to the filename from arg.
- This overrides any previous MD.
+ SetMMD - set the MMDFlag to value.
*)
-PROCEDURE SetMMD (arg: ADDRESS) ;
+PROCEDURE SetMMD (value: BOOLEAN) ;
BEGIN
- MDarg := KillString (MDarg) ;
- MMDarg := KillString (MMDarg) ;
- MMDarg := InitStringCharStar (arg)
+ MMDFlag := value
END SetMMD ;
(*
- GetMMD - returns MMDarg filename as a c-string or NIL if it was never set.
+ GetMMD - return the MMDFlag.
*)
-PROCEDURE GetMMD () : ADDRESS ;
+PROCEDURE GetMMD () : BOOLEAN ;
BEGIN
- RETURN string (MMDarg)
+ RETURN MMDFlag
END GetMMD ;
(*
- SetMQ - assigns MQarg to the filename from arg.
+ SetMF - assigns MFarg to the filename from arg.
+*)
+
+PROCEDURE SetMF (arg: ADDRESS) ;
+BEGIN
+ MFarg := KillString (MFarg) ;
+ MFarg := InitStringCharStar (arg)
+END SetMF ;
+
+
+(*
+ GetMF - returns MFarg or NIL if never set.
+*)
+
+PROCEDURE GetMF () : ADDRESS ;
+BEGIN
+ RETURN string (MFarg)
+END GetMF ;
+
+
+(*
+ SetMP - set the MPFlag to value.
+*)
+
+PROCEDURE SetMP (value: BOOLEAN) ;
+BEGIN
+ MPFlag := value
+END SetMP ;
+
+
+(*
+ GetMP - get the MPFlag.
+*)
+
+PROCEDURE GetMP () : BOOLEAN ;
+BEGIN
+ RETURN MPFlag
+END GetMP ;
+
+
+(*
+ AddWord - concats a word to sentence inserting a space if necessary.
+ sentence is returned. sentence will be created if it is NIL.
+*)
+
+PROCEDURE AddWord (sentence, word: String) : String ;
+BEGIN
+ IF word # NIL
+ THEN
+ IF sentence = NIL
+ THEN
+ sentence := Dup (word)
+ ELSE
+ sentence := ConCatChar (sentence, ' ') ;
+ sentence := ConCat (sentence, word)
+ END
+ END ;
+ RETURN sentence
+END AddWord ;
+
+
+(*
+ QuoteTarget - quote the '$' character.
+*)
+
+PROCEDURE QuoteTarget (target: String) : String ;
+VAR
+ quoted: String ;
+ i, n : CARDINAL ;
+BEGIN
+ quoted := InitString ('') ;
+ i := 0 ;
+ n := Length (target) ;
+ WHILE i < n DO
+ CASE char (target, i) OF
+
+ '$': quoted := ConCat (quoted, Mark (InitString ('$$')))
+
+ ELSE
+ quoted := ConCatChar (quoted, char (target, i))
+ END ;
+ INC (i)
+ END ;
+ RETURN quoted
+END QuoteTarget ;
+
+
+(*
+ SetMQ - adds a quoted target arg to the DepTarget sentence.
*)
PROCEDURE SetMQ (arg: ADDRESS) ;
BEGIN
- MQarg := KillString (MQarg) ;
- MQarg := InitStringCharStar (arg)
+ DepTarget := AddWord (DepTarget, QuoteTarget (InitStringCharStar (arg))) ;
+ MQFlag := AddWord (MQFlag, Mark (InitString ('-MQ'))) ;
+ MQFlag := AddWord (MQFlag, Mark (InitStringCharStar (arg)))
END SetMQ ;
(*
- GetMMD - returns MQarg filename as a c-string or NIL if it was never set.
+ GetMQ - returns a C string containing all the -MQ arg values.
*)
PROCEDURE GetMQ () : ADDRESS ;
BEGIN
- RETURN string (MQarg)
+ RETURN string (MQFlag)
END GetMQ ;
(*
+ SetMT - adds a target arg to the DepTarget sentence.
+*)
+
+PROCEDURE SetMT (arg: ADDRESS) ;
+BEGIN
+ DepTarget := AddWord (DepTarget, InitStringCharStar (arg)) ;
+ MTFlag := AddWord (MTFlag, Mark (InitString ('-MT'))) ;
+ MTFlag := AddWord (MTFlag, Mark (InitStringCharStar (arg)))
+END SetMT ;
+
+
+(*
+ GetMT - returns a C string containing all the -MT arg values.
+*)
+
+PROCEDURE GetMT () : ADDRESS ;
+BEGIN
+ RETURN string (MTFlag)
+END GetMT ;
+
+
+(*
+ GetDepTarget - returns the DepTarget as a C string.
+*)
+
+PROCEDURE GetDepTarget () : ADDRESS ;
+BEGIN
+ RETURN string (DepTarget)
+END GetDepTarget ;
+
+
+(*
SetObj - assigns CmdLineObj to the filename from arg.
*)
@@ -1498,14 +1659,20 @@ BEGIN
GenModuleListFilename := NIL ;
SharedFlag := FALSE ;
Barg := NIL ;
- MDarg := NIL ;
- MMDarg := NIL ;
- MQarg := NIL ;
+ MDFlag := FALSE ;
+ MMDFlag := FALSE ;
+ DepTarget := NIL ;
+ MPFlag := FALSE ;
SaveTempsDir := NIL ;
DumpDir := NIL ;
UninitVariableChecking := FALSE ;
UninitVariableConditionalChecking := FALSE ;
CaseEnumChecking := FALSE ;
+ MFlag := FALSE ;
+ MMFlag := FALSE ;
+ MFarg := NIL ;
+ MTFlag := NIL ;
+ MQFlag := NIL ;
M2Prefix := InitString ('') ;
M2PathName := InitString ('')
END M2Options.
diff --git a/gcc/m2/gm2-compiler/M2Preprocess.def b/gcc/m2/gm2-compiler/M2Preprocess.def
index 0258580..0683018 100644
--- a/gcc/m2/gm2-compiler/M2Preprocess.def
+++ b/gcc/m2/gm2-compiler/M2Preprocess.def
@@ -32,7 +32,6 @@ DEFINITION MODULE M2Preprocess ;
*)
FROM DynamicStrings IMPORT String ;
-EXPORT QUALIFIED PreprocessModule ;
(*
@@ -43,9 +42,31 @@ EXPORT QUALIFIED PreprocessModule ;
If preprocessing occurs then a temporary file is created
and its name is returned.
All temporary files will be deleted when the compiler exits.
+ outputdep is the filename which will contain the dependency
+ info if -M, -MM is provided. outputdep can be NIL in which case
+ it is ignored.
*)
-PROCEDURE PreprocessModule (filename: String; isMain: BOOLEAN) : String ;
+PROCEDURE PreprocessModule (filename: String;
+ topSource, deleteDep: BOOLEAN;
+ outputDep: String) : String ;
+
+
+(*
+ MakeSaveTempsFileNameExt - creates and return the temporary filename.ext.
+ in the current working directory unless
+ SaveTempsDir = obj, when we put it in the dumpdir
+ if that is specified (or fallback to '.' if not).
+*)
+
+PROCEDURE MakeSaveTempsFileNameExt (filename, ext: String) : String ;
+
+
+(*
+ OnExitDelete - when the application finishes delete filename.
+*)
+
+PROCEDURE OnExitDelete (filename: String) : String ;
END M2Preprocess.
diff --git a/gcc/m2/gm2-compiler/M2Preprocess.mod b/gcc/m2/gm2-compiler/M2Preprocess.mod
index d53228b..189101e 100644
--- a/gcc/m2/gm2-compiler/M2Preprocess.mod
+++ b/gcc/m2/gm2-compiler/M2Preprocess.mod
@@ -33,22 +33,35 @@ FROM libc IMPORT system, exit, unlink, printf, atexit ;
FROM Lists IMPORT List, InitList, KillList, IncludeItemIntoList, ForeachItemInListDo ;
FROM FIO IMPORT StdErr, StdOut ;
FROM M2Printf IMPORT fprintf1 ;
-FROM M2Options IMPORT Verbose, PPonly, GetObj, GetMD, GetMMD, GetMQ,
- CppCommandLine, SaveTemps, GetSaveTempsDir, GetDumpDir ;
+
+FROM M2Options IMPORT Verbose, PPonly, GetObj, GetMD, GetMMD, GetCpp, GetMQ,
+ CppCommandLine, SaveTemps, GetSaveTempsDir, GetDumpDir,
+ GetM, GetMM ;
+
FROM NameKey IMPORT Name, MakeKey, KeyToCharStar, makekey ;
+CONST
+ Debugging = FALSE ;
+
VAR
ListOfFiles: List ;
(*
- OnExitDelete -
+ OnExitDelete - when the application finishes delete filename.
*)
PROCEDURE OnExitDelete (filename: String) : String ;
BEGIN
- IncludeItemIntoList (ListOfFiles, makekey (filename)) ;
+ IF filename # NIL
+ THEN
+ IF Debugging
+ THEN
+ printf ("scheduling removal: %s\n", string (filename))
+ END ;
+ IncludeItemIntoList (ListOfFiles, makekey (string (filename)))
+ END ;
RETURN filename
END OnExitDelete ;
@@ -62,6 +75,10 @@ VAR
n: Name ;
BEGIN
n := w ;
+ IF Debugging
+ THEN
+ printf ("removing: %s\n", KeyToCharStar (n))
+ END ;
IF unlink (KeyToCharStar (n)) # 0
THEN
END
@@ -98,153 +115,174 @@ END GetFileName ;
(*
- Return basename.
+ MakeSaveTempsFileName - return a temporary file like
+ "./filename.{def,mod}.m2i" in the current working directory unless
+ SaveTempsDir = obj, when we put it in the dumpdir if that is specified
+ (or fallback to '.' if not).
+ We have to keep the original extension because that disambiguates .def
+ and .mod files (otherwise, we'd need two 'preprocessed' extensions).
*)
-(*
-PROCEDURE BaseName (Path: String) : String ;
-VAR
- ext,
- basename: INTEGER ;
+PROCEDURE MakeSaveTempsFileName (filename: String) : String ;
BEGIN
- basename := RIndex(Path, '/', 0) ;
- IF basename=-1
- THEN
- basename := 0
- ELSE
- basename := basename + 1
- END ;
- ext := RIndex(Path, '.', 0) ;
- IF ext=-1
- THEN
- ext := 0
- END ;
- RETURN Dup (Slice(Path, basename, ext))
-END BaseName ;
-*)
+ RETURN MakeSaveTempsFileNameExt (filename, InitString ('.m2i'))
+END MakeSaveTempsFileName ;
+
(*
- MakeSaveTempsFileName - return a temporary file like
- "./filename.{def,mod}.m2i" in the CWD unless SaveTempsDir = obj,
- when we put it in the dumpdir if that is specified (or fallback to '.'
- if not).
- We have to keep the original extension because that disambiguates .def
- and .mod files (otherwise, we'd need two 'preprocessed' extensions).
+ MakeSaveTempsFileNameExt - creates and return the temporary filename.ext.
+ in the current working directory unless
+ SaveTempsDir = obj, when we put it in the dumpdir
+ if that is specified (or fallback to '.' if not).
*)
-PROCEDURE MakeSaveTempsFileName (filename: String) : String ;
+PROCEDURE MakeSaveTempsFileNameExt (filename, ext: String) : String ;
VAR
NewName,
DumpDir,
NewDir : String ;
BEGIN
- NewName := ConCat (GetFileName (filename), InitString ('.m2i')) ;
- NewDir := GetSaveTempsDir () ;
- DumpDir := GetDumpDir () ;
-(* IF Verbose
+ NewName := ConCat (Dup (GetFileName (filename)), ext) ;
+ NewDir := Dup (GetSaveTempsDir ()) ;
+ DumpDir := Dup (GetDumpDir ()) ;
+ IF Debugging
THEN
fprintf1 (StdOut, "newname: %s", NewName) ;
fprintf1 (StdOut, " NewDir: %s", NewDir) ;
fprintf1 (StdOut, " DumpDir: %s\n", DumpDir)
END ;
-*)
IF (NewDir#NIL) AND EqualArray (NewDir, 'obj') AND (DumpDir#NIL)
THEN
- RETURN Dup (ConCat (DumpDir, NewName))
+ RETURN ConCat (DumpDir, NewName)
ELSE
- RETURN Dup (ConCat (InitString ('./'), NewName))
+ RETURN ConCat (InitString ('./'), NewName)
END ;
-END MakeSaveTempsFileName ;
+END MakeSaveTempsFileNameExt ;
(*
- PreprocessModule - preprocess a file, filename, returning the new filename
- of the preprocessed file.
- Preprocessing will only occur if requested by the user.
- If no preprocessing was requested then filename is returned.
- If preprocessing occurs then a temporary file is created
- and its name is returned.
- All temporary files will be deleted when the compiler exits.
+ BuildCommandLineExecute - build the cpp command line and execute the command and return
+ the tempfile containing the preprocessed source.
*)
-PROCEDURE PreprocessModule (filename: String; isMain: BOOLEAN) : String ;
+PROCEDURE BuildCommandLineExecute (filename: String;
+ topSource, deleteDep: BOOLEAN;
+ command, outputdep: String) : String ;
VAR
tempfile,
- command,
commandLine: String ;
BEGIN
- command := CppCommandLine () ;
- IF (command = NIL) OR EqualArray (command, '')
+ commandLine := Dup (command) ;
+ tempfile := NIL ;
+ (* We support MD and MMD for the main file only, at present. *)
+ IF topSource OR PPonly
THEN
- RETURN Dup (filename)
- ELSE
- commandLine := Dup (command) ;
- tempfile := NIL ;
- (* We support MD and MMD for the main file only, at present. *)
- IF isMain OR PPonly
+ IF GetMD ()
THEN
- IF GetMD () # NIL
- THEN
- tempfile := ConCat( Mark (InitString(' -MD ')),
- InitStringCharStar (GetMD ()))
- ELSIF GetMMD () # NIL
- THEN
- tempfile := ConCat( Mark (InitString(' -MMD ')),
- InitStringCharStar (GetMMD ()))
- END ;
- IF tempfile#NIL
- THEN
- commandLine := ConCat (Dup (commandLine), Dup (tempfile)) ;
- (* We can only add MQ if we already have an MD/MMD. *)
- IF GetMQ () # NIL
- THEN
- tempfile := ConCat( Mark (InitString(' -MQ ')),
- InitStringCharStar (GetMQ ())) ;
- commandLine := ConCat (Dup (commandLine), Dup (tempfile))
- END ;
- END ;
+ tempfile := ConCat (InitString(' -MD '), outputdep)
+ ELSIF GetMMD ()
+ THEN
+ tempfile := ConCat (InitString(' -MMD '), outputdep)
END ;
- (* The output file depends on whether we are in stand-alone PP mode, and
- if an output file is specified. *)
- tempfile := NIL ;
- IF PPonly
+ IF tempfile#NIL
THEN
- IF GetObj () # NIL
+ commandLine := ConCat (Dup (commandLine), Dup (tempfile)) ;
+ (* We can only add MQ if we already have an MD/MMD. *)
+ IF GetMQ () # NIL
THEN
- tempfile := InitStringCharStar (GetObj ())
- END ;
- ELSIF SaveTemps
+ tempfile := InitStringCharStar (GetMQ ()) ;
+ commandLine := ConCat (Dup (commandLine), Dup (tempfile))
+ END
+ END
+ END ;
+ (* The output file depends on whether we are in stand-alone PP mode, and
+ if an output file is specified. *)
+ tempfile := NIL ;
+ IF PPonly
+ THEN
+ IF GetObj () # NIL
THEN
- tempfile := MakeSaveTempsFileName (filename)
- ELSE
- tempfile := InitStringCharStar (make_temp_file (KeyToCharStar (MakeKey('.m2i'))))
- END ;
- commandLine := ConCat (ConCatChar (Dup (commandLine), ' '), filename) ;
- IF tempfile # NIL
+ tempfile := InitStringCharStar (GetObj ())
+ END
+ ELSIF SaveTemps
+ THEN
+ tempfile := MakeSaveTempsFileName (filename)
+ ELSE
+ tempfile := InitStringCharStar (make_temp_file (KeyToCharStar (MakeKey('.m2i'))))
+ END ;
+ commandLine := ConCat (ConCatChar (Dup (commandLine), ' '), filename) ;
+ IF tempfile # NIL
+ THEN
+ commandLine := ConCat (ConCat (Dup (commandLine),
+ Mark (InitString(' -o '))), tempfile) ;
+ END ;
+ IF (outputdep # NIL) AND (Length (outputdep) > 0) AND (GetM () OR GetMM ())
+ THEN
+ commandLine := ConCat (commandLine, ConCat (Mark (InitString (' -MF ')),
+ outputdep)) ;
+ IF deleteDep AND (NOT SaveTemps)
THEN
- commandLine := ConCat (ConCat (Dup (commandLine),
- Mark (InitString(' -o '))), tempfile) ;
- END ;
-(* use pexecute in the future
- res := pexecute(string(Slice(commandLine, 0, Index(commandLine, ' ', 0))), etc etc );
+ outputdep := OnExitDelete (outputdep)
+ END
+ END ;
+ (* use pexecute in the future
+ res := pexecute(string(Slice(commandLine, 0, Index(commandLine, ' ', 0))), etc etc ); *)
+ (* for now we'll use system *)
+ IF Verbose
+ THEN
+ fprintf1 (StdOut, "preprocess: %s\n", commandLine)
+ END ;
+ IF system (string (commandLine)) # 0
+ THEN
+ fprintf1 (StdErr, 'C preprocessor failed when preprocessing %s\n', filename) ;
+ exit (1)
+ END ;
+ commandLine := KillString (commandLine) ;
+ IF SaveTemps
+ THEN
+ RETURN tempfile
+ ELSE
+ RETURN OnExitDelete (tempfile)
+ END
+END BuildCommandLineExecute ;
+
+
+(*
+ PreprocessModule - preprocess a file, filename, returning the new filename
+ of the preprocessed file.
+ Preprocessing will only occur if requested by the user.
+ If no preprocessing was requested then filename is returned.
+ If preprocessing occurs then a temporary file is created
+ and its name is returned.
+ All temporary files will be deleted when the compiler exits.
+ outputdep is the filename which will contain the dependency
+ info if -M, -MM is provided. outputdep can be NIL in which case
+ it is ignored.
*)
- (* for now we'll use system *)
- IF Verbose
- THEN
- fprintf1 (StdOut, "preprocess: %s\n", commandLine)
- END ;
- IF system (string (commandLine)) # 0
+
+PROCEDURE PreprocessModule (filename: String;
+ topSource, deleteDep: BOOLEAN;
+ outputDep: String) : String ;
+VAR
+ command: String ;
+BEGIN
+ IF GetCpp ()
+ THEN
+ command := CppCommandLine () ;
+ IF (command = NIL) OR EqualArray (command, '')
THEN
- fprintf1 (StdErr, 'C preprocessor failed when preprocessing %s\n', filename) ;
- exit (1)
+ RETURN Dup (filename)
END ;
- commandLine := KillString (commandLine) ;
- IF SaveTemps
+ command := BuildCommandLineExecute (filename, topSource, deleteDep,
+ command, outputDep) ;
+ IF command = NIL
THEN
- RETURN tempfile
+ RETURN filename
ELSE
- RETURN OnExitDelete (tempfile)
+ RETURN command
END
+ ELSE
+ RETURN Dup (filename)
END
END PreprocessModule ;
diff --git a/gcc/m2/gm2-compiler/M2Quads.mod b/gcc/m2/gm2-compiler/M2Quads.mod
index 95ca15a..02a7db4 100644
--- a/gcc/m2/gm2-compiler/M2Quads.mod
+++ b/gcc/m2/gm2-compiler/M2Quads.mod
@@ -2594,7 +2594,7 @@ BEGIN
PushTtok (m2strnul, tok) ;
PushT (1) ;
BuildAdrFunction
-END BuildAdrFunction ;
+END BuildStringAdrParam ;
(*
@@ -4625,7 +4625,7 @@ BEGIN
BuildRange (InitForLoopEndRangeCheck (tsym, BySym)) ; (* --fixme-- pass endpostok. *)
IncQuad := NextQuad ;
(* we have explicitly checked using the above and also
- this addition can legally overflow if a cardinal type
+ this addition can legitimately overflow if a cardinal type
is counting down. The above test will generate a more
precise error message, so we suppress overflow detection
here. *)
@@ -4636,7 +4636,7 @@ BEGIN
BuildRange (InitForLoopEndRangeCheck (IdSym, BySym)) ;
IncQuad := NextQuad ;
(* we have explicitly checked using the above and also
- this addition can legally overflow if a cardinal type
+ this addition can legitimately overflow if a cardinal type
is counting down. The above test will generate a more
precise error message, so we suppress overflow detection
here. *)
@@ -5548,7 +5548,7 @@ END IsReallyPointer ;
(*
- LegalUnboundedParam - returns TRUE if the parameter, Actual, can legally be
+ LegalUnboundedParam - returns TRUE if the parameter, Actual, can legitimately be
passed to ProcSym, i, the, Formal, parameter.
*)
diff --git a/gcc/m2/gm2-compiler/M2Search.def b/gcc/m2/gm2-compiler/M2Search.def
index e77c754..6e16d69 100644
--- a/gcc/m2/gm2-compiler/M2Search.def
+++ b/gcc/m2/gm2-compiler/M2Search.def
@@ -89,6 +89,4 @@ PROCEDURE SetDefExtension (ext: String) ;
PROCEDURE SetModExtension (ext: String) ;
-
-
END M2Search.
diff --git a/gcc/m2/gm2-compiler/P2SymBuild.mod b/gcc/m2/gm2-compiler/P2SymBuild.mod
index 71f6b1c..a2e3eb1 100644
--- a/gcc/m2/gm2-compiler/P2SymBuild.mod
+++ b/gcc/m2/gm2-compiler/P2SymBuild.mod
@@ -1018,25 +1018,26 @@ VAR
type,
align : CARDINAL ;
BEGIN
- PopT(alignment) ;
- IF alignment=MakeKey('bytealignment')
+ PopT (alignment) ;
+ IF alignment = MakeKey ('bytealignment')
THEN
- PopT(align) ;
- PopT(type) ;
- IF align#NulSym
+ PopT (align) ;
+ PopT (type) ;
+ IF align # NulSym
THEN
- IF IsRecord(type) OR IsRecordField(type) OR IsType(type) OR IsArray(type) OR IsPointer(type)
+ IF IsRecord (type) OR IsRecordField (type) OR IsType (type) OR
+ IsArray (type) OR IsPointer( type) OR IsSubrange (type)
THEN
- PutAlignment(type, align)
+ PutAlignment (type, align)
ELSE
- MetaError1('not allowed to add an alignment attribute to type {%1ad}', type)
+ MetaError1 ('not allowed to add an alignment attribute to type {%1ad}', type)
END
END
- ELSIF alignment#NulName
+ ELSIF alignment # NulName
THEN
- WriteFormat1('unknown type alignment attribute, %a', alignment)
+ WriteFormat1 ('unknown type alignment attribute, %a', alignment)
ELSE
- PopT(type)
+ PopT (type)
END
END BuildTypeAlignment ;
diff --git a/gcc/m2/gm2-compiler/SymbolTable.mod b/gcc/m2/gm2-compiler/SymbolTable.mod
index dc41c12..c2ed90f 100644
--- a/gcc/m2/gm2-compiler/SymbolTable.mod
+++ b/gcc/m2/gm2-compiler/SymbolTable.mod
@@ -280,6 +280,7 @@ TYPE
Size : PtrToValue ; (* Size of subrange type. *)
Type : CARDINAL ; (* Index to type symbol for *)
(* the type of subrange. *)
+ Align : CARDINAL ; (* Alignment for this type. *)
ConstLitTree: SymbolTree ; (* constants of this type. *)
packedInfo : PackedInfo ; (* the equivalent packed type *)
oafamily : CARDINAL ; (* The oafamily for this sym *)
@@ -6152,6 +6153,7 @@ BEGIN
(* ConstExpression. *)
Type := NulSym ; (* Index to a type. Determines *)
(* the type of subrange. *)
+ Align := NulSym ; (* The alignment of this type. *)
InitPacked(packedInfo) ; (* not packed and no equivalent *)
InitTree(ConstLitTree) ; (* constants of this type. *)
Size := InitValue() ; (* Size determines the type size *)
@@ -7686,7 +7688,9 @@ BEGIN
PartialUnboundedSym : n := GetSymName(PartialUnbounded.Type) |
TupleSym : n := NulName |
GnuAsmSym : n := NulName |
- InterfaceSym : n := NulName
+ InterfaceSym : n := NulName |
+ ImportSym : n := NulName |
+ ImportStatementSym : n := NulName
ELSE
InternalError ('unexpected symbol type')
@@ -14600,10 +14604,11 @@ BEGIN
RecordFieldSym: RecordField.Align := align |
TypeSym : Type.Align := align |
ArraySym : Array.Align := align |
- PointerSym : Pointer.Align := align
+ PointerSym : Pointer.Align := align |
+ SubrangeSym : Subrange.Align := align
ELSE
- InternalError ('expecting record, field, pointer, type or an array symbol')
+ InternalError ('expecting record, field, pointer, type, subrange or an array symbol')
END
END
END PutAlignment ;
@@ -14628,10 +14633,11 @@ BEGIN
ArraySym : RETURN( Array.Align ) |
PointerSym : RETURN( Pointer.Align ) |
VarientFieldSym: RETURN( GetAlignment(VarientField.Parent) ) |
- VarientSym : RETURN( GetAlignment(Varient.Parent) )
+ VarientSym : RETURN( GetAlignment(Varient.Parent) ) |
+ SubrangeSym : RETURN( Subrange.Align )
ELSE
- InternalError ('expecting record, field, pointer, type or an array symbol')
+ InternalError ('expecting record, field, pointer, type, subrange or an array symbol')
END
END
END GetAlignment ;
diff --git a/gcc/m2/gm2-gcc/m2expr.cc b/gcc/m2/gm2-gcc/m2expr.cc
index 32222d2..bb56a57 100644
--- a/gcc/m2/gm2-gcc/m2expr.cc
+++ b/gcc/m2/gm2-gcc/m2expr.cc
@@ -2758,13 +2758,10 @@ noBitsRequired (tree values)
{
int bits = tree_floor_log2 (values);
- if (integer_pow2p (values))
- return m2decl_BuildIntegerConstant (bits + 1);
- else
- return m2decl_BuildIntegerConstant (bits + 1);
+ return m2decl_BuildIntegerConstant (bits + 1);
}
-/* getMax return the result of max(a, b). */
+/* getMax return the result of max (a, b). */
static tree
getMax (tree a, tree b)
@@ -2778,8 +2775,8 @@ getMax (tree a, tree b)
/* calcNbits return the smallest number of bits required to
represent: min..max. */
-static tree
-calcNbits (location_t location, tree min, tree max)
+tree
+m2expr_calcNbits (location_t location, tree min, tree max)
{
int negative = false;
tree t = testLimits (location, m2type_GetIntegerType (), min, max);
@@ -2832,7 +2829,7 @@ m2expr_BuildTBitSize (location_t location, tree type)
TYPE_MAX_VALUE (type), false);
min = m2convert_BuildConvert (location, m2type_GetIntegerType (),
TYPE_MIN_VALUE (type), false);
- return calcNbits (location, min, max);
+ return m2expr_calcNbits (location, min, max);
case BOOLEAN_TYPE:
return m2expr_GetIntegerOne (location);
default:
diff --git a/gcc/m2/gm2-gcc/m2expr.def b/gcc/m2/gm2-gcc/m2expr.def
index e8027a6..e1ae799 100644
--- a/gcc/m2/gm2-gcc/m2expr.def
+++ b/gcc/m2/gm2-gcc/m2expr.def
@@ -721,4 +721,12 @@ PROCEDURE ConstantExpressionWarning (value: Tree) ;
PROCEDURE BuildAddAddress (location: location_t; op1, op2: Tree) : Tree ;
+(*
+ calcNbits - return the smallest number of bits required to
+ represent: min..max.
+*)
+
+PROCEDURE calcNbits (location: location_t; min, max: Tree) : Tree ;
+
+
END m2expr.
diff --git a/gcc/m2/gm2-gcc/m2expr.h b/gcc/m2/gm2-gcc/m2expr.h
index d15f00b..bf5e0b8 100644
--- a/gcc/m2/gm2-gcc/m2expr.h
+++ b/gcc/m2/gm2-gcc/m2expr.h
@@ -240,7 +240,7 @@ EXTERN tree m2expr_BuildAddAddress (location_t location, tree op1, tree op2);
EXTERN tree m2expr_BuildRDiv (location_t location, tree op1, tree op2,
bool needconvert);
EXTERN int m2expr_GetCstInteger (tree cst);
-
+EXTERN tree m2expr_calcNbits (location_t location, tree min, tree max);
EXTERN void m2expr_init (location_t location);
#undef EXTERN
diff --git a/gcc/m2/gm2-gcc/m2options.h b/gcc/m2/gm2-gcc/m2options.h
index 2ed2c9a..290b69c 100644
--- a/gcc/m2/gm2-gcc/m2options.h
+++ b/gcc/m2/gm2-gcc/m2options.h
@@ -124,12 +124,21 @@ EXTERN void M2Options_SetGenModuleList (bool value, const char *filename);
EXTERN void M2Options_SetShared (bool value);
EXTERN void M2Options_SetB (const char *arg);
EXTERN char *M2Options_GetB (void);
-EXTERN void M2Options_SetMD (const char *arg);
-EXTERN char *M2Options_GetMD (void);
-EXTERN void M2Options_SetMMD (const char *arg);
-EXTERN char *M2Options_GetMMD (void);
+EXTERN void M2Options_SetM (bool value);
+EXTERN bool M2Options_GetM (void);
+EXTERN void M2Options_SetMM (bool value);
+EXTERN bool M2Options_GetMM (void);
+EXTERN void M2Options_SetMD (bool value);
+EXTERN bool M2Options_GetMD (void);
+EXTERN void M2Options_SetMMD (bool value);
+EXTERN bool M2Options_GetMMD (void);
EXTERN void M2Options_SetMQ (const char *arg);
-EXTERN char *M2Options_GetMQ (void);
+EXTERN void M2Options_SetMF (const char *arg);
+EXTERN char *M2Options_GetMF (void);
+EXTERN void M2Options_SetMT (const char *arg);
+EXTERN void M2Options_SetMP (bool value);
+EXTERN bool M2Options_GetMP (void);
+EXTERN char *M2Options_GetDepTarget (void);
EXTERN void M2Options_SetObj (const char *arg);
EXTERN char *M2Options_GetObj (void);
EXTERN void M2Options_SetM2Prefix (const char *arg);
diff --git a/gcc/m2/gm2-gcc/m2type.cc b/gcc/m2/gm2-gcc/m2type.cc
index 86edde5..f6a0f07 100644
--- a/gcc/m2/gm2-gcc/m2type.cc
+++ b/gcc/m2/gm2-gcc/m2type.cc
@@ -894,22 +894,6 @@ m2type_GetCardinalAddressType (void)
return m2_cardinal_address_type_node;
}
-/* noBitsRequired returns the number of bits required to contain,
- values. How many bits are required to represent all numbers
- between: 0..values-1 */
-
-static tree
-noBitsRequired (tree values)
-{
- int bits = tree_floor_log2 (values);
-
- if (integer_pow2p (values))
- /* remember we start counting from zero. */
- return m2decl_BuildIntegerConstant (bits);
- else
- return m2decl_BuildIntegerConstant (bits + 1);
-}
-
#if 0
/* build_set_type creates a set type from the, domain, [low..high].
The values low..high all have type, range_type. */
@@ -1118,9 +1102,7 @@ m2type_BuildSmallestTypeRange (location_t location, tree low, tree high)
m2assert_AssertLocation (location);
low = fold (low);
high = fold (high);
- bits = fold (noBitsRequired (
- m2expr_BuildAdd (location, m2expr_BuildSub (location, high, low, false),
- m2expr_GetIntegerOne (location), false)));
+ bits = fold (m2expr_calcNbits (location, low, high));
return build_m2_specific_size_type (location, INTEGER_TYPE,
TREE_INT_CST_LOW (bits),
tree_int_cst_sgn (low) < 0);
@@ -2519,8 +2501,7 @@ m2type_BuildSubrangeType (location_t location, char *name, tree type,
error ("high bound for the subrange has overflowed");
/* First build a type with the base range. */
- range_type = build_range_type (type, TYPE_MIN_VALUE (type),
- TYPE_MAX_VALUE (type));
+ range_type = build_range_type (type, lowval, highval);
TYPE_UNSIGNED (range_type) = TYPE_UNSIGNED (type);
#if 0
diff --git a/gcc/m2/gm2-lang.cc b/gcc/m2/gm2-lang.cc
index 45b5fe2..7cf1185 100644
--- a/gcc/m2/gm2-lang.cc
+++ b/gcc/m2/gm2-lang.cc
@@ -131,7 +131,7 @@ gm2_langhook_init (void)
if (M2Options_GetPPOnly ())
{
- /* preprocess the file here. */
+ /* Preprocess the file here. */
gm2_langhook_parse_file ();
return false; /* Finish now, no further compilation. */
}
@@ -234,23 +234,54 @@ gm2_langhook_init_options (unsigned int decoded_options_count,
building_cpp_command = true;
}
M2Options_CppArg (opt, arg, (option->flags & CL_JOINED)
- && !(option->flags & CL_SEPARATE));
+ && !(option->flags & CL_SEPARATE));
break;
+
case OPT_M:
+ /* Output a rule suitable for make describing the dependencies of the
+ main source file. */
+ if (in_cpp_args)
+ {
+ gcc_checking_assert (building_cpp_command);
+ /* This is a preprocessor command. */
+ M2Options_CppArg (opt, arg, (option->flags & CL_JOINED)
+ && !(option->flags & CL_SEPARATE));
+ }
+ M2Options_SetPPOnly (value);
+ M2Options_SetM (value);
+ break;
+
case OPT_MM:
- gcc_checking_assert (building_cpp_command);
+ if (in_cpp_args)
+ {
+ gcc_checking_assert (building_cpp_command);
+ /* This is a preprocessor command. */
+ M2Options_CppArg (opt, arg, (option->flags & CL_JOINED)
+ && !(option->flags & CL_SEPARATE));
+ }
M2Options_SetPPOnly (value);
- /* This is a preprocessor command. */
- M2Options_CppArg (opt, arg, (option->flags & CL_JOINED)
- && !(option->flags & CL_SEPARATE));
+ M2Options_SetMM (value);
break;
- /* We can only use MQ when the command line is either PP-only, or
+ case OPT_MF:
+ if (!in_cpp_args)
+ M2Options_SetMF (arg);
+ break;
+
+ case OPT_MP:
+ M2Options_SetMP (value);
+ break;
+
+ /* We can only use MQ and MT when the command line is either PP-only, or
when there is a MD/MMD on it. */
case OPT_MQ:
M2Options_SetMQ (arg);
break;
+ case OPT_MT:
+ M2Options_SetMT (arg);
+ break;
+
case OPT_o:
M2Options_SetObj (arg);
break;
@@ -266,14 +297,23 @@ gm2_langhook_init_options (unsigned int decoded_options_count,
For now skip all plugins to avoid fails with the m2 one. */
break;
- /* Preprocessor arguments with a following filename, we add these
- back to the main file preprocess line, but not to dependents
- TODO Handle MF. */
+ /* Preprocessor arguments with a following filename. */
case OPT_MD:
- M2Options_SetMD (arg);
+ M2Options_SetMD (value);
+ if (value)
+ {
+ M2Options_SetM (true);
+ M2Options_SetMF (arg);
+ }
break;
+
case OPT_MMD:
- M2Options_SetMMD (arg);
+ M2Options_SetMMD (value);
+ if (value)
+ {
+ M2Options_SetMM (true);
+ M2Options_SetMF (arg);
+ }
break;
/* Modula 2 claimed options we pass to the preprocessor. */
@@ -744,7 +784,7 @@ gm2_langhook_post_options (const char **pfilename)
if (allow_libraries)
add_m2_import_paths (flibs);
- /* Returning false means that the backend should be used. */
+ /* Returning false means that the backend should be used. */
return M2Options_GetPPOnly ();
}
diff --git a/gcc/m2/gm2-libs-iso/SysClock.mod b/gcc/m2/gm2-libs-iso/SysClock.mod
index 56d5503..5f2c377 100644
--- a/gcc/m2/gm2-libs-iso/SysClock.mod
+++ b/gcc/m2/gm2-libs-iso/SysClock.mod
@@ -137,7 +137,8 @@ END daysInYear ;
(*
- ExtractDate - extracts the year, month, day from days.
+ ExtractDate - extracts the year, month, day from days, the
+ total number of days since 1970.
*)
PROCEDURE ExtractDate (days: LONGCARD;
@@ -145,28 +146,29 @@ PROCEDURE ExtractDate (days: LONGCARD;
VAR
testMonth,
testYear : CARDINAL ;
- testDays : LONGCARD ;
+ monthOfDays,
+ yearOfDays : LONGCARD ;
BEGIN
testYear := 1970 ;
LOOP
- testDays := daysInYear (31, 12, testYear) ;
- IF days < testDays
+ yearOfDays := daysInYear (31, 12, testYear) ;
+ IF days < yearOfDays
THEN
year := testYear ;
testMonth := 1 ;
LOOP
- testDays := daysInMonth (year, testMonth) ;
- IF days < testDays
+ monthOfDays := daysInMonth (year, testMonth) ;
+ IF days < monthOfDays
THEN
day := VAL (Day, days) + MIN (Day) ;
month := VAL (Month, testMonth) ;
RETURN
END ;
- DEC (days, testDays) ;
+ DEC (days, monthOfDays) ;
INC (testMonth)
END
ELSE
- DEC (days, testDays) ;
+ DEC (days, yearOfDays) ;
INC (testYear)
END
END
@@ -218,6 +220,8 @@ BEGIN
printf ("getclock = %ld\n", sec)
END ;
WITH userData DO
+ (* Here we keep dividing sec by max seconds, minutes, hours
+ to convert sec into total days since epoch. *)
second := VAL (Sec, DivMod (sec, MAX (Sec) + 1)) ;
minute := VAL (Min, DivMod (sec, MAX (Min) + 1)) ;
hour := VAL (Hour, DivMod (sec, MAX (Hour) + 1)) ;
diff --git a/gcc/m2/lang-specs.h b/gcc/m2/lang-specs.h
index a564779..5429363 100644
--- a/gcc/m2/lang-specs.h
+++ b/gcc/m2/lang-specs.h
@@ -23,11 +23,15 @@ along with GCC; see the file COPYING3. If not see
/* A spec for the 'integrated' preprocessor implementation for Modula-2. */
#define M2CPP \
- "%{E|M|MM|fcpp: %{E} -fcpp-begin " \
+ "%{E|M|MM|fcpp: %{E} %{MF} -fcpp-begin " \
" %{!E:-E} %(cpp_unique_options) -traditional-cpp -ansi " \
" -fcpp-end %{B*} %{save-temps*} ; \
: %{v} %I %{B*} %{save-temps*} } "
+#define MDMMD \
+ " %{MD:-MD %{!o:%b.d}%{o*:%.d%*} %{!MT:-MT %b%O} %{MT} %{MQ} %{MF}} " \
+ " %{MMD:-MMD %{!o:%b.d}%{o*:%.d%*} %{!MT:-MT %b%O} %{MT} %{MQ} %{MF}} "
+
/* We have three modes:
1. When the preprocessing step is explict and there is no following
compilation. Here we do a similar process to cc1 -E where most of
@@ -43,9 +47,7 @@ along with GCC; see the file COPYING3. If not see
"%{E|M|MM:\
cc1gm2 " M2CPP " %{!fcpp:-fcpp;:%{fcpp}} %{fm2-pathname*} %i } \
%{!E:%{!M:%{!MM:\
- cc1gm2 " M2CPP " %(cc1_options) %{fm2-pathname*} %i %{c} \
- %{!fcpp:%{MD|MMD|MF*: \
- %eto generate dependencies you must specify '-fcpp' }} \
+ cc1gm2 " M2CPP MDMMD " %(cc1_options) %{fm2-pathname*} %i %{c} \
%{!fsyntax-only:%(invoke_as)} \
}}}", 0, 0, 0},
{".m2i", "@modula-2-cpp-output", 0, 0, 0},
diff --git a/gcc/machmode.h b/gcc/machmode.h
index a22df60..efdb6e5 100644
--- a/gcc/machmode.h
+++ b/gcc/machmode.h
@@ -22,10 +22,10 @@ along with GCC; see the file COPYING3. If not see
typedef opt_mode<machine_mode> opt_machine_mode;
-extern CONST_MODE_SIZE poly_uint16_pod mode_size[NUM_MACHINE_MODES];
-extern CONST_MODE_PRECISION poly_uint16_pod mode_precision[NUM_MACHINE_MODES];
+extern CONST_MODE_SIZE poly_uint16 mode_size[NUM_MACHINE_MODES];
+extern CONST_MODE_PRECISION poly_uint16 mode_precision[NUM_MACHINE_MODES];
extern const unsigned short mode_inner[NUM_MACHINE_MODES];
-extern CONST_MODE_NUNITS poly_uint16_pod mode_nunits[NUM_MACHINE_MODES];
+extern CONST_MODE_NUNITS poly_uint16 mode_nunits[NUM_MACHINE_MODES];
extern CONST_MODE_UNIT_SIZE unsigned char mode_unit_size[NUM_MACHINE_MODES];
extern const unsigned short mode_unit_precision[NUM_MACHINE_MODES];
extern const unsigned short mode_next[NUM_MACHINE_MODES];
diff --git a/gcc/match.pd b/gcc/match.pd
index f0be325..a56838f 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1350,6 +1350,34 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& TYPE_PRECISION (TREE_TYPE (@0)) == 1)
(bit_ior @0 (bit_xor @1 { build_one_cst (type); }))))
+/* a | ((~a) ^ b) --> a | (~b) (alt version of the above 2) */
+(simplify
+ (bit_ior:c @0 (bit_xor:cs @1 @2))
+ (with { bool wascmp; }
+ (if (bitwise_inverted_equal_p (@0, @1, wascmp)
+ && (!wascmp || element_precision (type) == 1))
+ (bit_ior @0 (bit_not @2)))))
+
+/* a & ~(a ^ b) --> a & b */
+(simplify
+ (bit_and:c @0 (bit_not (bit_xor:c @0 @1)))
+ (bit_and @0 @1))
+
+/* a & (a == b) --> a & b (boolean version of the above). */
+(simplify
+ (bit_and:c @0 (nop_convert? (eq:c @0 @1)))
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
+ (bit_and @0 @1)))
+
+/* a & ((~a) ^ b) --> a & b (alt version of the above 2) */
+(simplify
+ (bit_and:c @0 (bit_xor:c @1 @2))
+ (with { bool wascmp; }
+ (if (bitwise_inverted_equal_p (@0, @1, wascmp)
+ && (!wascmp || element_precision (type) == 1))
+ (bit_and @0 @2))))
+
/* (a | b) | (a &^ b) --> a | b */
(for op (bit_and bit_xor)
(simplify
@@ -1522,6 +1550,14 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
@0))
+
+/* `a & (x | CST)` -> a if we know that (a & ~CST) == 0 */
+(simplify
+ (bit_and:c SSA_NAME@0 (bit_ior @1 INTEGER_CST@2))
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@2)) == 0)
+ @0))
+
/* x | C -> C if we know that x & ~C == 0. */
(simplify
(bit_ior SSA_NAME@0 INTEGER_CST@1)
@@ -5045,6 +5081,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* (v ? w : 0) ? a : b is just (v & w) ? a : b
Currently disabled after pass lvec because ARM understands
VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
+#if GIMPLE
+/* These can only be done in gimple as fold likes to convert:
+ (CMP) & N into (CMP) ? N : 0
+ and we try to match the same pattern again and again. */
(simplify
(vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
(if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
@@ -5079,6 +5119,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(vec_cond @0 @3 (vec_cond:s @1 @2 @3))
(if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and (bit_not @0) @1) @2 @3)))
+#endif
/* Canonicalize mask ? { 0, ... } : { -1, ...} to ~mask if the mask
types are compatible. */
@@ -5100,36 +5141,53 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(switch
(if (integer_zerop (@2))
(switch
- /* a ? 1 : 0 -> a if 0 and 1 are integral types. */
+ /* a ? 1 : 0 -> a if 0 and 1 are integral types. */
(if (integer_onep (@1))
(convert (convert:boolean_type_node @0)))
+ /* a ? -1 : 0 -> -a. */
+ (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@1))
+ (if (TYPE_PRECISION (type) == 1)
+ /* For signed 1-bit precision just cast bool to the type. */
+ (convert (convert:boolean_type_node @0))
+ (if (TREE_CODE (type) == BOOLEAN_TYPE)
+ (with {
+ tree intt = build_nonstandard_integer_type (TYPE_PRECISION (type),
+ TYPE_UNSIGNED (type));
+ }
+ (convert (negate (convert:intt (convert:boolean_type_node @0)))))
+ (negate (convert:type (convert:boolean_type_node @0))))))
/* a ? powerof2cst : 0 -> a << (log2(powerof2cst)) */
(if (INTEGRAL_TYPE_P (type) && integer_pow2p (@1))
(with {
tree shift = build_int_cst (integer_type_node, tree_log2 (@1));
}
- (lshift (convert (convert:boolean_type_node @0)) { shift; })))
- /* a ? -1 : 0 -> -a. No need to check the TYPE_PRECISION not being 1
- here as the powerof2cst case above will handle that case correctly. */
- (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@1))
- (negate (convert:type (convert:boolean_type_node @0))))))
+ (lshift (convert (convert:boolean_type_node @0)) { shift; })))))
(if (integer_zerop (@1))
(switch
- /* a ? 0 : 1 -> !a. */
+ /* a ? 0 : 1 -> !a. */
(if (integer_onep (@2))
(convert (bit_xor (convert:boolean_type_node @0) { boolean_true_node; })))
- /* a ? powerof2cst : 0 -> (!a) << (log2(powerof2cst)) */
+ /* a ? 0 : -1 -> -(!a). */
+ (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@2))
+ (if (TYPE_PRECISION (type) == 1)
+ /* For signed 1-bit precision just cast bool to the type. */
+ (convert (bit_xor (convert:boolean_type_node @0) { boolean_true_node; }))
+ (if (TREE_CODE (type) == BOOLEAN_TYPE)
+ (with {
+ tree intt = build_nonstandard_integer_type (TYPE_PRECISION (type),
+ TYPE_UNSIGNED (type));
+ }
+ (convert (negate (convert:intt (bit_xor (convert:boolean_type_node @0)
+ { boolean_true_node; })))))
+ (negate (convert:type (bit_xor (convert:boolean_type_node @0)
+ { boolean_true_node; }))))))
+ /* a ? 0 : powerof2cst -> (!a) << (log2(powerof2cst)) */
(if (INTEGRAL_TYPE_P (type) && integer_pow2p (@2))
(with {
tree shift = build_int_cst (integer_type_node, tree_log2 (@2));
}
(lshift (convert (bit_xor (convert:boolean_type_node @0)
- { boolean_true_node; })) { shift; })))
- /* a ? -1 : 0 -> -(!a). No need to check the TYPE_PRECISION not being 1
- here as the powerof2cst case above will handle that case correctly. */
- (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@2))
- (negate (convert:type (bit_xor (convert:boolean_type_node @0)
- { boolean_true_node; }))))))))
+ { boolean_true_node; })) { shift; })))))))
/* (a > 1) ? 0 : (cast)a is the same as (cast)(a == 1)
for unsigned types. */
@@ -5161,7 +5219,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Optimize
# x_5 in range [cst1, cst2] where cst2 = cst1 + 1
- x_5 ? cstN ? cst4 : cst3
+ x_5 == cstN ? cst4 : cst3
# op is == or != and N is 1 or 2
to r_6 = x_5 + (min (cst3, cst4) - cst1) or
r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
@@ -5197,7 +5255,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
type1 = type;
auto prec = TYPE_PRECISION (type1);
auto unsign = TYPE_UNSIGNED (type1);
- type1 = build_nonstandard_integer_type (prec, unsign);
+ if (TREE_CODE (type1) == BOOLEAN_TYPE)
+ type1 = build_nonstandard_integer_type (prec, unsign);
min = wide_int::from (min, prec,
TYPE_SIGN (TREE_TYPE (@0)));
wide_int a = wide_int::from (wi::to_wide (arg0), prec,
@@ -5236,14 +5295,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
}
(if (code == PLUS_EXPR)
(convert (plus (convert:type1 @0) { arg; }))
- (convert (minus { arg; } (convert:type1 @0)))
- )
- )
- )
- )
- )
- )
-)
+ (convert (minus { arg; } (convert:type1 @0))))))))))
#endif
(simplify
@@ -5616,42 +5668,51 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* A == 0 ? A : -A same as -A */
(for cmp (eq uneq)
(simplify
- (cnd (cmp @0 zerop) @0 (negate@1 @0))
- (if (!HONOR_SIGNED_ZEROS (type))
+ (cnd (cmp @0 zerop) @2 (negate@1 @2))
+ (if (!HONOR_SIGNED_ZEROS (type)
+ && bitwise_equal_p (@0, @2))
@1))
(simplify
- (cnd (cmp @0 zerop) zerop (negate@1 @0))
- (if (!HONOR_SIGNED_ZEROS (type))
+ (cnd (cmp @0 zerop) zerop (negate@1 @2))
+ (if (!HONOR_SIGNED_ZEROS (type)
+ && bitwise_equal_p (@0, @2))
@1))
)
/* A != 0 ? A : -A same as A */
(for cmp (ne ltgt)
(simplify
- (cnd (cmp @0 zerop) @0 (negate @0))
- (if (!HONOR_SIGNED_ZEROS (type))
- @0))
+ (cnd (cmp @0 zerop) @1 (negate @1))
+ (if (!HONOR_SIGNED_ZEROS (type)
+ && bitwise_equal_p (@0, @1))
+ @1))
(simplify
- (cnd (cmp @0 zerop) @0 integer_zerop)
- (if (!HONOR_SIGNED_ZEROS (type))
- @0))
+ (cnd (cmp @0 zerop) @1 integer_zerop)
+ (if (!HONOR_SIGNED_ZEROS (type)
+ && bitwise_equal_p (@0, @1))
+ @1))
)
/* A >=/> 0 ? A : -A same as abs (A) */
(for cmp (ge gt)
(simplify
- (cnd (cmp @0 zerop) @0 (negate @0))
- (if (!HONOR_SIGNED_ZEROS (type)
- && !TYPE_UNSIGNED (type))
- (abs @0))))
+ (cnd (cmp @0 zerop) @1 (negate @1))
+ (if (!HONOR_SIGNED_ZEROS (TREE_TYPE(@0))
+ && !TYPE_UNSIGNED (TREE_TYPE(@0))
+ && bitwise_equal_p (@0, @1))
+ (if (TYPE_UNSIGNED (type))
+ (absu:type @0)
+ (abs @0)))))
/* A <=/< 0 ? A : -A same as -abs (A) */
(for cmp (le lt)
(simplify
- (cnd (cmp @0 zerop) @0 (negate @0))
- (if (!HONOR_SIGNED_ZEROS (type)
- && !TYPE_UNSIGNED (type))
- (if (ANY_INTEGRAL_TYPE_P (type)
- && !TYPE_OVERFLOW_WRAPS (type))
+ (cnd (cmp @0 zerop) @1 (negate @1))
+ (if (!HONOR_SIGNED_ZEROS (TREE_TYPE(@0))
+ && !TYPE_UNSIGNED (TREE_TYPE(@0))
+ && bitwise_equal_p (@0, @1))
+ (if ((ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+ || TYPE_UNSIGNED (type))
(with {
- tree utype = unsigned_type_for (type);
+ tree utype = unsigned_type_for (TREE_TYPE(@0));
}
(convert (negate (absu:utype @0))))
(negate (abs @0)))))
@@ -5900,18 +5961,20 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Fold ~X op ~Y as Y op X. */
(for cmp (simple_comparison)
(simplify
- (cmp (bit_not@2 @0) (bit_not@3 @1))
+ (cmp (nop_convert1?@4 (bit_not@2 @0)) (nop_convert2? (bit_not@3 @1)))
(if (single_use (@2) && single_use (@3))
- (cmp @1 @0))))
+ (with { tree otype = TREE_TYPE (@4); }
+ (cmp (convert:otype @1) (convert:otype @0))))))
/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
(for cmp (simple_comparison)
scmp (swapped_simple_comparison)
(simplify
- (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
+ (cmp (nop_convert? (bit_not@2 @0)) CONSTANT_CLASS_P@1)
(if (single_use (@2)
&& (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
- (scmp @0 (bit_not @1)))))
+ (with { tree otype = TREE_TYPE (@1); }
+ (scmp (convert:otype @0) (bit_not @1))))))
(for cmp (simple_comparison)
(simplify
@@ -6420,8 +6483,12 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
code and here to avoid a spurious overflow flag on the resulting
constant which fold_convert produces. */
(if (TREE_CODE (@1) == INTEGER_CST)
- (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
- TREE_OVERFLOW (@1)); })
+ (cmp @00 { force_fit_type (TREE_TYPE (@00),
+ wide_int::from (wi::to_wide (@1),
+ MAX (TYPE_PRECISION (TREE_TYPE (@1)),
+ TYPE_PRECISION (TREE_TYPE (@00))),
+ TYPE_SIGN (TREE_TYPE (@1))),
+ 0, TREE_OVERFLOW (@1)); })
(cmp @00 (convert @1)))
(if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
@@ -6741,13 +6808,11 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(with { bool wascmp; }
(if (INTEGRAL_TYPE_P (type)
&& bitwise_inverted_equal_p (@1, @2, wascmp)
- && (!wascmp || element_precision (type) == 1))
- (with {
- auto prec = TYPE_PRECISION (type);
- auto unsign = TYPE_UNSIGNED (type);
- tree inttype = build_nonstandard_integer_type (prec, unsign);
- }
- (convert (bit_xor (negate (convert:inttype @0)) (convert:inttype @2)))))))
+ && (!wascmp || TYPE_PRECISION (type) == 1))
+ (if ((!TYPE_UNSIGNED (type) && TREE_CODE (type) == BOOLEAN_TYPE)
+ || TYPE_PRECISION (type) == 1)
+ (bit_xor (convert:type @0) @2)
+ (bit_xor (negate (convert:type @0)) @2)))))
#endif
/* Simplify pointer equality compares using PTA. */
@@ -8905,6 +8970,30 @@ and,
&& fold_real_zero_addition_p (type, NULL_TREE, @5, 0)))
(IFN_COND_LEN_ADD @1 @0 @2 @0 @3 @4)))
+/* Detect simplification for vector condition folding where
+
+ c = mask1 ? (masked_op mask2 a b) : b
+
+ into
+
+ c = masked_op (mask1 & mask2) a b
+
+ where the operation can be partially applied to one operand. */
+
+(for cond_op (COND_BINARY)
+ (simplify
+ (vec_cond @0
+ (cond_op:s @1 @2 @3 @4) @3)
+ (cond_op (bit_and @1 @0) @2 @3 @4)))
+
+/* And same for ternary expressions. */
+
+(for cond_op (COND_TERNARY)
+ (simplify
+ (vec_cond @0
+ (cond_op:s @1 @2 @3 @4 @5) @4)
+ (cond_op (bit_and @1 @0) @2 @3 @4 @5)))
+
/* For pointers @0 and @2 and nonnegative constant offset @1, look for
expressions like:
diff --git a/gcc/omp-general.cc b/gcc/omp-general.cc
index 1e31014..b88d593 100644
--- a/gcc/omp-general.cc
+++ b/gcc/omp-general.cc
@@ -1986,13 +1986,17 @@ omp_get_context_selector (tree ctx, const char *set, const char *sel)
return NULL_TREE;
}
+/* Needs to be a GC-friendly widest_int variant, but precision is
+ desirable to be the same on all targets. */
+typedef generic_wide_int <fixed_wide_int_storage <1024> > score_wide_int;
+
/* Compute *SCORE for context selector CTX. Return true if the score
would be different depending on whether it is a declare simd clone or
not. DECLARE_SIMD should be true for the case when it would be
a declare simd clone. */
static bool
-omp_context_compute_score (tree ctx, widest_int *score, bool declare_simd)
+omp_context_compute_score (tree ctx, score_wide_int *score, bool declare_simd)
{
tree construct = omp_get_context_selector (ctx, "construct", NULL);
bool has_kind = omp_get_context_selector (ctx, "device", "kind");
@@ -2007,7 +2011,11 @@ omp_context_compute_score (tree ctx, widest_int *score, bool declare_simd)
if (TREE_PURPOSE (t3)
&& strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t3)), " score") == 0
&& TREE_CODE (TREE_VALUE (t3)) == INTEGER_CST)
- *score += wi::to_widest (TREE_VALUE (t3));
+ {
+ tree t4 = TREE_VALUE (t3);
+ *score += score_wide_int::from (wi::to_wide (t4),
+ TYPE_SIGN (TREE_TYPE (t4)));
+ }
if (construct || has_kind || has_arch || has_isa)
{
int scores[12];
@@ -2028,16 +2036,16 @@ omp_context_compute_score (tree ctx, widest_int *score, bool declare_simd)
*score = -1;
return ret;
}
- *score += wi::shifted_mask <widest_int> (scores[b + n], 1, false);
+ *score += wi::shifted_mask <score_wide_int> (scores[b + n], 1, false);
}
if (has_kind)
- *score += wi::shifted_mask <widest_int> (scores[b + nconstructs],
+ *score += wi::shifted_mask <score_wide_int> (scores[b + nconstructs],
1, false);
if (has_arch)
- *score += wi::shifted_mask <widest_int> (scores[b + nconstructs] + 1,
+ *score += wi::shifted_mask <score_wide_int> (scores[b + nconstructs] + 1,
1, false);
if (has_isa)
- *score += wi::shifted_mask <widest_int> (scores[b + nconstructs] + 2,
+ *score += wi::shifted_mask <score_wide_int> (scores[b + nconstructs] + 2,
1, false);
}
else /* FIXME: Implement this. */
@@ -2051,9 +2059,9 @@ struct GTY(()) omp_declare_variant_entry {
/* NODE of the variant. */
cgraph_node *variant;
/* Score if not in declare simd clone. */
- widest_int score;
+ score_wide_int score;
/* Score if in declare simd clone. */
- widest_int score_in_declare_simd_clone;
+ score_wide_int score_in_declare_simd_clone;
/* Context selector for the variant. */
tree ctx;
/* True if the context selector is known to match already. */
@@ -2214,12 +2222,12 @@ omp_resolve_late_declare_variant (tree alt)
}
}
- widest_int max_score = -1;
+ score_wide_int max_score = -1;
varentry2 = NULL;
FOR_EACH_VEC_SAFE_ELT (entryp->variants, i, varentry1)
if (matches[i])
{
- widest_int score
+ score_wide_int score
= (cur_node->simdclone ? varentry1->score_in_declare_simd_clone
: varentry1->score);
if (score > max_score)
@@ -2300,8 +2308,8 @@ omp_resolve_declare_variant (tree base)
if (any_deferred)
{
- widest_int max_score1 = 0;
- widest_int max_score2 = 0;
+ score_wide_int max_score1 = 0;
+ score_wide_int max_score2 = 0;
bool first = true;
unsigned int i;
tree attr1, attr2;
@@ -2311,8 +2319,8 @@ omp_resolve_declare_variant (tree base)
vec_alloc (entry.variants, variants.length ());
FOR_EACH_VEC_ELT (variants, i, attr1)
{
- widest_int score1;
- widest_int score2;
+ score_wide_int score1;
+ score_wide_int score2;
bool need_two;
tree ctx = TREE_VALUE (TREE_VALUE (attr1));
need_two = omp_context_compute_score (ctx, &score1, false);
@@ -2471,16 +2479,16 @@ omp_resolve_declare_variant (tree base)
variants[j] = NULL_TREE;
}
}
- widest_int max_score1 = 0;
- widest_int max_score2 = 0;
+ score_wide_int max_score1 = 0;
+ score_wide_int max_score2 = 0;
bool first = true;
FOR_EACH_VEC_ELT (variants, i, attr1)
if (attr1)
{
if (variant1)
{
- widest_int score1;
- widest_int score2;
+ score_wide_int score1;
+ score_wide_int score2;
bool need_two;
tree ctx;
if (first)
@@ -2552,7 +2560,7 @@ omp_lto_output_declare_variant_alt (lto_simple_output_block *ob,
gcc_assert (nvar != LCC_NOT_FOUND);
streamer_write_hwi_stream (ob->main_stream, nvar);
- for (widest_int *w = &varentry->score; ;
+ for (score_wide_int *w = &varentry->score; ;
w = &varentry->score_in_declare_simd_clone)
{
unsigned len = w->get_len ();
@@ -2602,15 +2610,15 @@ omp_lto_input_declare_variant_alt (lto_input_block *ib, cgraph_node *node,
omp_declare_variant_entry varentry;
varentry.variant
= dyn_cast<cgraph_node *> (nodes[streamer_read_hwi (ib)]);
- for (widest_int *w = &varentry.score; ;
+ for (score_wide_int *w = &varentry.score; ;
w = &varentry.score_in_declare_simd_clone)
{
unsigned len2 = streamer_read_hwi (ib);
- HOST_WIDE_INT arr[WIDE_INT_MAX_ELTS];
- gcc_assert (len2 <= WIDE_INT_MAX_ELTS);
+ HOST_WIDE_INT arr[WIDE_INT_MAX_HWIS (1024)];
+ gcc_assert (len2 <= WIDE_INT_MAX_HWIS (1024));
for (unsigned int j = 0; j < len2; j++)
arr[j] = streamer_read_hwi (ib);
- *w = widest_int::from_array (arr, len2, true);
+ *w = score_wide_int::from_array (arr, len2, true);
if (w == &varentry.score_in_declare_simd_clone)
break;
}
diff --git a/gcc/omp-low.cc b/gcc/omp-low.cc
index b0c3ef7..91ef74f 100644
--- a/gcc/omp-low.cc
+++ b/gcc/omp-low.cc
@@ -4547,7 +4547,7 @@ public:
tree lastlane;
vec<tree, va_heap> simt_eargs;
gimple_seq simt_dlist;
- poly_uint64_pod max_vf;
+ poly_uint64 max_vf;
bool is_simt;
};
diff --git a/gcc/opt-functions.awk b/gcc/opt-functions.awk
index 36de463..a58e938 100644
--- a/gcc/opt-functions.awk
+++ b/gcc/opt-functions.awk
@@ -387,3 +387,16 @@ function integer_range_info(range_option, init, option, uinteger_used)
else
return "-1, -1"
}
+
+# Find the index of VAR in VAR_ARRY which has length N_VAR_ARRY. If
+# VAR is not found, return N_VAR_ARRY. That means the var is a new
+# definition.
+function find_index(var, var_arry, n_var_arry)
+{
+ for (var_index = 0; var_index < n_var_arry; var_index++)
+ {
+ if (var_arry[var_index] == var)
+ break
+ }
+ return var_index
+}
diff --git a/gcc/opt-problem.cc b/gcc/opt-problem.cc
index 9917271..ae85df3 100644
--- a/gcc/opt-problem.cc
+++ b/gcc/opt-problem.cc
@@ -62,10 +62,8 @@ opt_problem::opt_problem (const dump_location_t &loc,
{
dump_pretty_printer pp (&dump_context::get (), MSG_MISSED_OPTIMIZATION);
- text_info text;
- text.err_no = errno;
- text.args_ptr = ap;
- text.format_spec = fmt; /* No i18n is performed. */
+ text_info text (fmt, /* No i18n is performed. */
+ ap, errno);
/* Phases 1 and 2, using pp_format. */
pp_format (&pp, &text);
diff --git a/gcc/opt-read.awk b/gcc/opt-read.awk
index fc4e3d7..f74d847 100644
--- a/gcc/opt-read.awk
+++ b/gcc/opt-read.awk
@@ -22,6 +22,7 @@ BEGIN {
n_opts = 0
n_langs = 0
n_target_save = 0
+ n_target_vars = 0
n_extra_vars = 0
n_extra_target_vars = 0
n_extra_masks = 0
@@ -121,7 +122,21 @@ BEGIN {
n_opts++;
}
else {
- extra_masks[n_extra_masks++] = name
+ target_var = opt_args("Var", $0)
+ if (target_var)
+ {
+ target_var = opt_args("Var", $1)
+ var_index = find_index(target_var, target_vars, n_target_vars)
+ if (var_index == n_target_vars)
+ {
+ target_vars[n_target_vars++] = target_var
+ }
+ other_masks[var_index "," n_other_mask[var_index]++] = name
+ }
+ else
+ {
+ extra_masks[n_extra_masks++] = name
+ }
}
}
}
diff --git a/gcc/optabs.cc b/gcc/optabs.cc
index e1898da..f0a048a 100644
--- a/gcc/optabs.cc
+++ b/gcc/optabs.cc
@@ -533,15 +533,13 @@ expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
has unknown behavior. Do a single shift first, then shift by the
remainder. It's OK to use ~OP1 as the remainder if shift counts
are truncated to the mode size. */
- carries = expand_binop (word_mode, reverse_unsigned_shift,
- outof_input, const1_rtx, 0, unsignedp, methods);
- if (shift_mask == BITS_PER_WORD - 1)
- {
- tmp = immed_wide_int_const
- (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
- tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
- 0, true, methods);
- }
+ carries = simplify_expand_binop (word_mode, reverse_unsigned_shift,
+ outof_input, const1_rtx, 0,
+ unsignedp, methods);
+ if (carries == const0_rtx)
+ tmp = const0_rtx;
+ else if (shift_mask == BITS_PER_WORD - 1)
+ tmp = expand_unop (op1_mode, one_cmpl_optab, op1, 0, true);
else
{
tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
@@ -552,22 +550,29 @@ expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
}
if (tmp == 0 || carries == 0)
return false;
- carries = expand_binop (word_mode, reverse_unsigned_shift,
- carries, tmp, 0, unsignedp, methods);
+ if (carries != const0_rtx && tmp != const0_rtx)
+ carries = simplify_expand_binop (word_mode, reverse_unsigned_shift,
+ carries, tmp, 0, unsignedp, methods);
if (carries == 0)
return false;
- /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
- so the result can go directly into INTO_TARGET if convenient. */
- tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
- into_target, unsignedp, methods);
- if (tmp == 0)
- return false;
+ if (into_input != const0_rtx)
+ {
+ /* Shift INTO_INPUT logically by OP1. This is the last use of
+ INTO_INPUT so the result can go directly into INTO_TARGET if
+ convenient. */
+ tmp = simplify_expand_binop (word_mode, unsigned_shift, into_input,
+ op1, into_target, unsignedp, methods);
+ if (tmp == 0)
+ return false;
- /* Now OR in the bits carried over from OUTOF_INPUT. */
- if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
- into_target, unsignedp, methods))
- return false;
+ /* Now OR in the bits carried over from OUTOF_INPUT. */
+ if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
+ into_target, unsignedp, methods))
+ return false;
+ }
+ else
+ emit_move_insn (into_target, carries);
/* Use a standard word_mode shift for the out-of half. */
if (outof_target != 0)
diff --git a/gcc/opth-gen.awk b/gcc/opth-gen.awk
index 71404f9..2655157 100644
--- a/gcc/opth-gen.awk
+++ b/gcc/opth-gen.awk
@@ -406,6 +406,18 @@ for (i = 0; i < n_extra_masks; i++) {
print "#define MASK_" extra_masks[i] " (1U << " masknum[""]++ ")"
}
+for (i = 0; i < n_target_vars; i++)
+{
+ if (find_index(target_vars[i], extra_target_vars, n_extra_target_vars) == n_extra_target_vars)
+ continue
+ for (j = 0; j < n_other_mask[i]; j++)
+ {
+ print "#define MASK_" other_masks[i "," j] " (1U << " other_masknum[i]++ ")"
+ }
+ if (other_masknum[i] > 32)
+ print "#error too many target masks for" extra_target_vars[i]
+}
+
for (var in masknum) {
if (var != "" && host_wide_int[var] == "yes") {
print "#if defined(HOST_BITS_PER_WIDE_INT) && " masknum[var] " > HOST_BITS_PER_WIDE_INT"
@@ -419,6 +431,20 @@ for (var in masknum) {
print "#error too many masks for " var
}
}
+for (i = 0; i < n_target_vars; i++)
+{
+ if (find_index(target_vars[i], extra_target_vars, n_extra_target_vars) == n_extra_target_vars)
+ continue
+ for (j = 0; j < n_other_mask[i]; j++)
+ {
+ print "#define TARGET_" other_masks[i "," j] \
+ " ((" target_vars[i] " & MASK_" other_masks[i "," j] ") != 0)"
+ print "#define TARGET_" other_masks[i "," j] "_P(" target_vars[i] ")" \
+ " (((" target_vars[i] ") & MASK_" other_masks[i "," j] ") != 0)"
+ print "#define TARGET_" other_masks[i "," j] "_OPTS_P(opts)" \
+ " (((opts->x_" target_vars[i] ") & MASK_" other_masks[i "," j] ") != 0)"
+ }
+}
print ""
for (i = 0; i < n_opts; i++) {
@@ -447,15 +473,22 @@ for (i = 0; i < n_opts; i++) {
" ((" vname " & " mask original_name ") != 0)"
print "#define TARGET_" name "_P(" vname ")" \
" (((" vname ") & " mask original_name ") != 0)"
+ print "#define TARGET_" name "_OPTS_P(opts)" \
+ " (((opts->x_" vname ") & " mask original_name ") != 0)"
print "#define TARGET_EXPLICIT_" name "_P(opts)" \
" ((opts->x_" vname "_explicit & " mask original_name ") != 0)"
print "#define SET_TARGET_" name "(opts) opts->x_" vname " |= " mask original_name
}
}
for (i = 0; i < n_extra_masks; i++) {
- if (extra_mask_macros[extra_masks[i]] == 0)
+ if (extra_mask_macros[extra_masks[i]] == 0) {
print "#define TARGET_" extra_masks[i] \
" ((target_flags & MASK_" extra_masks[i] ") != 0)"
+ print "#define TARGET_" extra_masks[i] "_P(target_flags)" \
+ " (((target_flags) & " extra_masks[i] ") != 0)"
+ print "#define TARGET_" extra_masks[i] "_OPTS_P(opts)" \
+ " (((opts->x_target_flags) & MASK_" extra_masks[i] ") != 0)"
+ }
}
print ""
diff --git a/gcc/opts.cc b/gcc/opts.cc
index ac81d4e..573dcf8 100644
--- a/gcc/opts.cc
+++ b/gcc/opts.cc
@@ -2864,15 +2864,15 @@ common_handle_option (struct gcc_options *opts,
break;
case OPT_fdiagnostics_show_caret:
- dc->show_caret = value;
+ dc->m_source_printing.enabled = value;
break;
case OPT_fdiagnostics_show_labels:
- dc->show_labels_p = value;
+ dc->m_source_printing.show_labels_p = value;
break;
case OPT_fdiagnostics_show_line_numbers:
- dc->show_line_numbers_p = value;
+ dc->m_source_printing.show_line_numbers_p = value;
break;
case OPT_fdiagnostics_color_:
@@ -2936,7 +2936,7 @@ common_handle_option (struct gcc_options *opts,
break;
case OPT_fdiagnostics_minimum_margin_width_:
- dc->min_margin_width = value;
+ dc->m_source_printing.min_margin_width = value;
break;
case OPT_fdump_:
diff --git a/gcc/passes.def b/gcc/passes.def
index 4110a47..df7965d 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -221,7 +221,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_fre, true /* may_iterate */);
NEXT_PASS (pass_merge_phi);
NEXT_PASS (pass_thread_jumps_full, /*first=*/true);
- NEXT_PASS (pass_vrp, true /* warn_array_bounds_p */);
+ NEXT_PASS (pass_vrp, false /* final_p*/);
NEXT_PASS (pass_dse);
NEXT_PASS (pass_dce);
/* pass_stdarg is always run and at this point we execute
@@ -348,7 +348,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_dominator, false /* may_peel_loop_headers_p */);
NEXT_PASS (pass_strlen);
NEXT_PASS (pass_thread_jumps_full, /*first=*/false);
- NEXT_PASS (pass_vrp, false /* warn_array_bounds_p */);
+ NEXT_PASS (pass_vrp, true /* final_p */);
/* Run CCP to compute alignment and nonzero bits. */
NEXT_PASS (pass_ccp, true /* nonzero_p */);
NEXT_PASS (pass_warn_restrict);
@@ -519,6 +519,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_peephole2);
NEXT_PASS (pass_if_after_reload);
NEXT_PASS (pass_regrename);
+ NEXT_PASS (pass_fold_mem_offsets);
NEXT_PASS (pass_cprop_hardreg);
NEXT_PASS (pass_fast_rtl_dce);
NEXT_PASS (pass_reorder_blocks);
diff --git a/gcc/poly-int-types.h b/gcc/poly-int-types.h
index 07e5da0..6a41e7b 100644
--- a/gcc/poly-int-types.h
+++ b/gcc/poly-int-types.h
@@ -20,14 +20,6 @@ along with GCC; see the file COPYING3. If not see
#ifndef HAVE_POLY_INT_TYPES_H
#define HAVE_POLY_INT_TYPES_H
-typedef poly_int_pod<NUM_POLY_INT_COEFFS, unsigned short> poly_uint16_pod;
-typedef poly_int_pod<NUM_POLY_INT_COEFFS, HOST_WIDE_INT> poly_int64_pod;
-typedef poly_int_pod<NUM_POLY_INT_COEFFS,
- unsigned HOST_WIDE_INT> poly_uint64_pod;
-typedef poly_int_pod<NUM_POLY_INT_COEFFS, offset_int> poly_offset_int_pod;
-typedef poly_int_pod<NUM_POLY_INT_COEFFS, wide_int> poly_wide_int_pod;
-typedef poly_int_pod<NUM_POLY_INT_COEFFS, widest_int> poly_widest_int_pod;
-
typedef poly_int<NUM_POLY_INT_COEFFS, unsigned short> poly_uint16;
typedef poly_int<NUM_POLY_INT_COEFFS, HOST_WIDE_INT> poly_int64;
typedef poly_int<NUM_POLY_INT_COEFFS, unsigned HOST_WIDE_INT> poly_uint64;
diff --git a/gcc/poly-int.h b/gcc/poly-int.h
index 7bff5e5..828714e 100644
--- a/gcc/poly-int.h
+++ b/gcc/poly-int.h
@@ -29,7 +29,6 @@ along with GCC; see the file COPYING3. If not see
#ifndef HAVE_POLY_INT_H
#define HAVE_POLY_INT_H
-template<unsigned int N, typename T> struct poly_int_pod;
template<unsigned int N, typename T> class poly_int;
/* poly_coeff_traiits<T> describes the properties of a poly_int
@@ -58,7 +57,11 @@ template<unsigned int N, typename T> class poly_int;
- poly_coeff_traits<T>::result is a type that can hold results of
operations on T. This is different from T itself in cases where T
- is the result of an accessor like wi::to_offset. */
+ is the result of an accessor like wi::to_offset.
+
+ - poly_coeff_traits<T>::init_cast<Arg>::type is the type to which
+ an argument of type Arg should be cast before being used to
+ initialize a coefficient of type T. */
template<typename T, wi::precision_type = wi::int_traits<T>::precision_type>
struct poly_coeff_traits;
@@ -74,6 +77,9 @@ struct poly_coeff_traits<T, wi::FLEXIBLE_PRECISION>
+ ((T (1) << (precision - 2)) - 1))
: T (-1));
static const int rank = sizeof (T) * 2 + !signedness;
+
+ template<typename Arg>
+ struct init_cast { using type = T; };
};
template<typename T>
@@ -84,6 +90,23 @@ struct poly_coeff_traits<T, wi::VAR_PRECISION>
static const int signedness = -1;
static const int precision = WIDE_INT_MAX_PRECISION;
static const int rank = INT_MAX;
+
+ template<typename Arg>
+ struct init_cast { using type = const Arg &; };
+};
+
+template<typename T>
+struct poly_coeff_traits<T, wi::INL_CONST_PRECISION>
+{
+ typedef WI_UNARY_RESULT (T) result;
+ typedef int int_type;
+ /* These types are always signed. */
+ static const int signedness = 1;
+ static const int precision = wi::int_traits<T>::precision;
+ static const int rank = precision * 2 / CHAR_BIT;
+
+ template<typename Arg>
+ struct init_cast { using type = const Arg &; };
};
template<typename T>
@@ -95,6 +118,9 @@ struct poly_coeff_traits<T, wi::CONST_PRECISION>
static const int signedness = 1;
static const int precision = wi::int_traits<T>::precision;
static const int rank = precision * 2 / CHAR_BIT;
+
+ template<typename Arg>
+ struct init_cast { using type = const Arg &; };
};
/* Information about a pair of coefficient types. */
@@ -172,17 +198,13 @@ struct poly_int_traits
typedef typename poly_coeff_traits<T>::int_type int_type;
};
template<unsigned int N, typename C>
-struct poly_int_traits<poly_int_pod<N, C> >
+struct poly_int_traits<poly_int<N, C> >
{
static const bool is_poly = true;
static const unsigned int num_coeffs = N;
typedef C coeff_type;
typedef typename poly_coeff_traits<C>::int_type int_type;
};
-template<unsigned int N, typename C>
-struct poly_int_traits<poly_int<N, C> > : poly_int_traits<poly_int_pod<N, C> >
-{
-};
/* SFINAE class that makes T2 available as "type" if T1 is a non-polynomial
type. */
@@ -332,31 +354,55 @@ struct poly_result<T1, T2, 2>
? (void) ((RES).coeffs[I] = VALUE) \
: (void) ((RES).coeffs[I].~C (), new (&(RES).coeffs[I]) C (VALUE)))
-/* A base POD class for polynomial integers. The polynomial has N
- coefficients of type C. */
+/* poly_int_full and poly_int_hungry are used internally within poly_int
+ for delegated initializers. poly_int_full indicates that a parameter
+ pack has enough elements to initialize every coefficient. poly_int_hungry
+ indicates that at least one extra zero must be added. */
+struct poly_int_full {};
+struct poly_int_hungry {};
+
+/* poly_int_fullness<B>::type is poly_int_full when B is true and
+ poly_int_hungry when B is false. */
+template<bool> struct poly_int_fullness;
+template<> struct poly_int_fullness<false> { using type = poly_int_hungry; };
+template<> struct poly_int_fullness<true> { using type = poly_int_full; };
+
+/* A class containing polynomial integers. The polynomial has N coefficients
+ of type C, and N - 1 indeterminates. */
template<unsigned int N, typename C>
-struct poly_int_pod
+struct poly_int
{
public:
+ poly_int () = default;
+ poly_int (const poly_int &) = default;
+
+ template<typename Ca>
+ poly_int (const poly_int<N, Ca> &);
+
+ template<typename ...Cs>
+ constexpr poly_int (const Cs &...);
+
+ poly_int &operator = (const poly_int &) = default;
+
template<typename Ca>
- poly_int_pod &operator = (const poly_int_pod<N, Ca> &);
+ poly_int &operator = (const poly_int<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::type &operator = (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator = (const Ca &);
template<typename Ca>
- poly_int_pod &operator += (const poly_int_pod<N, Ca> &);
+ poly_int &operator += (const poly_int<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::type &operator += (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator += (const Ca &);
template<typename Ca>
- poly_int_pod &operator -= (const poly_int_pod<N, Ca> &);
+ poly_int &operator -= (const poly_int<N, Ca> &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::type &operator -= (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator -= (const Ca &);
template<typename Ca>
- typename if_nonpoly<Ca, poly_int_pod>::type &operator *= (const Ca &);
+ typename if_nonpoly<Ca, poly_int>::type &operator *= (const Ca &);
- poly_int_pod &operator <<= (unsigned int);
+ poly_int &operator <<= (unsigned int);
bool is_constant () const;
@@ -366,13 +412,13 @@ public:
C to_constant () const;
template<typename Ca>
- static poly_int<N, C> from (const poly_int_pod<N, Ca> &, unsigned int,
+ static poly_int<N, C> from (const poly_int<N, Ca> &, unsigned int,
signop);
template<typename Ca>
- static poly_int<N, C> from (const poly_int_pod<N, Ca> &, signop);
+ static poly_int<N, C> from (const poly_int<N, Ca> &, signop);
- bool to_shwi (poly_int_pod<N, HOST_WIDE_INT> *) const;
- bool to_uhwi (poly_int_pod<N, unsigned HOST_WIDE_INT> *) const;
+ bool to_shwi (poly_int<N, HOST_WIDE_INT> *) const;
+ bool to_uhwi (poly_int<N, unsigned HOST_WIDE_INT> *) const;
poly_int<N, HOST_WIDE_INT> force_shwi () const;
poly_int<N, unsigned HOST_WIDE_INT> force_uhwi () const;
@@ -381,12 +427,50 @@ public:
#endif
C coeffs[N];
+
+private:
+ template<typename ...Cs>
+ constexpr poly_int (poly_int_full, const Cs &...);
+
+ template<typename C0, typename ...Cs>
+ constexpr poly_int (poly_int_hungry, const C0 &, const Cs &...);
};
template<unsigned int N, typename C>
template<typename Ca>
-inline poly_int_pod<N, C>&
-poly_int_pod<N, C>::operator = (const poly_int_pod<N, Ca> &a)
+inline
+poly_int<N, C>::poly_int (const poly_int<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
+}
+
+template<unsigned int N, typename C>
+template<typename ...Cs>
+inline constexpr
+poly_int<N, C>::poly_int (const Cs &... cs)
+ : poly_int (typename poly_int_fullness<sizeof... (Cs) >= N>::type (),
+ cs...) {}
+
+/* Initialize with c0, cs..., and some trailing zeros. */
+template<unsigned int N, typename C>
+template<typename C0, typename ...Cs>
+inline constexpr
+poly_int<N, C>::poly_int (poly_int_hungry, const C0 &c0, const Cs &... cs)
+ : poly_int (c0, cs..., wi::ints_for<C>::zero (c0)) {}
+
+/* Initialize with cs... directly, casting where necessary. */
+template<unsigned int N, typename C>
+template<typename ...Cs>
+inline constexpr
+poly_int<N, C>::poly_int (poly_int_full, const Cs &... cs)
+ : coeffs { (typename poly_coeff_traits<C>::
+ template init_cast<Cs>::type (cs))... } {}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int<N, C>&
+poly_int<N, C>::operator = (const poly_int<N, Ca> &a)
{
for (unsigned int i = 0; i < N; i++)
POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
@@ -395,8 +479,8 @@ poly_int_pod<N, C>::operator = (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
-poly_int_pod<N, C>::operator = (const Ca &a)
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator = (const Ca &a)
{
POLY_SET_COEFF (C, *this, 0, a);
if (N >= 2)
@@ -407,8 +491,8 @@ poly_int_pod<N, C>::operator = (const Ca &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline poly_int_pod<N, C>&
-poly_int_pod<N, C>::operator += (const poly_int_pod<N, Ca> &a)
+inline poly_int<N, C>&
+poly_int<N, C>::operator += (const poly_int<N, Ca> &a)
{
for (unsigned int i = 0; i < N; i++)
this->coeffs[i] += a.coeffs[i];
@@ -417,8 +501,8 @@ poly_int_pod<N, C>::operator += (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
-poly_int_pod<N, C>::operator += (const Ca &a)
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator += (const Ca &a)
{
this->coeffs[0] += a;
return *this;
@@ -426,8 +510,8 @@ poly_int_pod<N, C>::operator += (const Ca &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline poly_int_pod<N, C>&
-poly_int_pod<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
+inline poly_int<N, C>&
+poly_int<N, C>::operator -= (const poly_int<N, Ca> &a)
{
for (unsigned int i = 0; i < N; i++)
this->coeffs[i] -= a.coeffs[i];
@@ -436,8 +520,8 @@ poly_int_pod<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
-poly_int_pod<N, C>::operator -= (const Ca &a)
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator -= (const Ca &a)
{
this->coeffs[0] -= a;
return *this;
@@ -445,8 +529,8 @@ poly_int_pod<N, C>::operator -= (const Ca &a)
template<unsigned int N, typename C>
template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
-poly_int_pod<N, C>::operator *= (const Ca &a)
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator *= (const Ca &a)
{
for (unsigned int i = 0; i < N; i++)
this->coeffs[i] *= a;
@@ -454,8 +538,8 @@ poly_int_pod<N, C>::operator *= (const Ca &a)
}
template<unsigned int N, typename C>
-inline poly_int_pod<N, C>&
-poly_int_pod<N, C>::operator <<= (unsigned int a)
+inline poly_int<N, C>&
+poly_int<N, C>::operator <<= (unsigned int a)
{
for (unsigned int i = 0; i < N; i++)
this->coeffs[i] <<= a;
@@ -466,7 +550,7 @@ poly_int_pod<N, C>::operator <<= (unsigned int a)
template<unsigned int N, typename C>
inline bool
-poly_int_pod<N, C>::is_constant () const
+poly_int<N, C>::is_constant () const
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -481,7 +565,7 @@ poly_int_pod<N, C>::is_constant () const
template<unsigned int N, typename C>
template<typename T>
inline typename if_lossless<T, C, bool>::type
-poly_int_pod<N, C>::is_constant (T *const_value) const
+poly_int<N, C>::is_constant (T *const_value) const
{
if (is_constant ())
{
@@ -499,7 +583,7 @@ poly_int_pod<N, C>::is_constant (T *const_value) const
template<unsigned int N, typename C>
inline C
-poly_int_pod<N, C>::to_constant () const
+poly_int<N, C>::to_constant () const
{
gcc_checking_assert (is_constant ());
return this->coeffs[0];
@@ -512,8 +596,8 @@ poly_int_pod<N, C>::to_constant () const
template<unsigned int N, typename C>
template<typename Ca>
inline poly_int<N, C>
-poly_int_pod<N, C>::from (const poly_int_pod<N, Ca> &a,
- unsigned int bitsize, signop sgn)
+poly_int<N, C>::from (const poly_int<N, Ca> &a, unsigned int bitsize,
+ signop sgn)
{
poly_int<N, C> r;
for (unsigned int i = 0; i < N; i++)
@@ -527,7 +611,7 @@ poly_int_pod<N, C>::from (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename C>
template<typename Ca>
inline poly_int<N, C>
-poly_int_pod<N, C>::from (const poly_int_pod<N, Ca> &a, signop sgn)
+poly_int<N, C>::from (const poly_int<N, Ca> &a, signop sgn)
{
poly_int<N, C> r;
for (unsigned int i = 0; i < N; i++)
@@ -541,7 +625,7 @@ poly_int_pod<N, C>::from (const poly_int_pod<N, Ca> &a, signop sgn)
template<unsigned int N, typename C>
inline bool
-poly_int_pod<N, C>::to_shwi (poly_int_pod<N, HOST_WIDE_INT> *r) const
+poly_int<N, C>::to_shwi (poly_int<N, HOST_WIDE_INT> *r) const
{
for (unsigned int i = 0; i < N; i++)
if (!wi::fits_shwi_p (this->coeffs[i]))
@@ -558,7 +642,7 @@ poly_int_pod<N, C>::to_shwi (poly_int_pod<N, HOST_WIDE_INT> *r) const
template<unsigned int N, typename C>
inline bool
-poly_int_pod<N, C>::to_uhwi (poly_int_pod<N, unsigned HOST_WIDE_INT> *r) const
+poly_int<N, C>::to_uhwi (poly_int<N, unsigned HOST_WIDE_INT> *r) const
{
for (unsigned int i = 0; i < N; i++)
if (!wi::fits_uhwi_p (this->coeffs[i]))
@@ -573,9 +657,9 @@ poly_int_pod<N, C>::to_uhwi (poly_int_pod<N, unsigned HOST_WIDE_INT> *r) const
template<unsigned int N, typename C>
inline poly_int<N, HOST_WIDE_INT>
-poly_int_pod<N, C>::force_shwi () const
+poly_int<N, C>::force_shwi () const
{
- poly_int_pod<N, HOST_WIDE_INT> r;
+ poly_int<N, HOST_WIDE_INT> r;
for (unsigned int i = 0; i < N; i++)
r.coeffs[i] = this->coeffs[i].to_shwi ();
return r;
@@ -586,9 +670,9 @@ poly_int_pod<N, C>::force_shwi () const
template<unsigned int N, typename C>
inline poly_int<N, unsigned HOST_WIDE_INT>
-poly_int_pod<N, C>::force_uhwi () const
+poly_int<N, C>::force_uhwi () const
{
- poly_int_pod<N, unsigned HOST_WIDE_INT> r;
+ poly_int<N, unsigned HOST_WIDE_INT> r;
for (unsigned int i = 0; i < N; i++)
r.coeffs[i] = this->coeffs[i].to_uhwi ();
return r;
@@ -599,170 +683,13 @@ poly_int_pod<N, C>::force_uhwi () const
template<unsigned int N, typename C>
inline
-poly_int_pod<N, C>::operator C () const
+poly_int<N, C>::operator C () const
{
gcc_checking_assert (this->is_constant ());
return this->coeffs[0];
}
#endif
-/* The main class for polynomial integers. The class provides
- constructors that are necessarily missing from the POD base. */
-template<unsigned int N, typename C>
-class poly_int : public poly_int_pod<N, C>
-{
-public:
- poly_int () {}
-
- template<typename Ca>
- poly_int (const poly_int<N, Ca> &);
- template<typename Ca>
- poly_int (const poly_int_pod<N, Ca> &);
- template<typename C0>
- poly_int (const C0 &);
- template<typename C0, typename C1>
- poly_int (const C0 &, const C1 &);
-
- template<typename Ca>
- poly_int &operator = (const poly_int_pod<N, Ca> &);
- template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::type &operator = (const Ca &);
-
- template<typename Ca>
- poly_int &operator += (const poly_int_pod<N, Ca> &);
- template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::type &operator += (const Ca &);
-
- template<typename Ca>
- poly_int &operator -= (const poly_int_pod<N, Ca> &);
- template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::type &operator -= (const Ca &);
-
- template<typename Ca>
- typename if_nonpoly<Ca, poly_int>::type &operator *= (const Ca &);
-
- poly_int &operator <<= (unsigned int);
-};
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline
-poly_int<N, C>::poly_int (const poly_int<N, Ca> &a)
-{
- for (unsigned int i = 0; i < N; i++)
- POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline
-poly_int<N, C>::poly_int (const poly_int_pod<N, Ca> &a)
-{
- for (unsigned int i = 0; i < N; i++)
- POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
-}
-
-template<unsigned int N, typename C>
-template<typename C0>
-inline
-poly_int<N, C>::poly_int (const C0 &c0)
-{
- POLY_SET_COEFF (C, *this, 0, c0);
- for (unsigned int i = 1; i < N; i++)
- POLY_SET_COEFF (C, *this, i, wi::ints_for<C>::zero (this->coeffs[0]));
-}
-
-template<unsigned int N, typename C>
-template<typename C0, typename C1>
-inline
-poly_int<N, C>::poly_int (const C0 &c0, const C1 &c1)
-{
- STATIC_ASSERT (N >= 2);
- POLY_SET_COEFF (C, *this, 0, c0);
- POLY_SET_COEFF (C, *this, 1, c1);
- for (unsigned int i = 2; i < N; i++)
- POLY_SET_COEFF (C, *this, i, wi::ints_for<C>::zero (this->coeffs[0]));
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline poly_int<N, C>&
-poly_int<N, C>::operator = (const poly_int_pod<N, Ca> &a)
-{
- for (unsigned int i = 0; i < N; i++)
- this->coeffs[i] = a.coeffs[i];
- return *this;
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
-poly_int<N, C>::operator = (const Ca &a)
-{
- this->coeffs[0] = a;
- if (N >= 2)
- for (unsigned int i = 1; i < N; i++)
- this->coeffs[i] = wi::ints_for<C>::zero (this->coeffs[0]);
- return *this;
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline poly_int<N, C>&
-poly_int<N, C>::operator += (const poly_int_pod<N, Ca> &a)
-{
- for (unsigned int i = 0; i < N; i++)
- this->coeffs[i] += a.coeffs[i];
- return *this;
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
-poly_int<N, C>::operator += (const Ca &a)
-{
- this->coeffs[0] += a;
- return *this;
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline poly_int<N, C>&
-poly_int<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
-{
- for (unsigned int i = 0; i < N; i++)
- this->coeffs[i] -= a.coeffs[i];
- return *this;
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
-poly_int<N, C>::operator -= (const Ca &a)
-{
- this->coeffs[0] -= a;
- return *this;
-}
-
-template<unsigned int N, typename C>
-template<typename Ca>
-inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
-poly_int<N, C>::operator *= (const Ca &a)
-{
- for (unsigned int i = 0; i < N; i++)
- this->coeffs[i] *= a;
- return *this;
-}
-
-template<unsigned int N, typename C>
-inline poly_int<N, C>&
-poly_int<N, C>::operator <<= (unsigned int a)
-{
- for (unsigned int i = 0; i < N; i++)
- this->coeffs[i] <<= a;
- return *this;
-}
-
/* Return true if every coefficient of A is in the inclusive range [B, C]. */
template<typename Ca, typename Cb, typename Cc>
@@ -774,7 +701,7 @@ coeffs_in_range_p (const Ca &a, const Cb &b, const Cc &c)
template<unsigned int N, typename Ca, typename Cb, typename Cc>
inline typename if_nonpoly<Ca, bool>::type
-coeffs_in_range_p (const poly_int_pod<N, Ca> &a, const Cb &b, const Cc &c)
+coeffs_in_range_p (const poly_int<N, Ca> &a, const Cb &b, const Cc &c)
{
for (unsigned int i = 0; i < N; i++)
if (a.coeffs[i] < b || a.coeffs[i] > c)
@@ -787,7 +714,7 @@ namespace wi {
template<unsigned int N>
inline poly_int<N, hwi_with_prec>
-shwi (const poly_int_pod<N, HOST_WIDE_INT> &a, unsigned int precision)
+shwi (const poly_int<N, HOST_WIDE_INT> &a, unsigned int precision)
{
poly_int<N, hwi_with_prec> r;
for (unsigned int i = 0; i < N; i++)
@@ -799,7 +726,7 @@ shwi (const poly_int_pod<N, HOST_WIDE_INT> &a, unsigned int precision)
template<unsigned int N>
inline poly_int<N, hwi_with_prec>
-uhwi (const poly_int_pod<N, unsigned HOST_WIDE_INT> &a, unsigned int precision)
+uhwi (const poly_int<N, unsigned HOST_WIDE_INT> &a, unsigned int precision)
{
poly_int<N, hwi_with_prec> r;
for (unsigned int i = 0; i < N; i++)
@@ -811,7 +738,7 @@ uhwi (const poly_int_pod<N, unsigned HOST_WIDE_INT> &a, unsigned int precision)
template<unsigned int N, typename Ca>
inline POLY_POLY_RESULT (N, Ca, Ca)
-sext (const poly_int_pod<N, Ca> &a, unsigned int precision)
+sext (const poly_int<N, Ca> &a, unsigned int precision)
{
typedef POLY_POLY_COEFF (Ca, Ca) C;
poly_int<N, C> r;
@@ -824,7 +751,7 @@ sext (const poly_int_pod<N, Ca> &a, unsigned int precision)
template<unsigned int N, typename Ca>
inline POLY_POLY_RESULT (N, Ca, Ca)
-zext (const poly_int_pod<N, Ca> &a, unsigned int precision)
+zext (const poly_int<N, Ca> &a, unsigned int precision)
{
typedef POLY_POLY_COEFF (Ca, Ca) C;
poly_int<N, C> r;
@@ -836,7 +763,7 @@ zext (const poly_int_pod<N, Ca> &a, unsigned int precision)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-operator + (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+operator + (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_POLY_COEFF (Ca, Cb) C;
@@ -848,7 +775,7 @@ operator + (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-operator + (const poly_int_pod<N, Ca> &a, const Cb &b)
+operator + (const poly_int<N, Ca> &a, const Cb &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CONST_COEFF (Ca, Cb) C;
@@ -862,7 +789,7 @@ operator + (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-operator + (const Ca &a, const poly_int_pod<N, Cb> &b)
+operator + (const Ca &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Cb, Ca) NCb;
typedef CONST_POLY_COEFF (Ca, Cb) C;
@@ -879,7 +806,7 @@ namespace wi {
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+add (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -890,7 +817,7 @@ add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-add (const poly_int_pod<N, Ca> &a, const Cb &b)
+add (const poly_int<N, Ca> &a, const Cb &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -903,7 +830,7 @@ add (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-add (const Ca &a, const poly_int_pod<N, Cb> &b)
+add (const Ca &a, const poly_int<N, Cb> &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -916,7 +843,7 @@ add (const Ca &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
+add (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
signop sgn, wi::overflow_type *overflow)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
@@ -935,7 +862,7 @@ add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-operator - (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+operator - (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_POLY_COEFF (Ca, Cb) C;
@@ -947,7 +874,7 @@ operator - (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-operator - (const poly_int_pod<N, Ca> &a, const Cb &b)
+operator - (const poly_int<N, Ca> &a, const Cb &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CONST_COEFF (Ca, Cb) C;
@@ -961,7 +888,7 @@ operator - (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-operator - (const Ca &a, const poly_int_pod<N, Cb> &b)
+operator - (const Ca &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Cb, Ca) NCb;
typedef CONST_POLY_COEFF (Ca, Cb) C;
@@ -978,7 +905,7 @@ namespace wi {
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+sub (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -989,7 +916,7 @@ sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-sub (const poly_int_pod<N, Ca> &a, const Cb &b)
+sub (const poly_int<N, Ca> &a, const Cb &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -1002,7 +929,7 @@ sub (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-sub (const Ca &a, const poly_int_pod<N, Cb> &b)
+sub (const Ca &a, const poly_int<N, Cb> &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -1015,7 +942,7 @@ sub (const Ca &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
+sub (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
signop sgn, wi::overflow_type *overflow)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
@@ -1034,7 +961,7 @@ sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
template<unsigned int N, typename Ca>
inline POLY_POLY_RESULT (N, Ca, Ca)
-operator - (const poly_int_pod<N, Ca> &a)
+operator - (const poly_int<N, Ca> &a)
{
typedef POLY_CAST (Ca, Ca) NCa;
typedef POLY_POLY_COEFF (Ca, Ca) C;
@@ -1049,7 +976,7 @@ namespace wi {
template<unsigned int N, typename Ca>
inline poly_int<N, WI_UNARY_RESULT (Ca)>
-neg (const poly_int_pod<N, Ca> &a)
+neg (const poly_int<N, Ca> &a)
{
typedef WI_UNARY_RESULT (Ca) C;
poly_int<N, C> r;
@@ -1060,7 +987,7 @@ neg (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename Ca>
inline poly_int<N, WI_UNARY_RESULT (Ca)>
-neg (const poly_int_pod<N, Ca> &a, wi::overflow_type *overflow)
+neg (const poly_int<N, Ca> &a, wi::overflow_type *overflow)
{
typedef WI_UNARY_RESULT (Ca) C;
poly_int<N, C> r;
@@ -1077,7 +1004,7 @@ neg (const poly_int_pod<N, Ca> &a, wi::overflow_type *overflow)
template<unsigned int N, typename Ca>
inline POLY_POLY_RESULT (N, Ca, Ca)
-operator ~ (const poly_int_pod<N, Ca> &a)
+operator ~ (const poly_int<N, Ca> &a)
{
if (N >= 2)
return -1 - a;
@@ -1086,7 +1013,7 @@ operator ~ (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-operator * (const poly_int_pod<N, Ca> &a, const Cb &b)
+operator * (const poly_int<N, Ca> &a, const Cb &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CONST_COEFF (Ca, Cb) C;
@@ -1098,7 +1025,7 @@ operator * (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-operator * (const Ca &a, const poly_int_pod<N, Cb> &b)
+operator * (const Ca &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef CONST_POLY_COEFF (Ca, Cb) C;
@@ -1113,7 +1040,7 @@ namespace wi {
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-mul (const poly_int_pod<N, Ca> &a, const Cb &b)
+mul (const poly_int<N, Ca> &a, const Cb &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -1124,7 +1051,7 @@ mul (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-mul (const Ca &a, const poly_int_pod<N, Cb> &b)
+mul (const Ca &a, const poly_int<N, Cb> &b)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
@@ -1135,7 +1062,7 @@ mul (const Ca &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
-mul (const poly_int_pod<N, Ca> &a, const Cb &b,
+mul (const poly_int<N, Ca> &a, const Cb &b,
signop sgn, wi::overflow_type *overflow)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
@@ -1153,7 +1080,7 @@ mul (const poly_int_pod<N, Ca> &a, const Cb &b,
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Ca)
-operator << (const poly_int_pod<N, Ca> &a, const Cb &b)
+operator << (const poly_int<N, Ca> &a, const Cb &b)
{
typedef POLY_CAST (Ca, Ca) NCa;
typedef POLY_POLY_COEFF (Ca, Ca) C;
@@ -1168,7 +1095,7 @@ namespace wi {
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Ca)>
-lshift (const poly_int_pod<N, Ca> &a, const Cb &b)
+lshift (const poly_int<N, Ca> &a, const Cb &b)
{
typedef WI_BINARY_RESULT (Ca, Ca) C;
poly_int<N, C> r;
@@ -1184,7 +1111,7 @@ template<unsigned int N, typename C>
inline poly_int<N, HOST_WIDE_INT>
sext_hwi (const poly_int<N, C> &a, unsigned int precision)
{
- poly_int_pod<N, HOST_WIDE_INT> r;
+ poly_int<N, HOST_WIDE_INT> r;
for (unsigned int i = 0; i < N; i++)
r.coeffs[i] = sext_hwi (a.coeffs[i], precision);
return r;
@@ -1236,7 +1163,7 @@ maybe_eq_2 (const Ca &a0, const Ca &a1, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-maybe_eq (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+maybe_eq (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
STATIC_ASSERT (N <= 2);
if (N == 2)
@@ -1246,7 +1173,7 @@ maybe_eq (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Cb, bool>::type
-maybe_eq (const poly_int_pod<N, Ca> &a, const Cb &b)
+maybe_eq (const poly_int<N, Ca> &a, const Cb &b)
{
STATIC_ASSERT (N <= 2);
if (N == 2)
@@ -1256,7 +1183,7 @@ maybe_eq (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Ca, bool>::type
-maybe_eq (const Ca &a, const poly_int_pod<N, Cb> &b)
+maybe_eq (const Ca &a, const poly_int<N, Cb> &b)
{
STATIC_ASSERT (N <= 2);
if (N == 2)
@@ -1275,7 +1202,7 @@ maybe_eq (const Ca &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-maybe_ne (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+maybe_ne (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1286,7 +1213,7 @@ maybe_ne (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Cb, bool>::type
-maybe_ne (const poly_int_pod<N, Ca> &a, const Cb &b)
+maybe_ne (const poly_int<N, Ca> &a, const Cb &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1297,7 +1224,7 @@ maybe_ne (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Ca, bool>::type
-maybe_ne (const Ca &a, const poly_int_pod<N, Cb> &b)
+maybe_ne (const Ca &a, const poly_int<N, Cb> &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1324,7 +1251,7 @@ maybe_ne (const Ca &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-maybe_le (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+maybe_le (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1335,7 +1262,7 @@ maybe_le (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Cb, bool>::type
-maybe_le (const poly_int_pod<N, Ca> &a, const Cb &b)
+maybe_le (const poly_int<N, Ca> &a, const Cb &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1346,7 +1273,7 @@ maybe_le (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Ca, bool>::type
-maybe_le (const Ca &a, const poly_int_pod<N, Cb> &b)
+maybe_le (const Ca &a, const poly_int<N, Cb> &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1366,7 +1293,7 @@ maybe_le (const Ca &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-maybe_lt (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+maybe_lt (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1377,7 +1304,7 @@ maybe_lt (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Cb, bool>::type
-maybe_lt (const poly_int_pod<N, Ca> &a, const Cb &b)
+maybe_lt (const poly_int<N, Ca> &a, const Cb &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1388,7 +1315,7 @@ maybe_lt (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Ca, bool>::type
-maybe_lt (const Ca &a, const poly_int_pod<N, Cb> &b)
+maybe_lt (const Ca &a, const poly_int<N, Cb> &b)
{
if (N >= 2)
for (unsigned int i = 1; i < N; i++)
@@ -1442,7 +1369,7 @@ ordered_p (const T1 &a, const T2 &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-ordered_min (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+ordered_min (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (known_le (a, b))
return a;
@@ -1456,7 +1383,7 @@ ordered_min (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-ordered_min (const Ca &a, const poly_int_pod<N, Cb> &b)
+ordered_min (const Ca &a, const poly_int<N, Cb> &b)
{
if (known_le (a, b))
return a;
@@ -1470,7 +1397,7 @@ ordered_min (const Ca &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-ordered_min (const poly_int_pod<N, Ca> &a, const Cb &b)
+ordered_min (const poly_int<N, Ca> &a, const Cb &b)
{
if (known_le (a, b))
return a;
@@ -1490,7 +1417,7 @@ ordered_min (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-ordered_max (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+ordered_max (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (known_le (a, b))
return b;
@@ -1504,7 +1431,7 @@ ordered_max (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-ordered_max (const Ca &a, const poly_int_pod<N, Cb> &b)
+ordered_max (const Ca &a, const poly_int<N, Cb> &b)
{
if (known_le (a, b))
return b;
@@ -1518,7 +1445,7 @@ ordered_max (const Ca &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-ordered_max (const poly_int_pod<N, Ca> &a, const Cb &b)
+ordered_max (const poly_int<N, Ca> &a, const Cb &b)
{
if (known_le (a, b))
return b;
@@ -1535,7 +1462,7 @@ ordered_max (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca>
inline Ca
-constant_lower_bound (const poly_int_pod<N, Ca> &a)
+constant_lower_bound (const poly_int<N, Ca> &a)
{
gcc_checking_assert (known_ge (a, POLY_INT_TYPE (Ca) (0)));
return a.coeffs[0];
@@ -1545,7 +1472,7 @@ constant_lower_bound (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_COEFF (Ca, Cb)
-constant_lower_bound_with_limit (const poly_int_pod<N, Ca> &a, const Cb &b)
+constant_lower_bound_with_limit (const poly_int<N, Ca> &a, const Cb &b)
{
if (known_ge (a, b))
return a.coeffs[0];
@@ -1557,7 +1484,7 @@ constant_lower_bound_with_limit (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_COEFF (Ca, Cb)
-constant_upper_bound_with_limit (const poly_int_pod<N, Ca> &a, const Cb &b)
+constant_upper_bound_with_limit (const poly_int<N, Ca> &a, const Cb &b)
{
if (known_le (a, b))
return a.coeffs[0];
@@ -1570,7 +1497,7 @@ constant_upper_bound_with_limit (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-lower_bound (const poly_int_pod<N, Ca> &a, const Cb &b)
+lower_bound (const poly_int<N, Ca> &a, const Cb &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -1587,14 +1514,14 @@ lower_bound (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-lower_bound (const Ca &a, const poly_int_pod<N, Cb> &b)
+lower_bound (const Ca &a, const poly_int<N, Cb> &b)
{
return lower_bound (b, a);
}
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-lower_bound (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+lower_bound (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -1619,7 +1546,7 @@ lower_bound (const Ca &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-upper_bound (const poly_int_pod<N, Ca> &a, const Cb &b)
+upper_bound (const poly_int<N, Ca> &a, const Cb &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -1636,14 +1563,14 @@ upper_bound (const poly_int_pod<N, Ca> &a, const Cb &b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-upper_bound (const Ca &a, const poly_int_pod<N, Cb> &b)
+upper_bound (const Ca &a, const poly_int<N, Cb> &b)
{
return upper_bound (b, a);
}
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-upper_bound (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+upper_bound (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -1660,7 +1587,7 @@ upper_bound (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca>
inline POLY_BINARY_COEFF (Ca, Ca)
-coeff_gcd (const poly_int_pod<N, Ca> &a)
+coeff_gcd (const poly_int<N, Ca> &a)
{
/* Find the first nonzero coefficient, stopping at 0 whatever happens. */
unsigned int i;
@@ -1681,7 +1608,7 @@ coeff_gcd (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename Ca, typename Cb>
POLY_CONST_RESULT (N, Ca, Cb)
-common_multiple (const poly_int_pod<N, Ca> &a, Cb b)
+common_multiple (const poly_int<N, Ca> &a, Cb b)
{
POLY_BINARY_COEFF (Ca, Ca) xgcd = coeff_gcd (a);
return a * (least_common_multiple (xgcd, b) / xgcd);
@@ -1689,7 +1616,7 @@ common_multiple (const poly_int_pod<N, Ca> &a, Cb b)
template<unsigned int N, typename Ca, typename Cb>
inline CONST_POLY_RESULT (N, Ca, Cb)
-common_multiple (const Ca &a, const poly_int_pod<N, Cb> &b)
+common_multiple (const Ca &a, const poly_int<N, Cb> &b)
{
return common_multiple (b, a);
}
@@ -1704,8 +1631,7 @@ common_multiple (const Ca &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
POLY_POLY_RESULT (N, Ca, Cb)
-force_common_multiple (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b)
+force_common_multiple (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (b.is_constant ())
return common_multiple (a, b.coeffs[0]);
@@ -1743,8 +1669,7 @@ force_common_multiple (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb>
inline int
-compare_sizes_for_sort (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b)
+compare_sizes_for_sort (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
for (unsigned int i = N; i-- > 0; )
if (a.coeffs[i] != b.coeffs[i])
@@ -1756,7 +1681,7 @@ compare_sizes_for_sort (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb>
inline bool
-can_align_p (const poly_int_pod<N, Ca> &value, Cb align)
+can_align_p (const poly_int<N, Ca> &value, Cb align)
{
for (unsigned int i = 1; i < N; i++)
if ((value.coeffs[i] & (align - 1)) != 0)
@@ -1769,8 +1694,8 @@ can_align_p (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-can_align_up (const poly_int_pod<N, Ca> &value, Cb align,
- poly_int_pod<N, Ca> *aligned)
+can_align_up (const poly_int<N, Ca> &value, Cb align,
+ poly_int<N, Ca> *aligned)
{
if (!can_align_p (value, align))
return false;
@@ -1783,8 +1708,8 @@ can_align_up (const poly_int_pod<N, Ca> &value, Cb align,
template<unsigned int N, typename Ca, typename Cb>
inline bool
-can_align_down (const poly_int_pod<N, Ca> &value, Cb align,
- poly_int_pod<N, Ca> *aligned)
+can_align_down (const poly_int<N, Ca> &value, Cb align,
+ poly_int<N, Ca> *aligned)
{
if (!can_align_p (value, align))
return false;
@@ -1798,8 +1723,8 @@ can_align_down (const poly_int_pod<N, Ca> &value, Cb align,
template<unsigned int N, typename Ca, typename Cb, typename Cc>
inline bool
-known_equal_after_align_up (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b,
+known_equal_after_align_up (const poly_int<N, Ca> &a,
+ const poly_int<N, Cb> &b,
Cc align)
{
poly_int<N, Ca> aligned_a;
@@ -1815,8 +1740,8 @@ known_equal_after_align_up (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb, typename Cc>
inline bool
-known_equal_after_align_down (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b,
+known_equal_after_align_down (const poly_int<N, Ca> &a,
+ const poly_int<N, Cb> &b,
Cc align)
{
poly_int<N, Ca> aligned_a;
@@ -1835,7 +1760,7 @@ known_equal_after_align_down (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, Ca>
-force_align_up (const poly_int_pod<N, Ca> &value, Cb align)
+force_align_up (const poly_int<N, Ca> &value, Cb align)
{
gcc_checking_assert (can_align_p (value, align));
return value + (-value.coeffs[0] & (align - 1));
@@ -1850,7 +1775,7 @@ force_align_up (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, Ca>
-force_align_down (const poly_int_pod<N, Ca> &value, Cb align)
+force_align_down (const poly_int<N, Ca> &value, Cb align)
{
gcc_checking_assert (can_align_p (value, align));
return value - (value.coeffs[0] & (align - 1));
@@ -1862,7 +1787,7 @@ force_align_down (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, Ca>
-aligned_lower_bound (const poly_int_pod<N, Ca> &value, Cb align)
+aligned_lower_bound (const poly_int<N, Ca> &value, Cb align)
{
poly_int<N, Ca> r;
for (unsigned int i = 0; i < N; i++)
@@ -1879,7 +1804,7 @@ aligned_lower_bound (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, Ca>
-aligned_upper_bound (const poly_int_pod<N, Ca> &value, Cb align)
+aligned_upper_bound (const poly_int<N, Ca> &value, Cb align)
{
poly_int<N, Ca> r;
for (unsigned int i = 0; i < N; i++)
@@ -1898,7 +1823,7 @@ aligned_upper_bound (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, Ca>
-force_align_down_and_div (const poly_int_pod<N, Ca> &value, Cb align)
+force_align_down_and_div (const poly_int<N, Ca> &value, Cb align)
{
gcc_checking_assert (can_align_p (value, align));
@@ -1922,7 +1847,7 @@ force_align_down_and_div (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, Ca>
-force_align_up_and_div (const poly_int_pod<N, Ca> &value, Cb align)
+force_align_up_and_div (const poly_int<N, Ca> &value, Cb align)
{
gcc_checking_assert (can_align_p (value, align));
@@ -1942,7 +1867,7 @@ force_align_up_and_div (const poly_int_pod<N, Ca> &value, Cb align)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline bool
-known_misalignment (const poly_int_pod<N, Ca> &value, Cb align, Cm *misalign)
+known_misalignment (const poly_int<N, Ca> &value, Cb align, Cm *misalign)
{
gcc_checking_assert (align != 0);
if (!can_align_p (value, align))
@@ -1957,7 +1882,7 @@ known_misalignment (const poly_int_pod<N, Ca> &value, Cb align, Cm *misalign)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_BINARY_COEFF (Ca, Ca)
-force_get_misalignment (const poly_int_pod<N, Ca> &a, Cb align)
+force_get_misalignment (const poly_int<N, Ca> &a, Cb align)
{
gcc_checking_assert (can_align_p (a, align));
return a.coeffs[0] & (align - 1);
@@ -1968,7 +1893,7 @@ force_get_misalignment (const poly_int_pod<N, Ca> &a, Cb align)
template<unsigned int N, typename Ca>
inline POLY_BINARY_COEFF (Ca, Ca)
-known_alignment (const poly_int_pod<N, Ca> &a)
+known_alignment (const poly_int<N, Ca> &a)
{
typedef POLY_BINARY_COEFF (Ca, Ca) C;
C r = a.coeffs[0];
@@ -1982,7 +1907,7 @@ known_alignment (const poly_int_pod<N, Ca> &a)
template<unsigned int N, typename Ca, typename Cb, typename Cr>
inline typename if_nonpoly<Cb, bool>::type
-can_ior_p (const poly_int_pod<N, Ca> &a, Cb b, Cr *result)
+can_ior_p (const poly_int<N, Ca> &a, Cb b, Cr *result)
{
/* Coefficients 1 and above must be a multiple of something greater
than B. */
@@ -2001,7 +1926,7 @@ can_ior_p (const poly_int_pod<N, Ca> &a, Cb b, Cr *result)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline typename if_nonpoly<Cb, bool>::type
-constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b, Cm *multiple)
+constant_multiple_p (const poly_int<N, Ca> &a, Cb b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2016,7 +1941,7 @@ constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b, Cm *multiple)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline typename if_nonpoly<Ca, bool>::type
-constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
+constant_multiple_p (Ca a, const poly_int<N, Cb> &b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2033,8 +1958,8 @@ constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline bool
-constant_multiple_p (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b, Cm *multiple)
+constant_multiple_p (const poly_int<N, Ca> &a,
+ const poly_int<N, Cb> &b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2061,7 +1986,7 @@ constant_multiple_p (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Cb, bool>::type
-constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
+constant_multiple_p (const poly_int<N, Ca> &a, Cb b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2075,7 +2000,7 @@ constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Ca, bool>::type
-constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
+constant_multiple_p (Ca a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2091,8 +2016,7 @@ constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-constant_multiple_p (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b)
+constant_multiple_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2127,7 +2051,7 @@ multiple_p (Ca a, Cb b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Cb, bool>::type
-multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
+multiple_p (const poly_int<N, Ca> &a, Cb b)
{
for (unsigned int i = 0; i < N; ++i)
if (a.coeffs[i] % b != 0)
@@ -2139,7 +2063,7 @@ multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
template<unsigned int N, typename Ca, typename Cb>
inline typename if_nonpoly<Ca, bool>::type
-multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
+multiple_p (Ca a, const poly_int<N, Cb> &b)
{
typedef POLY_INT_TYPE (Ca) int_type;
@@ -2153,7 +2077,7 @@ multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb>
inline bool
-multiple_p (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+multiple_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (b.is_constant ())
return multiple_p (a, b.coeffs[0]);
@@ -2179,7 +2103,7 @@ multiple_p (Ca a, Cb b, Cm *multiple)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline typename if_nonpoly<Cb, bool>::type
-multiple_p (const poly_int_pod<N, Ca> &a, Cb b, poly_int_pod<N, Cm> *multiple)
+multiple_p (const poly_int<N, Ca> &a, Cb b, poly_int<N, Cm> *multiple)
{
if (!multiple_p (a, b))
return false;
@@ -2193,7 +2117,7 @@ multiple_p (const poly_int_pod<N, Ca> &a, Cb b, poly_int_pod<N, Cm> *multiple)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline typename if_nonpoly<Ca, bool>::type
-multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
+multiple_p (Ca a, const poly_int<N, Cb> &b, Cm *multiple)
{
typedef POLY_CAST (Ca, Cb) NCa;
@@ -2211,8 +2135,8 @@ multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
template<unsigned int N, typename Ca, typename Cb, typename Cm>
inline bool
-multiple_p (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
- poly_int_pod<N, Cm> *multiple)
+multiple_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
+ poly_int<N, Cm> *multiple)
{
if (b.is_constant ())
return multiple_p (a, b.coeffs[0], multiple);
@@ -2223,7 +2147,7 @@ multiple_p (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
template<unsigned int N, typename Ca, typename Cb>
inline POLY_CONST_RESULT (N, Ca, Cb)
-exact_div (const poly_int_pod<N, Ca> &a, Cb b)
+exact_div (const poly_int<N, Ca> &a, Cb b)
{
typedef POLY_CONST_COEFF (Ca, Cb) C;
poly_int<N, C> r;
@@ -2239,7 +2163,7 @@ exact_div (const poly_int_pod<N, Ca> &a, Cb b)
template<unsigned int N, typename Ca, typename Cb>
inline POLY_POLY_RESULT (N, Ca, Cb)
-exact_div (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+exact_div (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b)
{
if (b.is_constant ())
return exact_div (a, b.coeffs[0]);
@@ -2270,7 +2194,7 @@ exact_div (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
template<unsigned int N, typename Ca, typename Cb, typename Cq>
inline typename if_nonpoly2<Cb, Cq, bool>::type
-can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b, Cq *quotient)
+can_div_trunc_p (const poly_int<N, Ca> &a, Cb b, Cq *quotient)
{
typedef POLY_CAST (Ca, Cb) NCa;
typedef POLY_CAST (Cb, Ca) NCb;
@@ -2286,8 +2210,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b, Cq *quotient)
template<unsigned int N, typename Ca, typename Cb, typename Cq>
inline typename if_nonpoly<Cq, bool>::type
-can_div_trunc_p (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b,
+can_div_trunc_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
Cq *quotient)
{
/* We can calculate Q from the case in which the indeterminates
@@ -2397,8 +2320,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb, typename Cq, typename Cr>
inline typename if_nonpoly<Cq, bool>::type
-can_div_trunc_p (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b,
+can_div_trunc_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
Cq *quotient, Cr *remainder)
{
if (!can_div_trunc_p (a, b, quotient))
@@ -2417,8 +2339,8 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb, typename Cq>
inline typename if_nonpoly<Cb, bool>::type
-can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
- poly_int_pod<N, Cq> *quotient)
+can_div_trunc_p (const poly_int<N, Ca> &a, Cb b,
+ poly_int<N, Cq> *quotient)
{
/* The remainder must be constant. */
for (unsigned int i = 1; i < N; ++i)
@@ -2433,8 +2355,8 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
template<unsigned int N, typename Ca, typename Cb, typename Cq, typename Cr>
inline typename if_nonpoly<Cb, bool>::type
-can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
- poly_int_pod<N, Cq> *quotient, Cr *remainder)
+can_div_trunc_p (const poly_int<N, Ca> &a, Cb b,
+ poly_int<N, Cq> *quotient, Cr *remainder)
{
if (!can_div_trunc_p (a, b, quotient))
return false;
@@ -2450,9 +2372,8 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
template<unsigned int N, typename Ca, typename Cb, typename Cq>
inline bool
-can_div_trunc_p (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b,
- poly_int_pod<N, Cq> *quotient)
+can_div_trunc_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
+ poly_int<N, Cq> *quotient)
{
if (b.is_constant ())
return can_div_trunc_p (a, b.coeffs[0], quotient);
@@ -2473,8 +2394,7 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename Ca, typename Cb, typename Cq>
inline typename if_nonpoly<Cq, bool>::type
-can_div_away_from_zero_p (const poly_int_pod<N, Ca> &a,
- const poly_int_pod<N, Cb> &b,
+can_div_away_from_zero_p (const poly_int<N, Ca> &a, const poly_int<N, Cb> &b,
Cq *quotient)
{
if (!can_div_trunc_p (a, b, quotient))
@@ -2489,7 +2409,7 @@ can_div_away_from_zero_p (const poly_int_pod<N, Ca> &a,
template<unsigned int N, typename C>
void
-print_dec (const poly_int_pod<N, C> &value, FILE *file, signop sgn)
+print_dec (const poly_int<N, C> &value, FILE *file, signop sgn)
{
if (value.is_constant ())
print_dec (value.coeffs[0], file, sgn);
@@ -2509,7 +2429,7 @@ print_dec (const poly_int_pod<N, C> &value, FILE *file, signop sgn)
template<unsigned int N, typename C>
void
-print_dec (const poly_int_pod<N, C> &value, FILE *file)
+print_dec (const poly_int<N, C> &value, FILE *file)
{
STATIC_ASSERT (poly_coeff_traits<C>::signedness >= 0);
print_dec (value, file,
@@ -2520,7 +2440,7 @@ print_dec (const poly_int_pod<N, C> &value, FILE *file)
template<unsigned int N, typename C>
void
-print_hex (const poly_int_pod<N, C> &value, FILE *file)
+print_hex (const poly_int<N, C> &value, FILE *file)
{
if (value.is_constant ())
print_hex (value.coeffs[0], file);
@@ -2570,7 +2490,7 @@ struct poly_span_traits<T1, T2, unsigned HOST_WIDE_INT>
template<unsigned int N, typename T>
static poly_int<N, unsigned HOST_WIDE_INT>
- cast (const poly_int_pod<N, T> &x) { return x; }
+ cast (const poly_int<N, T> &x) { return x; }
};
/* Return true if SIZE represents a known size, assuming that all-ones
@@ -2709,8 +2629,8 @@ endpoint_representable_p (const T &pos, const T &size)
template<unsigned int N, typename C>
inline bool
-endpoint_representable_p (const poly_int_pod<N, C> &pos,
- const poly_int_pod<N, C> &size)
+endpoint_representable_p (const poly_int<N, C> &pos,
+ const poly_int<N, C> &size)
{
if (known_size_p (size))
for (unsigned int i = 0; i < N; ++i)
@@ -2721,19 +2641,19 @@ endpoint_representable_p (const poly_int_pod<N, C> &pos,
template<unsigned int N, typename C>
void
-gt_ggc_mx (poly_int_pod<N, C> *)
+gt_ggc_mx (poly_int<N, C> *)
{
}
template<unsigned int N, typename C>
void
-gt_pch_nx (poly_int_pod<N, C> *)
+gt_pch_nx (poly_int<N, C> *)
{
}
template<unsigned int N, typename C>
void
-gt_pch_nx (poly_int_pod<N, C> *, gt_pointer_operator, void *)
+gt_pch_nx (poly_int<N, C> *, gt_pointer_operator, void *)
{
}
diff --git a/gcc/pretty-print.cc b/gcc/pretty-print.cc
index 3d789a2..75446cc 100644
--- a/gcc/pretty-print.cc
+++ b/gcc/pretty-print.cc
@@ -811,7 +811,7 @@ pp_clear_state (pretty_printer *pp)
/* Print X to PP in decimal. */
template<unsigned int N, typename T>
void
-pp_wide_integer (pretty_printer *pp, const poly_int_pod<N, T> &x)
+pp_wide_integer (pretty_printer *pp, const poly_int<N, T> &x)
{
if (x.is_constant ())
pp_wide_integer (pp, x.coeffs[0]);
@@ -828,9 +828,9 @@ pp_wide_integer (pretty_printer *pp, const poly_int_pod<N, T> &x)
}
}
-template void pp_wide_integer (pretty_printer *, const poly_uint16_pod &);
-template void pp_wide_integer (pretty_printer *, const poly_int64_pod &);
-template void pp_wide_integer (pretty_printer *, const poly_uint64_pod &);
+template void pp_wide_integer (pretty_printer *, const poly_uint16 &);
+template void pp_wide_integer (pretty_printer *, const poly_int64 &);
+template void pp_wide_integer (pretty_printer *, const poly_uint64 &);
/* Flush the formatted text of PRETTY-PRINTER onto the attached stream. */
void
@@ -1063,7 +1063,7 @@ static const char *get_end_url_string (pretty_printer *);
A format string can have at most 30 arguments. */
/* Formatting phases 1 and 2: render TEXT->format_spec plus
- TEXT->args_ptr into a series of chunks in pp_buffer (PP)->args[].
+ text->m_args_ptr into a series of chunks in pp_buffer (PP)->args[].
Phase 3 is in pp_output_formatted_text. */
void
@@ -1093,7 +1093,7 @@ pp_format (pretty_printer *pp, text_info *text)
memset (formatters, 0, sizeof formatters);
- for (p = text->format_spec; *p; )
+ for (p = text->m_format_spec; *p; )
{
while (*p != '\0' && *p != '%')
{
@@ -1157,7 +1157,7 @@ pp_format (pretty_printer *pp, text_info *text)
case 'm':
{
- const char *errstr = xstrerror (text->err_no);
+ const char *errstr = xstrerror (text->m_err_no);
obstack_grow (&buffer->chunk_obstack, errstr, strlen (errstr));
}
p++;
@@ -1316,7 +1316,7 @@ pp_format (pretty_printer *pp, text_info *text)
{
case 'r':
pp_string (pp, colorize_start (pp_show_color (pp),
- va_arg (*text->args_ptr,
+ va_arg (*text->m_args_ptr,
const char *)));
break;
@@ -1325,7 +1325,7 @@ pp_format (pretty_printer *pp, text_info *text)
/* When quoting, print alphanumeric, punctuation, and the space
character unchanged, and all others in hexadecimal with the
"\x" prefix. Otherwise print them all unchanged. */
- int chr = va_arg (*text->args_ptr, int);
+ int chr = va_arg (*text->m_args_ptr, int);
if (ISPRINT (chr) || !quote)
pp_character (pp, chr);
else
@@ -1339,49 +1339,49 @@ pp_format (pretty_printer *pp, text_info *text)
case 'd':
case 'i':
if (wide)
- pp_wide_integer (pp, va_arg (*text->args_ptr, HOST_WIDE_INT));
+ pp_wide_integer (pp, va_arg (*text->m_args_ptr, HOST_WIDE_INT));
else
pp_integer_with_precision
- (pp, *text->args_ptr, precision, int, "d");
+ (pp, *text->m_args_ptr, precision, int, "d");
break;
case 'o':
if (wide)
pp_scalar (pp, "%" HOST_WIDE_INT_PRINT "o",
- va_arg (*text->args_ptr, unsigned HOST_WIDE_INT));
+ va_arg (*text->m_args_ptr, unsigned HOST_WIDE_INT));
else
pp_integer_with_precision
- (pp, *text->args_ptr, precision, unsigned, "o");
+ (pp, *text->m_args_ptr, precision, unsigned, "o");
break;
case 's':
if (quote)
- pp_quoted_string (pp, va_arg (*text->args_ptr, const char *));
+ pp_quoted_string (pp, va_arg (*text->m_args_ptr, const char *));
else
- pp_string (pp, va_arg (*text->args_ptr, const char *));
+ pp_string (pp, va_arg (*text->m_args_ptr, const char *));
break;
case 'p':
- pp_pointer (pp, va_arg (*text->args_ptr, void *));
+ pp_pointer (pp, va_arg (*text->m_args_ptr, void *));
break;
case 'u':
if (wide)
pp_scalar (pp, HOST_WIDE_INT_PRINT_UNSIGNED,
- va_arg (*text->args_ptr, unsigned HOST_WIDE_INT));
+ va_arg (*text->m_args_ptr, unsigned HOST_WIDE_INT));
else
pp_integer_with_precision
- (pp, *text->args_ptr, precision, unsigned, "u");
+ (pp, *text->m_args_ptr, precision, unsigned, "u");
break;
case 'f':
- pp_double (pp, va_arg (*text->args_ptr, double));
+ pp_double (pp, va_arg (*text->m_args_ptr, double));
break;
case 'Z':
{
- int *v = va_arg (*text->args_ptr, int *);
- unsigned len = va_arg (*text->args_ptr, unsigned);
+ int *v = va_arg (*text->m_args_ptr, int *);
+ unsigned len = va_arg (*text->m_args_ptr, unsigned);
for (unsigned i = 0; i < len; ++i)
{
@@ -1398,10 +1398,10 @@ pp_format (pretty_printer *pp, text_info *text)
case 'x':
if (wide)
pp_scalar (pp, HOST_WIDE_INT_PRINT_HEX,
- va_arg (*text->args_ptr, unsigned HOST_WIDE_INT));
+ va_arg (*text->m_args_ptr, unsigned HOST_WIDE_INT));
else
pp_integer_with_precision
- (pp, *text->args_ptr, precision, unsigned, "x");
+ (pp, *text->m_args_ptr, precision, unsigned, "x");
break;
case '.':
@@ -1425,14 +1425,14 @@ pp_format (pretty_printer *pp, text_info *text)
gcc_assert (*p == '*');
p++;
gcc_assert (*p == 's');
- n = va_arg (*text->args_ptr, int);
+ n = va_arg (*text->m_args_ptr, int);
/* This consumes a second entry in the formatters array. */
gcc_assert (formatters[argno] == formatters[argno+1]);
argno++;
}
- s = va_arg (*text->args_ptr, const char *);
+ s = va_arg (*text->m_args_ptr, const char *);
/* Append the lesser of precision and strlen (s) characters
from the array (which need not be a nul-terminated string).
@@ -1447,7 +1447,7 @@ pp_format (pretty_printer *pp, text_info *text)
{
/* diagnostic_event_id_t *. */
diagnostic_event_id_ptr event_id
- = va_arg (*text->args_ptr, diagnostic_event_id_ptr);
+ = va_arg (*text->m_args_ptr, diagnostic_event_id_ptr);
gcc_assert (event_id->known_p ());
pp_string (pp, colorize_start (pp_show_color (pp), "path"));
@@ -1459,7 +1459,7 @@ pp_format (pretty_printer *pp, text_info *text)
break;
case '{':
- pp_begin_url (pp, va_arg (*text->args_ptr, const char *));
+ pp_begin_url (pp, va_arg (*text->m_args_ptr, const char *));
break;
default:
@@ -1763,13 +1763,10 @@ pp_remaining_character_count_for_line (pretty_printer *pp)
void
pp_printf (pretty_printer *pp, const char *msg, ...)
{
- text_info text;
va_list ap;
va_start (ap, msg);
- text.err_no = errno;
- text.args_ptr = &ap;
- text.format_spec = msg;
+ text_info text (msg, &ap, errno);
pp_format (pp, &text);
pp_output_formatted_text (pp);
va_end (ap);
@@ -1780,13 +1777,10 @@ pp_printf (pretty_printer *pp, const char *msg, ...)
void
pp_verbatim (pretty_printer *pp, const char *msg, ...)
{
- text_info text;
va_list ap;
va_start (ap, msg);
- text.err_no = errno;
- text.args_ptr = &ap;
- text.format_spec = msg;
+ text_info text (msg, &ap, errno);
pp_format_verbatim (pp, &text);
va_end (ap);
}
@@ -2290,14 +2284,9 @@ assert_pp_format_va (const location &loc, const char *expected,
bool show_color, const char *fmt, va_list *ap)
{
pretty_printer pp;
- text_info ti;
rich_location rich_loc (line_table, UNKNOWN_LOCATION);
- ti.format_spec = fmt;
- ti.args_ptr = ap;
- ti.err_no = 0;
- ti.x_data = NULL;
- ti.m_richloc = &rich_loc;
+ text_info ti (fmt, ap, 0, nullptr, &rich_loc);
pp_show_color (&pp) = show_color;
pp_format (&pp, &ti);
diff --git a/gcc/pretty-print.h b/gcc/pretty-print.h
index 0d9bc99..02658c8 100644
--- a/gcc/pretty-print.h
+++ b/gcc/pretty-print.h
@@ -31,15 +31,29 @@ along with GCC; see the file COPYING3. If not see
along with a list of things. */
struct text_info
{
- const char *format_spec;
- va_list *args_ptr;
- int err_no; /* for %m */
- void **x_data;
- rich_location *m_richloc;
+ text_info () = default;
+ text_info (const char *format_spec,
+ va_list *args_ptr,
+ int err_no,
+ void **data = nullptr,
+ rich_location *rich_loc = nullptr)
+ : m_format_spec (format_spec),
+ m_args_ptr (args_ptr),
+ m_err_no (err_no),
+ m_data (data),
+ m_richloc (rich_loc)
+ {
+ }
void set_location (unsigned int idx, location_t loc,
enum range_display_kind range_display_kind);
location_t get_location (unsigned int index_of_location) const;
+
+ const char *m_format_spec;
+ va_list *m_args_ptr;
+ int m_err_no; /* for %m */
+ void **m_data;
+ rich_location *m_richloc;
};
/* How often diagnostics are prefixed by their locations:
@@ -434,8 +448,9 @@ pp_wide_integer (pretty_printer *pp, HOST_WIDE_INT i)
inline void
pp_wide_int (pretty_printer *pp, const wide_int_ref &w, signop sgn)
{
- unsigned int prec = w.get_precision ();
- if (UNLIKELY ((prec + 3) / 4 > sizeof (pp_buffer (pp)->digit_buffer) - 3))
+ unsigned int len;
+ print_dec_buf_size (w, sgn, &len);
+ if (UNLIKELY (len > sizeof (pp_buffer (pp)->digit_buffer)))
pp_wide_int_large (pp, w, sgn);
else
{
@@ -445,6 +460,6 @@ pp_wide_int (pretty_printer *pp, const wide_int_ref &w, signop sgn)
}
template<unsigned int N, typename T>
-void pp_wide_integer (pretty_printer *pp, const poly_int_pod<N, T> &);
+void pp_wide_integer (pretty_printer *pp, const poly_int<N, T> &);
#endif /* GCC_PRETTY_PRINT_H */
diff --git a/gcc/print-tree.cc b/gcc/print-tree.cc
index 62451b6..e4ee8b8 100644
--- a/gcc/print-tree.cc
+++ b/gcc/print-tree.cc
@@ -365,13 +365,13 @@ print_node (FILE *file, const char *prefix, tree node, int indent,
fputs (code == CALL_EXPR ? " must-tail-call" : " static", file);
if (TREE_DEPRECATED (node))
fputs (" deprecated", file);
- if (TREE_UNAVAILABLE (node))
- fputs (" unavailable", file);
if (TREE_VISITED (node))
fputs (" visited", file);
if (code != TREE_VEC && code != INTEGER_CST && code != SSA_NAME)
{
+ if (TREE_UNAVAILABLE (node))
+ fputs (" unavailable", file);
if (TREE_LANG_FLAG_0 (node))
fputs (" tree_0", file);
if (TREE_LANG_FLAG_1 (node))
diff --git a/gcc/real.cc b/gcc/real.cc
index fc6b3a7..a9996c8 100644
--- a/gcc/real.cc
+++ b/gcc/real.cc
@@ -1477,7 +1477,7 @@ real_to_integer (const REAL_VALUE_TYPE *r)
wide_int
real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
{
- HOST_WIDE_INT val[2 * WIDE_INT_MAX_ELTS];
+ HOST_WIDE_INT valb[WIDE_INT_MAX_INL_ELTS], *val;
int exp;
int words, w;
wide_int result;
@@ -1516,7 +1516,11 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
is the smallest HWI-multiple that has at least PRECISION bits.
This ensures that the top bit of the significand is in the
top bit of the wide_int. */
- words = (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
+ words = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+ val = valb;
+ if (UNLIKELY (words > WIDE_INT_MAX_INL_ELTS))
+ val = XALLOCAVEC (HOST_WIDE_INT, words);
w = words * HOST_BITS_PER_WIDE_INT;
#if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG)
diff --git a/gcc/reg-notes.def b/gcc/reg-notes.def
index 1f74a60..5cbe35d 100644
--- a/gcc/reg-notes.def
+++ b/gcc/reg-notes.def
@@ -96,8 +96,9 @@ REG_NOTE (DEP_CONTROL)
to extract the actual value. */
REG_NOTE (BR_PROB)
-/* Attached to a call insn; indicates that the call is malloc-like and
- that the pointer returned cannot alias anything else. */
+/* Attached to a move insn which receives the result of a call; indicates that
+ the call is malloc-like and that the pointer returned cannot alias anything
+ else. */
REG_NOTE (NOALIAS)
/* REG_BR_PRED is attached to JUMP_INSNs. It contains
diff --git a/gcc/reload.cc b/gcc/reload.cc
index 2126bdd..2e57ebb 100644
--- a/gcc/reload.cc
+++ b/gcc/reload.cc
@@ -168,8 +168,8 @@ struct decomposition
int reg_flag; /* Nonzero if referencing a register. */
int safe; /* Nonzero if this can't conflict with anything. */
rtx base; /* Base address for MEM. */
- poly_int64_pod start; /* Starting offset or register number. */
- poly_int64_pod end; /* Ending offset or register number. */
+ poly_int64 start; /* Starting offset or register number. */
+ poly_int64 end; /* Ending offset or register number. */
};
/* Save MEMs needed to copy from one class of registers to another. One MEM
@@ -3321,7 +3321,7 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known,
were handled in find_reloads_address. */
this_alternative[i]
= base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
- ADDRESS, SCRATCH);
+ ADDRESS, SCRATCH, insn);
win = 1;
badop = 0;
break;
@@ -3508,7 +3508,7 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known,
the address into a base register. */
this_alternative[i]
= base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
- ADDRESS, SCRATCH);
+ ADDRESS, SCRATCH, insn);
badop = 0;
break;
@@ -4018,7 +4018,7 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known,
operand_reloadnum[i]
= push_reload (XEXP (recog_data.operand[i], 0), NULL_RTX,
&XEXP (recog_data.operand[i], 0), (rtx*) 0,
- base_reg_class (VOIDmode, as, MEM, SCRATCH),
+ base_reg_class (VOIDmode, as, MEM, SCRATCH, insn),
address_mode,
VOIDmode, 0, 0, i, RELOAD_OTHER);
rld[operand_reloadnum[i]].inc
@@ -4897,7 +4897,8 @@ find_reloads_address (machine_mode mode, rtx *memrefloc, rtx ad,
if (reg_equiv_constant (regno) != 0)
{
find_reloads_address_part (reg_equiv_constant (regno), loc,
- base_reg_class (mode, as, MEM, SCRATCH),
+ base_reg_class (mode, as, MEM,
+ SCRATCH, insn),
GET_MODE (ad), opnum, type, ind_levels);
return 1;
}
@@ -4966,7 +4967,7 @@ find_reloads_address (machine_mode mode, rtx *memrefloc, rtx ad,
/* If we do not have one of the cases above, we must do the reload. */
push_reload (ad, NULL_RTX, loc, (rtx*) 0,
- base_reg_class (mode, as, MEM, SCRATCH),
+ base_reg_class (mode, as, MEM, SCRATCH, insn),
GET_MODE (ad), VOIDmode, 0, 0, opnum, type);
return 1;
}
@@ -5113,7 +5114,7 @@ find_reloads_address (machine_mode mode, rtx *memrefloc, rtx ad,
/* Reload the displacement into an index reg.
We assume the frame pointer or arg pointer is a base reg. */
find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1),
- INDEX_REG_CLASS, GET_MODE (ad), opnum,
+ index_reg_class (insn), GET_MODE (ad), opnum,
type, ind_levels);
return 0;
}
@@ -5123,7 +5124,8 @@ find_reloads_address (machine_mode mode, rtx *memrefloc, rtx ad,
reload the sum into a base reg.
That will at least work. */
find_reloads_address_part (ad, loc,
- base_reg_class (mode, as, MEM, SCRATCH),
+ base_reg_class (mode, as, MEM,
+ SCRATCH, insn),
GET_MODE (ad), opnum, type, ind_levels);
}
return ! removed_and;
@@ -5203,7 +5205,7 @@ find_reloads_address (machine_mode mode, rtx *memrefloc, rtx ad,
op_index == 0 ? addend : offset_reg);
*loc = ad;
- cls = base_reg_class (mode, as, MEM, GET_CODE (addend));
+ cls = base_reg_class (mode, as, MEM, GET_CODE (addend), insn);
find_reloads_address_part (XEXP (ad, op_index),
&XEXP (ad, op_index), cls,
GET_MODE (ad), opnum, type, ind_levels);
@@ -5261,7 +5263,8 @@ find_reloads_address (machine_mode mode, rtx *memrefloc, rtx ad,
}
find_reloads_address_part (ad, loc,
- base_reg_class (mode, as, MEM, SCRATCH),
+ base_reg_class (mode, as, MEM,
+ SCRATCH, insn),
address_mode, opnum, type, ind_levels);
return ! removed_and;
}
@@ -5511,9 +5514,10 @@ find_reloads_address_1 (machine_mode mode, addr_space_t as,
bool reloaded_inner_of_autoinc = false;
if (context == 1)
- context_reg_class = INDEX_REG_CLASS;
+ context_reg_class = index_reg_class (insn);
else
- context_reg_class = base_reg_class (mode, as, outer_code, index_code);
+ context_reg_class = base_reg_class (mode, as, outer_code, index_code,
+ insn);
switch (code)
{
@@ -5738,7 +5742,8 @@ find_reloads_address_1 (machine_mode mode, addr_space_t as,
reloadnum = push_reload (tem, tem, &XEXP (x, 0),
&XEXP (op1, 0),
base_reg_class (mode, as,
- code, index_code),
+ code, index_code,
+ insn),
GET_MODE (x), GET_MODE (x), 0,
0, opnum, RELOAD_OTHER);
@@ -5756,7 +5761,8 @@ find_reloads_address_1 (machine_mode mode, addr_space_t as,
reloadnum = push_reload (XEXP (op1, 0), XEXP (x, 0),
&XEXP (op1, 0), &XEXP (x, 0),
base_reg_class (mode, as,
- code, index_code),
+ code, index_code,
+ insn),
GET_MODE (x), GET_MODE (x), 0, 0,
opnum, RELOAD_OTHER);
@@ -6216,7 +6222,7 @@ find_reloads_subreg_address (rtx x, int opnum, enum reload_type type,
{
push_reload (XEXP (tem, 0), NULL_RTX, &XEXP (tem, 0), (rtx*) 0,
base_reg_class (GET_MODE (tem), MEM_ADDR_SPACE (tem),
- MEM, SCRATCH),
+ MEM, SCRATCH, insn),
GET_MODE (XEXP (tem, 0)), VOIDmode, 0, 0, opnum, type);
reloaded = 1;
}
diff --git a/gcc/reload.h b/gcc/reload.h
index 0982d0c..9e8c060 100644
--- a/gcc/reload.h
+++ b/gcc/reload.h
@@ -97,7 +97,7 @@ struct reload
/* Positive amount to increment or decrement by if
reload_in is a PRE_DEC, PRE_INC, POST_DEC, POST_INC.
Ignored otherwise (don't assume it is zero). */
- poly_int64_pod inc;
+ poly_int64 inc;
/* A reg for which reload_in is the equivalent.
If reload_in is a symbol_ref which came from
reg_equiv_constant, then this is the pseudo
diff --git a/gcc/reload1.cc b/gcc/reload1.cc
index 9ba822d..b3e0d9b 100644
--- a/gcc/reload1.cc
+++ b/gcc/reload1.cc
@@ -196,7 +196,7 @@ static int last_spill_reg;
static rtx spill_stack_slot[FIRST_PSEUDO_REGISTER];
/* Width allocated so far for that stack slot. */
-static poly_uint64_pod spill_stack_slot_width[FIRST_PSEUDO_REGISTER];
+static poly_uint64 spill_stack_slot_width[FIRST_PSEUDO_REGISTER];
/* Record which pseudos needed to be spilled. */
static regset_head spilled_pseudos;
@@ -257,13 +257,13 @@ struct elim_table
{
int from; /* Register number to be eliminated. */
int to; /* Register number used as replacement. */
- poly_int64_pod initial_offset; /* Initial difference between values. */
+ poly_int64 initial_offset; /* Initial difference between values. */
int can_eliminate; /* Nonzero if this elimination can be done. */
int can_eliminate_previous; /* Value returned by TARGET_CAN_ELIMINATE
target hook in previous scan over insns
made by reload. */
- poly_int64_pod offset; /* Current offset between the two regs. */
- poly_int64_pod previous_offset; /* Offset at end of previous insn. */
+ poly_int64 offset; /* Current offset between the two regs. */
+ poly_int64 previous_offset; /* Offset at end of previous insn. */
int ref_outside_mem; /* "to" has been referenced outside a MEM. */
rtx from_rtx; /* REG rtx for the register to be eliminated.
We cannot simply compare the number since
@@ -309,7 +309,7 @@ static int num_eliminable_invariants;
static int first_label_num;
static char *offsets_known_at;
-static poly_int64_pod (*offsets_at)[NUM_ELIMINABLE_REGS];
+static poly_int64 (*offsets_at)[NUM_ELIMINABLE_REGS];
vec<reg_equivs_t, va_gc> *reg_equivs;
@@ -1382,7 +1382,7 @@ maybe_fix_stack_asms (void)
if (insn_extra_address_constraint (cn))
cls = (int) reg_class_subunion[cls]
[(int) base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
- ADDRESS, SCRATCH)];
+ ADDRESS, SCRATCH, chain->insn)];
else
cls = (int) reg_class_subunion[cls]
[reg_class_for_constraint (cn)];
@@ -4020,7 +4020,7 @@ init_eliminable_invariants (rtx_insn *first, bool do_subregs)
/* Allocate the tables used to store offset information at labels. */
offsets_known_at = XNEWVEC (char, num_labels);
- offsets_at = (poly_int64_pod (*)[NUM_ELIMINABLE_REGS])
+ offsets_at = (poly_int64 (*)[NUM_ELIMINABLE_REGS])
xmalloc (num_labels * NUM_ELIMINABLE_REGS * sizeof (poly_int64));
/* Look for REG_EQUIV notes; record what each pseudo is equivalent
diff --git a/gcc/rtl-ssa/blocks.cc b/gcc/rtl-ssa/blocks.cc
index 1f9969d..d46cbf1 100644
--- a/gcc/rtl-ssa/blocks.cc
+++ b/gcc/rtl-ssa/blocks.cc
@@ -57,7 +57,7 @@ function_info::build_info::build_info (unsigned int num_regs,
// write to an entry before reading from it. But poison the contents
// when checking, just to make sure we don't accidentally use an
// uninitialized value.
- bb_phis.quick_grow (num_bb_indices);
+ bb_phis.quick_grow_cleared (num_bb_indices);
bb_mem_live_out.quick_grow (num_bb_indices);
bb_to_rpo.quick_grow (num_bb_indices);
if (flag_checking)
@@ -614,7 +614,7 @@ function_info::place_phis (build_info &bi)
// Calculate dominance frontiers.
auto_vec<bitmap_head> frontiers;
- frontiers.safe_grow (num_bb_indices);
+ frontiers.safe_grow_cleared (num_bb_indices);
for (unsigned int i = 0; i < num_bb_indices; ++i)
bitmap_initialize (&frontiers[i], &bitmap_default_obstack);
compute_dominance_frontiers (frontiers.address ());
@@ -626,7 +626,7 @@ function_info::place_phis (build_info &bi)
// they are live on entry to the corresponding block, but do not need
// phi nodes otherwise.
auto_vec<bitmap_head> unfiltered;
- unfiltered.safe_grow (num_bb_indices);
+ unfiltered.safe_grow_cleared (num_bb_indices);
for (unsigned int i = 0; i < num_bb_indices; ++i)
bitmap_initialize (&unfiltered[i], &bitmap_default_obstack);
diff --git a/gcc/rtl-tests.cc b/gcc/rtl-tests.cc
index ae86694..96656c5 100644
--- a/gcc/rtl-tests.cc
+++ b/gcc/rtl-tests.cc
@@ -246,6 +246,7 @@ template<unsigned int N>
void
const_poly_int_tests<N>::run ()
{
+ using poly_int64 = poly_int<N, HOST_WIDE_INT>;
rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
rtx x255 = gen_int_mode (poly_int64 (1, 255), QImode);
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 102ad9b..e4b6cc0 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -204,7 +204,7 @@ union rtunion
{
int rt_int;
unsigned int rt_uint;
- poly_uint16_pod rt_subreg;
+ poly_uint16 rt_subreg;
const char *rt_str;
rtx rt_rtx;
rtvec rt_rtvec;
@@ -2270,6 +2270,7 @@ namespace wi
/* This ought to be true, except for the special case that BImode
is canonicalized to STORE_FLAG_VALUE, which might be 1. */
static const bool is_sign_extended = false;
+ static const bool needs_write_val_arg = false;
static unsigned int get_precision (const rtx_mode_t &);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
const rtx_mode_t &);
@@ -2402,7 +2403,7 @@ rtx_to_poly_int64 (const_rtx x)
otherwise leave it unmodified. */
inline bool
-poly_int_rtx_p (const_rtx x, poly_int64_pod *res)
+poly_int_rtx_p (const_rtx x, poly_int64 *res)
{
if (CONST_INT_P (x))
{
@@ -3630,7 +3631,7 @@ extern HOST_WIDE_INT get_integer_term (const_rtx);
extern rtx get_related_value (const_rtx);
extern bool offset_within_block_p (const_rtx, HOST_WIDE_INT);
extern void split_const (rtx, rtx *, rtx *);
-extern rtx strip_offset (rtx, poly_int64_pod *);
+extern rtx strip_offset (rtx, poly_int64 *);
extern poly_int64 get_args_size (const_rtx);
extern bool unsigned_reg_p (rtx);
extern bool reg_mentioned_p (const_rtx, const_rtx);
@@ -4108,8 +4109,11 @@ extern int epilogue_completed;
extern int reload_in_progress;
-/* Set to 1 while in lra. */
-extern int lra_in_progress;
+/* Set to true while in IRA. */
+extern bool ira_in_progress;
+
+/* Set to true while in LRA. */
+extern bool lra_in_progress;
/* This macro indicates whether you may create a new
pseudo-register. */
@@ -4583,7 +4587,7 @@ load_extend_op (machine_mode mode)
and return the base. Return X otherwise. */
inline rtx
-strip_offset_and_add (rtx x, poly_int64_pod *offset)
+strip_offset_and_add (rtx x, poly_int64 *offset)
{
if (GET_CODE (x) == PLUS)
{
diff --git a/gcc/rtlanal.cc b/gcc/rtlanal.cc
index 8b48fc2..87267ee 100644
--- a/gcc/rtlanal.cc
+++ b/gcc/rtlanal.cc
@@ -945,7 +945,7 @@ split_const (rtx x, rtx *base_out, rtx *offset_out)
to a new rtx). Return the Y and store the offset in *OFFSET_OUT. */
rtx
-strip_offset (rtx x, poly_int64_pod *offset_out)
+strip_offset (rtx x, poly_int64 *offset_out)
{
rtx base = const0_rtx;
rtx test = x;
diff --git a/gcc/rust/ChangeLog b/gcc/rust/ChangeLog
index 6d69819..6c88612 100644
--- a/gcc/rust/ChangeLog
+++ b/gcc/rust/ChangeLog
@@ -1,3 +1,8 @@
+2023-09-28 Richard Sandiford <richard.sandiford@arm.com>
+
+ * backend/rust-constexpr.cc (rs_fold_indirect_ref): Remove unused
+ variables.
+
2023-09-21 Iain Buclaw <ibuclaw@gdcproject.org>
* rust-session-manager.cc (Session::init): Call
diff --git a/gcc/selftest-diagnostic.cc b/gcc/selftest-diagnostic.cc
index 6d1bd1b..d632c51 100644
--- a/gcc/selftest-diagnostic.cc
+++ b/gcc/selftest-diagnostic.cc
@@ -36,12 +36,12 @@ namespace selftest {
test_diagnostic_context::test_diagnostic_context ()
{
diagnostic_initialize (this, 0);
- show_caret = true;
- show_labels_p = true;
+ m_source_printing.enabled = true;
+ m_source_printing.show_labels_p = true;
show_column = true;
- start_span = start_span_cb;
- min_margin_width = 6;
- caret_max_width = 80;
+ m_text_callbacks.start_span = start_span_cb;
+ m_source_printing.min_margin_width = 6;
+ m_source_printing.max_width = 80;
}
test_diagnostic_context::~test_diagnostic_context ()
diff --git a/gcc/simplify-rtx.cc b/gcc/simplify-rtx.cc
index 170406a..69d8757 100644
--- a/gcc/simplify-rtx.cc
+++ b/gcc/simplify-rtx.cc
@@ -6109,6 +6109,23 @@ simplify_context::simplify_relational_operation_1 (rtx_code code,
break;
}
+ /* (ne:SI (subreg:QI (ashift:SI x 7) 0) 0) -> (and:SI x 1). */
+ if (code == NE
+ && op1 == const0_rtx
+ && (op0code == TRUNCATE
+ || (partial_subreg_p (op0)
+ && subreg_lowpart_p (op0)))
+ && SCALAR_INT_MODE_P (mode)
+ && STORE_FLAG_VALUE == 1)
+ {
+ rtx tmp = XEXP (op0, 0);
+ if (GET_CODE (tmp) == ASHIFT
+ && GET_MODE (tmp) == mode
+ && CONST_INT_P (XEXP (tmp, 1))
+ && is_int_mode (GET_MODE (op0), &int_mode)
+ && INTVAL (XEXP (tmp, 1)) == GET_MODE_PRECISION (int_mode) - 1)
+ return simplify_gen_binary (AND, mode, XEXP (tmp, 0), const1_rtx);
+ }
return NULL_RTX;
}
@@ -8689,6 +8706,7 @@ template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
+ using poly_int64 = poly_int<N, HOST_WIDE_INT>;
rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
diff --git a/gcc/sreal.cc b/gcc/sreal.cc
index 606a571..681a8a8 100644
--- a/gcc/sreal.cc
+++ b/gcc/sreal.cc
@@ -323,7 +323,7 @@ sreal_verify_basics (void)
of given arguments A and B. */
static void
-verify_aritmetics (int64_t a, int64_t b)
+verify_arithmetics (int64_t a, int64_t b)
{
ASSERT_EQ (a, -(-(sreal (a))).to_int ());
ASSERT_EQ (a < b, sreal (a) < sreal (b));
@@ -356,7 +356,7 @@ sreal_verify_arithmetics (void)
int a = values[i];
int b = values[j];
- verify_aritmetics (a, b);
+ verify_arithmetics (a, b);
}
}
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 8cf00a8..93d4abe 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,1188 @@
+2023-10-17 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/test_frame_2.c: Expect x30 to come before x19.
+ * gcc.target/aarch64/test_frame_4.c: Likewise.
+ * gcc.target/aarch64/test_frame_7.c: Likewise.
+ * gcc.target/aarch64/test_frame_10.c: Likewise.
+
+2023-10-17 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/pcs/stack_clash_2.c: Expect restores
+ to happen in offset order.
+ * gcc.target/aarch64/sve/pcs/stack_clash_2_128.c: Likewise.
+ * gcc.target/aarch64/sve/pcs/stack_clash_2_256.c: Likewise.
+ * gcc.target/aarch64/sve/pcs/stack_clash_2_512.c: Likewise.
+ * gcc.target/aarch64/sve/pcs/stack_clash_2_1024.c: Likewise.
+ * gcc.target/aarch64/sve/pcs/stack_clash_2_2048.c: Likewise.
+
+2023-10-17 Andrew Pinski <apinski@marvell.com>
+
+ PR tree-optimization/110817
+ * gcc.c-torture/execute/pr110817-1.c: New test.
+ * gcc.c-torture/execute/pr110817-2.c: New test.
+ * gcc.c-torture/execute/pr110817-3.c: New test.
+
+2023-10-17 Marek Polacek <polacek@redhat.com>
+
+ PR c++/111840
+ * g++.dg/parse/error65.C: New test.
+
+2023-10-17 Marek Polacek <polacek@redhat.com>
+
+ PR c++/111660
+ * g++.dg/cpp0x/hog1.C: New test.
+ * g++.dg/cpp2a/consteval36.C: New test.
+
+2023-10-17 Vineet Gupta <vineetg@rivosinc.com>
+
+ * gcc.target/riscv/pr111466.c (foo2): Change return to unsigned
+ int as that will potentially generate two SEXT.W instructions.
+ dg-final: Change to scan-assembler-not SEXT.W.
+
+2023-10-17 Martin Uecker <uecker@tugraz.at>
+
+ PR c/111708
+ * gcc.dg/pr111708-1.c: New test.
+ * gcc.dg/pr111708-2.c: New test.
+
+2023-10-17 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/111837
+ * gfortran.dg/implied_do_io_8.f90: New test.
+
+2023-10-17 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/111432
+ * gcc.dg/tree-ssa/bitops-7.c: New test.
+
+2023-10-17 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111846
+ * gcc.dg/vect/pr111846.c: New testcase.
+
+2023-10-17 Lehua Ding <lehua.ding@rivai.ai>
+
+ * gcc.target/riscv/rvv/base/cpymem-1.c: Split check.
+
+2023-10-17 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/111818
+ * gcc.dg/torture/pr111818.c: New testcase.
+
+2023-10-17 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111807
+ * gcc.dg/torture/pr111807.c: New testcase.
+
+2023-10-17 Vineet Gupta <vineetg@rivosinc.com>
+
+ PR target/111466
+ * gcc.target/riscv/pr111466.c: New test.
+
+2023-10-17 Chenghui Pan <panchenghui@loongson.cn>
+
+ * gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c: New test.
+
+2023-10-17 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c: New test.
+
+2023-10-17 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ * gdc.dg/builtins_reject.d: New test.
+ * gdc.dg/intrinsics_reject.d: New test.
+
+2023-10-16 Uros Bizjak <ubizjak@gmail.com>
+
+ * gcc.target/i386/large-data.c: New test.
+
+2023-10-16 Vineet Gupta <vineetg@rivosinc.com>
+
+ * gcc.target/riscv/fle-ieee.c: Update dg-options with
+ explicit -march=rv64gc and -march=rv32gc.
+ * gcc.target/riscv/fle-snan.c: Ditto.
+ * gcc.target/riscv/fle.c: Ditto.
+ * gcc.target/riscv/flef-ieee.c: Ditto.
+ * gcc.target/riscv/flef.c: Ditto.
+ * gcc.target/riscv/flef-snan.c: Ditto.
+ * gcc.target/riscv/flt-ieee.c: Ditto.
+ * gcc.target/riscv/flt-snan.c: Ditto.
+ * gcc.target/riscv/fltf-ieee.c: Ditto.
+ * gcc.target/riscv/fltf-snan.c: Ditto.
+
+2023-10-16 Manolis Tsamis <manolis.tsamis@vrull.eu>
+
+ * gcc.target/riscv/fold-mem-offsets-1.c: New test.
+ * gcc.target/riscv/fold-mem-offsets-2.c: New test.
+ * gcc.target/riscv/fold-mem-offsets-3.c: New test.
+ * gcc.target/i386/pr52146.c: Adjust expected output.
+
+2023-10-16 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/101541
+ * gcc.dg/tree-ssa/phi-opt-36.c: New test.
+ * gcc.dg/tree-ssa/phi-opt-37.c: New test.
+
+2023-10-16 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/31531
+ * gcc.dg/tree-ssa/pr31531-1.c: New test.
+ * gcc.dg/tree-ssa/pr31531-2.c: New test.
+
+2023-10-16 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/concepts/diagnostic3.C: Add expected column.
+ * g++.dg/cpp1z/fold3.C: Adjust diagnostic lines.
+
+2023-10-16 Marek Polacek <polacek@redhat.com>
+
+ PR c++/111272
+ * g++.dg/cpp1y/constexpr-diag1.C: New test.
+
+2023-10-16 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/costmodel/riscv/rvv/no-dynamic-lmul-1.c: New test.
+
+2023-10-16 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/part-vect-hf-convert-1.c: New test.
+
+2023-10-16 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/part-vect-roundhf.c: New test.
+ * gcc.target/i386/part-vect-sqrtph-1.c: New test.
+
+2023-10-15 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ * lib/gdc-utils.exp (gdc-convert-args): Handle new compiler options.
+
+2023-10-14 Iain Buclaw <ibuclaw@gdcproject.org>
+
+ PR d/111537
+ * gdc.dg/analyzer/analyzer.exp: New test.
+ * gdc.dg/analyzer/pr111537.d: New test.
+
+2023-10-14 Tobias Burnus <tobias@codesourcery.com>
+
+ * c-c++-common/gomp/allocate-14.c: Fix directive name.
+ * c-c++-common/gomp/allocate-15.c: Likewise.
+ * c-c++-common/gomp/allocate-9.c: Fix comment typo.
+ * gfortran.dg/gomp/allocate-4.f90: Remove sorry dg-error.
+ * gfortran.dg/gomp/allocate-7.f90: Likewise.
+ * gfortran.dg/gomp/allocate-10.f90: New test.
+ * gfortran.dg/gomp/allocate-11.f90: New test.
+ * gfortran.dg/gomp/allocate-12.f90: New test.
+ * gfortran.dg/gomp/allocate-13.f90: New test.
+ * gfortran.dg/gomp/allocate-14.f90: New test.
+ * gfortran.dg/gomp/allocate-15.f90: New test.
+ * gfortran.dg/gomp/allocate-8.f90: New test.
+ * gfortran.dg/gomp/allocate-9.f90: New test.
+
+2023-10-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/102989
+ * gcc.dg/bitint-38.c: Change into dg-do run test, in addition
+ to checking the addition, division and right shift results at compile
+ time check it also at runtime.
+ * gcc.dg/bitint-39.c: New test.
+
+2023-10-13 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/104351
+ * gfortran.dg/derived_function_interface_1.f90: Adjust pattern.
+ * gfortran.dg/pr104351.f90: New test.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111795
+ * gcc.target/i386/vect-simd-clone-avx512-1.c: New testcase.
+ * gcc.target/i386/vect-simd-clone-avx512-2.c: Likewise.
+ * gcc.target/i386/vect-simd-clone-avx512-3.c: Likewise.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ * gcc.dg/vect/slp-simd-clone-1.c: New testcase.
+ * gcc.dg/vect/slp-simd-clone-2.c: Likewise.
+
+2023-10-13 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/bb-slp-68.c: Use vect512.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-ceil-run-1.c:
+ Use scalar func as reference instead of hardcode.
+ * gcc.target/riscv/rvv/autovec/unop/math-ceil-run-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-floor-run-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-floor-run-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-rint-run-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-rint-run-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-round-run-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-round-run-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-trunc-run-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-trunc-run-2.c: Ditto.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-llfloor-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-llfloor-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-llfloor-0.c: New test.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-ifloor-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-ifloor-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-ifloor-0.c: New test.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-iceil-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-iceil-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-iceil-0.c: New test.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-llceil-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-llceil-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-llceil-0.c: New test.
+
+2023-10-13 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.c-torture/compile/pc44485.c (func_21): Add missing cast.
+ * gcc.c-torture/compile/pr106101.c: Use builtins to avoid
+ calls to undeclared functions. Change type of yyvsp to
+ char ** and introduce yyvsp1 to avoid type errors.
+ * gcc.c-torture/execute/pr111331-1.c: Add missing int.
+ * gcc.dg/pr100512.c: Unreduce test case and suppress only
+ -Wpointer-to-int-cast.
+ * gcc.dg/pr103003.c: Likewise.
+ * gcc.dg/pr103451.c: Add cast to long and suppress
+ -Wdiv-by-zero only.
+ * gcc.dg/pr68435.c: Avoid implicit int and missing
+ static function implementation warning.
+
+2023-10-13 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.c-torture/compile/20000105-1.c: Add missing int return type.
+ Call __builtin_exit instead of exit.
+ * gcc.c-torture/compile/20000105-2.c: Add missing void types.
+ * gcc.c-torture/compile/20000211-1.c (Lstream_fputc, Lstream_write)
+ (Lstream_flush_out, parse_doprnt_spec): Add missing function
+ declaration.
+ * gcc.c-torture/compile/20000224-1.c (call_critical_lisp_code):
+ Declare.
+ * gcc.c-torture/compile/20000314-2.c: Add missing void types.
+ * gcc.c-torture/compile/980816-1.c (XtVaCreateManagedWidget)
+ (XtAddCallback): Likewise.
+ * gcc.c-torture/compile/pr49474.c: Use struct
+ gfc_formal_arglist * instead of (implied) int type.
+ * gcc.c-torture/execute/20001111-1.c (foo): Add cast to
+ char *.
+ (main): Call __builtin_abort and __builtin_exit.
+
+2023-10-13 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.c-torture/compile/920501-11.c: Compile with -std=gnu89.
+ * gcc.c-torture/compile/920501-23.c: Likewise.
+ * gcc.c-torture/compile/920501-8.c: Likewise.
+ * gcc.c-torture/compile/920701-1.c: Likewise.
+ * gcc.c-torture/compile/930529-1.c: Likewise.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111779
+ * gcc.dg/tree-ssa/ssa-dse-26.c: Adjust for more DSE.
+ * gcc.dg/vect/vect-pr111779.c: New testcase.
+
+2023-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111773
+ * g++.dg/torture/pr111773.C: New testcase.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-llround-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-llround-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-llround-0.c: New test.
+
+2023-10-13 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/bb-slp-pr69907.c: Add RVV.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-iround-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-iround-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-iround-0.c: New test.
+
+2023-10-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * g++.target/riscv/rvv/autovec/bug-01.C: New.
+ * g++.target/riscv/rvv/rvv.exp: Add autovec folder.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-llrint-0.c: Include
+ stdint-gcc.h for int types.
+ * gcc.target/riscv/rvv/autovec/unop/math-llrint-run-0.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/test-math.h: Remove int64_t
+ typedef.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-lfloor-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lfloor-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lfloor-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lfloor-1.c: New test.
+
+2023-10-13 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-lceil-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lceil-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lceil-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lceil-run-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lceil-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lceil-1.c: New test.
+
+2023-10-12 Christoph Müllner <christoph.muellner@vrull.eu>
+
+ * gcc.target/riscv/xtheadcondmov-indirect.c: Make robust against
+ instruction reordering.
+
+2023-10-12 Tamar Christina <tamar.christina@arm.com>
+
+ * gcc.target/aarch64/armv9_warning.c: New test.
+
+2023-10-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/102989
+ * gcc.dg/bitint-38.c: New test.
+
+2023-10-12 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-lround-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lround-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lround-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lround-run-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lround-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lround-1.c: New test.
+
+2023-10-12 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111764
+ * gcc.dg/vect/pr111764.c: New testcase.
+
+2023-10-12 Hu, Lin1 <lin1.hu@intel.com>
+
+ * gcc.target/i386/funcspec-56.inc: Add new target attribute.
+ * gcc.target/i386/x86gprintrin-1.c: Add -musermsr for 64bit target.
+ * gcc.target/i386/x86gprintrin-2.c: Ditto.
+ * gcc.target/i386/x86gprintrin-3.c: Ditto.
+ * gcc.target/i386/x86gprintrin-4.c: Add musermsr for 64bit target.
+ * gcc.target/i386/x86gprintrin-5.c: Ditto.
+ * gcc.target/i386/user_msr-1.c: New test.
+ * gcc.target/i386/user_msr-2.c: Ditto.
+
+2023-10-12 Chenghui Pan <panchenghui@loongson.cn>
+
+ * lib/target-supports.exp: Add LoongArch in
+ check_effective_target_vect_int_mod according to SX/ASX capabilities.
+
+2023-10-12 Chenghui Pan <panchenghui@loongson.cn>
+
+ PR target/111424
+ * lib/target-supports.exp: Enable vect.exp for LoongArch.
+
+2023-10-12 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/67740
+ * gfortran.dg/pr67740.f90: New test.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ PR target/111367
+ * g++.target/powerpc/pr111367.C: New test.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ PR testsuite/111427
+ * gfortran.dg/vect/pr60510.f (test): Init variable a.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * gcc.dg/vect/costmodel/ppc/costmodel-vect-store-2.c: New test.
+
+2023-10-12 Kewen Lin <linkw@linux.ibm.com>
+
+ * gcc.dg/vect/costmodel/ppc/costmodel-vect-store-1.c: New test.
+
+2023-10-12 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/test-math.h: Add type int64_t.
+ * gcc.target/riscv/rvv/autovec/unop/math-llrint-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-llrint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-llrint-0.c: New test.
+
+2023-10-12 Mo, Zewei <zewei.mo@intel.com>
+ Hu Lin1 <lin1.hu@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+
+ * gcc.target/i386/apx-push2pop2-1.c: New test.
+ * gcc.target/i386/apx-push2pop2_force_drap-1.c: Likewise.
+ * gcc.target/i386/apx-push2pop2_interrupt-1.c: Likewise.
+
+2023-10-12 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/math-irint-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-irint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-irint-0.c: New test.
+
+2023-10-11 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/111282
+ * gcc.dg/tree-ssa/and-1.c: Update testcase to avoid
+ matching `~1 & (a ^ 1)` simplification.
+ * gcc.dg/tree-ssa/bitops-6.c: New test.
+
+2023-10-11 Mary Bennett <mary.bennett@embecosm.com>
+
+ * lib/target-supports.exp: Add proc for the XCValu extension.
+ * gcc.target/riscv/cv-alu-compile.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-addn.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-addrn.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-addun.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-addurn.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-clip.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-clipu.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-subn.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-subrn.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-subun.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile-suburn.c: New test.
+ * gcc.target/riscv/cv-alu-fail-compile.c: New test.
+
+2023-10-11 Mary Bennett <mary.bennett@embecosm.com>
+
+ * lib/target-supports.exp: Add new effective target check.
+ * gcc.target/riscv/cv-mac-compile.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mac.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-machhsn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-machhsrn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-machhun.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-machhurn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-macsn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-macsrn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-macun.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-macurn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-msu.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulhhsn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulhhsrn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulhhun.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulhhurn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulsn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulsrn.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulun.c: New test.
+ * gcc.target/riscv/cv-mac-fail-compile-mulurn.c: New test.
+ * gcc.target/riscv/cv-mac-test-autogeneration.c: New test.
+
+2023-10-11 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/111675
+ * gm2/extensions/run/pass/packedrecord3.mod: New test.
+
+2023-10-11 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.target/riscv/rvv/autovec/gather-scatter/offset_extend-1.c: New test.
+
+2023-10-11 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/unop/test-math.h: New define for
+ CVT like test case.
+ * gcc.target/riscv/rvv/autovec/vls/def.h: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/math-lrint-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lrint-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lrint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-lrint-run-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lrint-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/math-lrint-1.c: New test.
+
+2023-10-11 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/tree-ssa/ssa-dom-cse-2.c: Remove riscv.
+
+2023-10-11 Richard Biener <rguenther@suse.de>
+ Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/111519
+ * gcc.dg/torture/pr111519.c: New testcase.
+
+2023-10-11 Roger Sayle <roger@nextmovesoftware.com>
+
+ * gcc.target/i386/pr106245-1.c: New test case.
+
+2023-10-11 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * lib/target-supports.exp: Add RVV.
+
+2023-10-11 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/pr111745.c: New test.
+
+2023-10-11 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/vect-live-2.c: Make pattern match more accurate.
+
+2023-10-11 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/vect-multitypes-16.c: Adapt check for RVV.
+ * lib/target-supports.exp: Add vect_ext_char_longlong property.
+
+2023-10-10 Jason Merrill <jason@redhat.com>
+
+ PR c++/109422
+ * g++.dg/cpp2a/lambda-generic-mangle1.C: New test.
+ * g++.dg/cpp2a/lambda-generic-mangle1a.C: New test.
+
+2023-10-10 Andrew Pinski <pinskia@gmail.com>
+
+ PR tree-optimization/111679
+ * gcc.dg/tree-ssa/bitops-5.c: New test.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/no-scevccp-outer-7.c: Adjust regex pattern.
+ * gcc.dg/vect/no-scevccp-vect-iv-3.c: Ditto.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/tree-ssa/predcom-2.c: Add riscv.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/pr65947-8.c: Use vect_fold_extract_last.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/111751
+ * gcc.target/riscv/rvv/autovec/pr111751.c: New test.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/bb-slp-pr65935.c: Add vect1024 variant.
+ * lib/target-supports.exp: Ditto.
+
+2023-10-10 Claudiu Zissulescu <claziss@gmail.com>
+
+ * gcc.target/arc/add_f-combine.c: New test.
+
+2023-10-10 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * lib/target-supports.exp: Add 256/512/1024.
+
+2023-10-09 Andrew MacLeod <amacleod@redhat.com>
+
+ PR tree-optimization/111694
+ * gcc.dg/pr111694.c: New.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/slp-perm-4.c: Adapt test for stride5 load_lanes.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/pr97832-2.c: Adapt dump check for target supports load_lanes with stride = 8.
+ * gcc.dg/vect/pr97832-3.c: Ditto.
+ * gcc.dg/vect/pr97832-4.c: Ditto.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/slp-12a.c: Adapt for stride 8 load_lanes.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/slp-reduc-4.c: Adapt test for stride8 load_lanes.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/slp-23.c: Add RVV like ARM SVE.
+ * gcc.dg/vect/slp-perm-10.c: Ditto.
+
+2023-10-09 Xianmiao Qu <cooper.qu@linux.alibaba.com>
+
+ * gcc.target/riscv/xtheadmempair-4.c: New test.
+
+2023-10-09 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111715
+ * gcc.dg/tree-ssa/ssa-fre-102.c: New testcase.
+
+2023-10-09 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/vls/perm-4.c: Adjust checker.
+ * gcc.target/riscv/rvv/autovec/unop/bswap16-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/bswap16-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/bswap16-0.c: New test.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/pr45752.c: Adapt dump check for target supports load_lanes with stride = 5.
+
+2023-10-09 Robin Dapp <rdapp@ventanamicro.com>
+
+ * gcc.dg/vect/vect-cond-arith-2.c: Also match COND_LEN.
+ * gcc.dg/vect/vect-cond-arith-4.c: Ditto.
+ * gcc.dg/vect/vect-cond-arith-5.c: Ditto.
+ * gcc.dg/vect/vect-cond-arith-6.c: Ditto.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/fast-math-slp-38.c: Add ! vect_strided6.
+
+2023-10-09 Roger Sayle <roger@nextmovesoftware.com>
+
+ * gcc.target/i386/rcr-1.c: New 64-bit test case.
+ * gcc.target/i386/rcr-2.c: New 32-bit test case.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+
+ * gcc.target/i386/noevex512-1.c: New test.
+ * gcc.target/i386/noevex512-2.c: Ditto.
+ * gcc.target/i386/noevex512-3.c: Ditto.
+
+2023-10-09 Haochen Jiang <haochen.jiang@intel.com>
+ Hu, Lin1 <lin1.hu@intel.com>
+
+ * gcc.target/i386/pr90096.c: Adjust error message.
+
+2023-10-09 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/vect-cond-reduc-4.c: Add vect_pack_trunc variant.
+
+2023-10-09 Haochen Gui <guihaoc@gcc.gnu.org>
+
+ PR target/106769
+ * gcc.target/powerpc/pr88558.h: New.
+ * gcc.target/powerpc/pr88558-p7.c: New.
+ * gcc.target/powerpc/pr88558-p8.c: New.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/111155
+ * c-c++-common/analyzer/out-of-bounds-diagram-strcat-2.c: New test.
+ * c-c++-common/analyzer/out-of-bounds-diagram-strcat.c: New test.
+ * gcc.dg/analyzer/out-of-bounds-diagram-17.c: Update expected
+ result to show the existing content of "buf" and the index at
+ which the write starts.
+ * gcc.dg/analyzer/out-of-bounds-diagram-18.c: Likewise.
+ * gcc.dg/analyzer/out-of-bounds-diagram-19.c: Likewise.
+ * gcc.dg/analyzer/out-of-bounds-diagram-6.c: Update expected
+ output.
+
+2023-10-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR driver/111700
+ * c-c++-common/diagnostic-format-sarif-file-pr111700.c: New test.
+
+2023-10-08 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/part-vect-absneghf.c: New test.
+ * gcc.target/i386/part-vect-copysignhf.c: New test.
+ * gcc.target/i386/part-vect-xorsignhf.c: New test.
+
+2023-10-08 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/part-vect-vminmaxph-1.c: New test.
+ * gcc.target/i386/avx512fp16-64-32-vecop-1.c: Scan-assembler
+ only for { target { ! ia32 } }.
+
+2023-10-08 Tobias Burnus <tobias@codesourcery.com>
+
+ * gfortran.dg/block_17.f90: New test.
+ * gfortran.dg/gomp/strictly-structured-block-5.f90: New test.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * gcc.target/powerpc/const-build.c: Add more tests.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * gcc.target/powerpc/const-build.c: Add more tests.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * gcc.target/powerpc/const-build.c: Add more tests.
+
+2023-10-08 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ * gcc.target/powerpc/const-build.c: New test.
+
+2023-10-08 Hongyu Wang <hongyu.wang@intel.com>
+
+ * gcc.target/i386/apx-egprs-names.c: Compile for non-ia32.
+ * gcc.target/i386/apx-inline-gpr-norex2.c: Likewise.
+ * gcc.target/i386/apx-interrupt-1.c: Likewise.
+ * gcc.target/i386/apx-legacy-insn-check-norex2-asm.c: Likewise.
+ * gcc.target/i386/apx-legacy-insn-check-norex2.c: Likewise.
+
+2023-10-08 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.dg/vect/tsvc/vect-tsvc-s1115.c: Fix TSVC XPASS.
+ * gcc.dg/vect/tsvc/vect-tsvc-s114.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s1161.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s1232.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s124.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s1279.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s161.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s253.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s257.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s271.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s2711.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s2712.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s272.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s273.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s274.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s276.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s278.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s279.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s3111.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s353.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s441.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-s443.c: Ditto.
+ * gcc.dg/vect/tsvc/vect-tsvc-vif.c: Ditto.
+
+2023-10-08 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * lib/target-supports.exp: Enable more vect tests for RVV.
+
+2023-10-07 Lehua Ding <lehua.ding@rivai.ai>
+
+ Revert:
+ 2023-10-07 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/rvv.exp: Add zfa for building.
+ * gcc.target/riscv/rvv/autovec/unop/math-ceil-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-floor-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-rint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-round-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-roundeven-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-roundeven-run-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-roundeven-run-2.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-trunc-run-0.c: New test.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * gcc.target/i386/apx-legacy-insn-check-norex2.c: Add intrinsic
+ tests.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * gcc.target/i386/apx-legacy-insn-check-norex2.c: Add
+ sse/vex intrinsic tests.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * lib/target-supports.exp: Add apxf check.
+ * gcc.target/i386/apx-legacy-insn-check-norex2.c: New test.
+ * gcc.target/i386/apx-legacy-insn-check-norex2-asm.c: New assembler test.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * gcc.target/i386/apx-inline-gpr-norex2.c: New test.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * gcc.target/i386/apx-egprs-names.c: New test.
+ * gcc.target/i386/apx-spill_to_egprs-1.c: Likewise.
+ * gcc.target/i386/apx-interrupt-1.c: Likewise.
+
+2023-10-07 Kong Lingling <lingling.kong@intel.com>
+ Hongyu Wang <hongyu.wang@intel.com>
+ Hongtao Liu <hongtao.liu@intel.com>
+
+ * gcc.target/i386/apx-1.c: New test.
+
+2023-10-07 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/rvv.exp: Add zfa for building.
+ * gcc.target/riscv/rvv/autovec/unop/math-ceil-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-floor-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-rint-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-round-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-roundeven-run-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-roundeven-run-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-roundeven-run-2.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/math-trunc-run-0.c: New test.
+
+2023-10-07 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/108338
+ * gcc.target/powerpc/pr108338.c: Updated to check mtvsrws for p9.
+
+2023-10-07 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/108338
+ * gcc.target/powerpc/pr108338.c: New test.
+
+2023-10-07 xuli <xuli1@eswincomputing.com>
+
+ * gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c: Adjust assembler times.
+ * gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c: Ditto.
+
+2023-10-06 Roger Sayle <roger@nextmovesoftware.com>
+
+ * gcc.target/i386/ashldi3-2.c: New 32-bit test case.
+ * gcc.target/i386/ashlti3-3.c: New 64-bit test case.
+
+2023-10-06 Roger Sayle <roger@nextmovesoftware.com>
+ Uros Bizjak <ubizjak@gmail.com>
+
+ * gcc.target/i386/lea-2.c: New test case.
+
+2023-10-06 Andrew Pinski <pinskia@gmail.com>
+
+ PR middle-end/111699
+ * gcc.c-torture/compile/pr111699-1.c: New test.
+
+2023-10-06 Patrick O'Neill <patrick@rivosinc.com>
+
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-1.h:
+ Replace stdint.h with stdint-gcc.h.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-2.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-1.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-2.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-1.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-2.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-1.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-2.h:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_sqrt-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_sqrt-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-4.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-6.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-7.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_unary-8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/partial/slp-8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/partial/slp-9.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/pr111232.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/cvt-0.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/unop/cvt-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls-vlmax/perm.h: Ditto.
+ * gcc.target/riscv/rvv/base/abi-call-args-4-run.c: Ditto.
+ * gcc.target/riscv/rvv/base/pr110119-2.c: Ditto.
+ * gcc.target/riscv/rvv/vsetvl/pr111255.c: Ditto.
+ * gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c: Ditto.
+
+2023-10-05 Patrick O'Neill <patrick@rivosinc.com>
+ Joern Rennecke <joern.rennecke@embecosm.com>
+
+ * gcc.dg/pr90263.c: Skip riscv_v targets.
+ * gcc.target/riscv/rvv/base/pr90263.c: New test.
+
+2023-10-05 Sergei Trofimovich <siarheit@google.com>
+
+ PR ipa/111283
+ PR gcov-profile/111559
+ * gcc.dg/tree-prof/pr111559.c: New test.
+
+2023-10-05 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/111657
+ * gcc.target/i386/pr111657.c: New test.
+
+2023-10-05 Martin Jambor <mjambor@suse.cz>
+
+ Revert:
+ 2023-10-05 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/108007
+ * gcc.dg/ipa/pr108007.c: New test.
+
+2023-10-05 Claudiu Zissulescu <claziss@gmail.com>
+
+ * gcc.target/arc/enter-dw2-1.c: Remove tests when using linux
+ build.
+ * gcc.target/arc/tls-ld.c: Update test.
+ * gcc.target/arc/tls-le.c: Likewise.
+
+2023-10-05 Claudiu Zissulescu <claziss@gmail.com>
+
+ * gcc.target/arc/loop-3.c: Update test.
+
+2023-10-05 Claudiu Zissulescu <claziss@gmail.com>
+
+ * gcc.target/arc/add_n-combine.c: Recognize add2 instruction.
+ * gcc.target/arc/firq-4.c: FP register is a temp reg. Update test.
+ * gcc.target/arc/firq-6.c: Likewise.
+ * gcc.target/arc/mtune-ARC600.c: Remove test.
+ * gcc.target/arc/mtune-ARC601.c: Likewise.
+ * gcc.target/arc/mtune-ARC700-xmac: Likewise.
+ * gcc.target/arc/mtune-ARC700.c: Likewise.
+ * gcc.target/arc/mtune-ARC725D.c: Likewise.
+ * gcc.target/arc/mtune-ARC750D.c: Likewise.
+ * gcc.target/arc/uncached-7.c: Set it to XFAIL.
+
+2023-10-04 Roger Sayle <roger@nextmovesoftware.com>
+
+ * gcc.target/arc/ashrsi-1.c: New TARGET_BARREL_SHIFTER test case.
+ * gcc.target/arc/ashrsi-2.c: New !TARGET_BARREL_SHIFTER test case.
+ * gcc.target/arc/ashrsi-3.c: Likewise.
+ * gcc.target/arc/ashrsi-4.c: Likewise.
+ * gcc.target/arc/ashrsi-5.c: Likewise.
+ * gcc.target/arc/lshrsi-1.c: New TARGET_BARREL_SHIFTER test case.
+ * gcc.target/arc/lshrsi-2.c: New !TARGET_BARREL_SHIFTER test case.
+ * gcc.target/arc/lshrsi-3.c: Likewise.
+ * gcc.target/arc/lshrsi-4.c: Likewise.
+ * gcc.target/arc/lshrsi-5.c: Likewise.
+ * gcc.target/arc/shlsi-1.c: New TARGET_BARREL_SHIFTER test case.
+ * gcc.target/arc/shlsi-2.c: New !TARGET_BARREL_SHIFTER test case.
+ * gcc.target/arc/shlsi-3.c: Likewise.
+ * gcc.target/arc/shlsi-4.c: Likewise.
+ * gcc.target/arc/shlsi-5.c: Likewise.
+
+2023-10-04 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR rtl-optimization/110701
+ * gcc.target/i386/pr110701.c: New test case.
+
+2023-10-04 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/37336
+ PR fortran/111674
+ * gfortran.dg/allocate_with_source_25.f90: Final count in tree
+ dump reverts from 4 to original 6.
+ * gfortran.dg/finalize_38.f90: Add test for fix of PR111674.
+
+2023-10-03 David Malcolm <dmalcolm@redhat.com>
+
+ * g++.dg/diagnostic/static_assert3.C: Add directives for
+ additional source printing.
+ * g++.dg/template/error60.C: New test.
+
+2023-10-03 Patrick O'Neill <patrick@rivosinc.com>
+
+ * gcc.target/riscv/rvv/fortran/pr111566.f90: Restore escaped
+ characters.
+
+2023-10-03 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/110378
+ * gcc.dg/ipa/ipa-sra-32.c: New test.
+ * gcc.dg/ipa/pr110378-4.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-4.c: Use a return value.
+
+2023-10-03 Martin Jambor <mjambor@suse.cz>
+
+ PR ipa/108007
+ * gcc.dg/ipa/pr108007.c: New test.
+
+2023-10-03 Andrew MacLeod <amacleod@redhat.com>
+
+ * gcc.dg/pr93917.c: Check for ranges in final optimized listing.
+ * gcc.dg/tree-ssa/vrp-unreachable.c: Ditto.
+
+2023-10-03 Roger Sayle <roger@nextmovesoftware.com>
+
+ * gcc.target/arc/scc-ltu.c: New test case.
+
+2023-10-02 John David Anglin <danglin@gcc.gnu.org>
+
+ * gfortran.dg/pr95690.f90: Add hppa*-*-* to dg-error targets at line 5.
+
+2023-10-02 John David Anglin <danglin@gcc.gnu.org>
+
+ * gcc.dg/pr108095.c: Require target lra.
+
+2023-10-02 John David Anglin <danglin@gcc.gnu.org>
+
+ * gcc.dg/long_branch.c: Increase timeout factor for hppa*-*-*.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc.dg/plugin/diagnostic_group_plugin.c
+ (test_begin_group_cb, test_end_group_cb): Replace with...
+ (class test_output_format): ...this new subclass.
+ (plugin_init): Update.
+
+2023-10-02 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc.dg/plugin/diagnostic_plugin_show_trees.c: Update for
+ reorganization of source-printing fields of diagnostic_context.
+ * gcc.dg/plugin/diagnostic_plugin_test_inlining.c: Likewise.
+ * gcc.dg/plugin/diagnostic_plugin_test_paths.c: Likewise.
+ * gcc.dg/plugin/diagnostic_plugin_test_show_locus.c: Likewise.
+ * gcc.dg/plugin/diagnostic_plugin_test_string_literals.c: Likewise.
+ * gcc.dg/plugin/diagnostic_plugin_test_tree_expression_range.c:
+ Likewise.
+
+2023-10-02 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ PR target/111235
+ * gcc.dg/rtl/arm/stl-cond.c: Remove test.
+ * gcc.target/arm/atomic_loaddi_7.c: Fix dmb count.
+ * gcc.target/arm/atomic_loaddi_8.c: Likewise.
+ * gcc.target/arm/pr111235.c: Add new test.
+
+2023-10-02 Tamar Christina <tamar.christina@arm.com>
+
+ * gcc.target/aarch64/xorsign.c:
+
+2023-10-02 Iain Sandoe <iain@sandoe.co.uk>
+
+ * g++.dg/debug/dwarf2/pr85550.C: Skip for Darwin.
+
+2023-10-02 Joern Rennecke <joern.rennecke@embecosm.com>
+ Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.target/riscv/rvv/base/cpymem-1.c: New test.
+ * gcc.target/riscv/rvv/base/cpymem-2.c: Likewise.
+
+2023-10-01 Joern Rennecke <joern.rennecke@embecosm.com>
+
+ * lib/target-supports.exp (add_options_for_riscv_v):
+ Fix typo in first regexp.
+ (add_options_for_riscv_zfh): Likewise.
+ (add_options_for_riscv_d): Likewise.
+
+2023-10-01 Joern Rennecke <joern.rennecke@embecosm.com>
+ Juzhe-Zhong <juzhe.zhong@rivai.ai>
+ Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/111566
+ * gcc.target/riscv/rvv/autovec/vls/mov-1.c: Adapt test.
+ * gcc.target/riscv/rvv/autovec/vls/mov-10.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-7.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-8.c: Ditto.
+	* gcc.target/riscv/rvv/autovec/vls/mov-9.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-2.c: Removed.
+ * gcc.target/riscv/rvv/autovec/vls/mov-4.c: Removed.
+ * gcc.target/riscv/rvv/autovec/vls/mov-6.c: Removed.
+ * gcc.target/riscv/rvv/fortran/pr111566.f90: New test.
+
+2023-09-29 Gaius Mulley <gaiusmod2@gmail.com>
+
+ * gm2/iso/run/pass/m2date.mod (DayName): Reordered.
+
+2023-09-29 Xiao Zeng <zengxiao@eswincomputing.com>
+
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_imm.c: New test.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_0_imm.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_imm.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_reg.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_reg_reg.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_reg.c: Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_0_imm.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_imm.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_reg.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_reg_reg.c:
+ Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics.c: Tighten expected regexp.
+ * gcc.target/riscv/zicond-primitiveSemantics_return_0_imm.c: Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_return_imm_imm.c: Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_return_imm_reg.c: Likewise.
+ * gcc.target/riscv/zicond-primitiveSemantics_return_reg_reg.c: Likewise.
+ * gcc.target/riscv/zicond-xor-01.c: Likewise.
+
+2023-09-29 Patrick O'Neill <patrick@rivosinc.com>
+
+ * gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c: Specify -mabi=lp64d.
+
+2023-09-29 Jivan Hakobyan <jivanhakobyan9@gmail.com>
+
+ * gcc.target/riscv/zbb-andn-orn-01.c: New test.
+ * gcc.target/riscv/zbb-andn-orn-02.c: Likewise.
+
+2023-09-29 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.dg/plugin/poly-int-tests.h (test_num_coeffs_extra): Use
+ poly_int rather than poly_int_pod.
+
+2023-09-29 Francois-Xavier Coudert <fxcoudert@gmail.com>
+
+	* gcc.dg/debug/dwarf2/inline4.c: Adjust regexp to match darwin
+ output.
+
+2023-09-29 Andre Vehreschild <vehre@gcc.gnu.org>
+
+ PR fortran/37336
+ * gfortran.dg/coarray/alloc_comp_6.f90: New test.
+ * gfortran.dg/coarray/alloc_comp_7.f90: New test.
+
+2023-09-29 Manos Anagnostakis <manos.anagnostakis@vrull.eu>
+
+	* gcc.target/aarch64/ldp_aligned.c: Split into this and
+ ldp_unaligned.
+	* gcc.target/aarch64/stp_aligned.c: Split into this and
+ stp_unaligned.
+ * gcc.target/aarch64/ldp_unaligned.c: New test.
+ * gcc.target/aarch64/stp_unaligned.c: New test.
+
+2023-09-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/111583
+ * gcc.dg/tree-ssa/pr111583-1.c: New testcase.
+ * gcc.dg/tree-ssa/pr111583-2.c: Likewise.
+
+2023-09-28 Gaius Mulley <gaiusmod2@gmail.com>
+
+ * gm2/coroutines/pim/run/pass/coroutines-pim-run-pass.exp:
+ Add load_lib timeout-dg.exp and increase timeout to 60
+ seconds.
+ * gm2/pimlib/base/run/pass/pimlib-base-run-pass.exp: Add
+ load_lib timeout-dg.exp and increase timeout to 60 seconds.
+ * gm2/projects/iso/run/pass/halma/projects-iso-run-pass-halma.exp:
+ Increase timeout to 45 seconds.
+ * gm2/switches/whole-program/pass/run/switches-whole-program-pass-run.exp:
+ Add load_lib timeout-dg.exp and increase timeout to 120 seconds.
+ Remove unnecessary compile of mystrlib.mod.
+ * gm2/iso/run/pass/iso-run-pass.exp: Add load_lib
+ timeout-dg.exp and set timeout to 60 seconds.
+
+2023-09-28 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ PR target/111121
+ * gcc.target/aarch64/mops_4.c: Add memmove testcases.
+
+2023-09-28 Pan Li <pan2.li@intel.com>
+
+ PR target/111506
+ * gcc.target/riscv/rvv/autovec/unop/cvt-0.c: New test.
+ * gcc.target/riscv/rvv/autovec/unop/cvt-1.c: New test.
+ * gcc.target/riscv/rvv/autovec/vls/cvt-0.c: New test.
+
2023-09-28 Richard Biener <rguenther@suse.de>
PR tree-optimization/111614
diff --git a/gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat-2.c b/gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat-2.c
new file mode 100644
index 0000000..b129518
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat-2.c
@@ -0,0 +1,74 @@
+/* { dg-additional-options "-fdiagnostics-text-art-charset=unicode" } */
+/* { dg-skip-if "" { powerpc-ibm-aix* } } */
+
+#include <string.h>
+
+#define LOREM_IPSUM \
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod" \
+ " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" \
+ " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea" \
+ " commodo consequat. Duis aute irure dolor in reprehenderit in voluptate" \
+ " velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint" \
+ " occaecat cupidatat non proident, sunt in culpa qui officia deserunt" \
+ " mollit anim id est laborum."
+
+void test (void)
+{
+ char buf[500];
+ strcpy (buf, LOREM_IPSUM);
+ strcat (buf, LOREM_IPSUM); /* { dg-warning "stack-based buffer overflow" } */
+}
+
+/* { dg-begin-multiline-output "" }
+
+ ┌─────┬───┬───┬───┬───┬───┬────────┬─────┬─────┬─────┬─────┬─────┬─────┐
+ │ [0] │[1]│[2]│[3]│[4]│[5]│ │[440]│[441]│[442]│[443]│[444]│[445]│
+ ├─────┼───┼───┼───┼───┼───┤ ... ├─────┼─────┼─────┼─────┼─────┼─────┤
+ │ 'L' │'o'│'r'│'e'│'m'│' '│ │ 'o' │ 'r' │ 'u' │ 'm' │ '.' │ NUL │
+ ├─────┴───┴───┴───┴───┴───┴────────┴─────┴─────┴─────┴─────┴─────┴─────┤
+ │ string literal (type: 'char[446]') │
+ └──────────────────────────────────────────────────────────────────────┘
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ v v v v v v v v v v v v v v v
+ ┌───┬────────────────────────────────────────────┬─────┬────────────────────┬─────┐┌────────────────────────────────────┐
+ │[0]│ ... │[445]│ ... │[499]││ │
+ ├───┼───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬┼─────┼────────────────────┴─────┘│ │
+ │'L'│'o'│'r'│'e'│'m'│' '│...│'o'│'r'│'u'│'m'│'.'││ NUL │ │ after valid range │
+ ├───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴┴─────┴──────────────────────────┐│ │
+ │ 'buf' (type: 'char[500]') ││ │
+ └─────────────────────────────────────────────────────────────────────────────────┘└────────────────────────────────────┘
+ ├────────────────────────────────────────┬────────────────────────────────────────┤├─────────────────┬──────────────────┤
+ │ │
+ ╭─────────┴─────────╮ ╭──────────┴──────────╮
+ │capacity: 500 bytes│ │overflow of 391 bytes│
+ ╰───────────────────╯ ╰─────────────────────╯
+
+ { dg-end-multiline-output "" { target c } } */
+
+/* { dg-begin-multiline-output "" }
+
+ ┌─────┬───┬───┬───┬───┬───┬────────┬─────┬─────┬─────┬─────┬─────┬─────┐
+ │ [0] │[1]│[2]│[3]│[4]│[5]│ │[440]│[441]│[442]│[443]│[444]│[445]│
+ ├─────┼───┼───┼───┼───┼───┤ ... ├─────┼─────┼─────┼─────┼─────┼─────┤
+ │ 'L' │'o'│'r'│'e'│'m'│' '│ │ 'o' │ 'r' │ 'u' │ 'm' │ '.' │ NUL │
+ ├─────┴───┴───┴───┴───┴───┴────────┴─────┴─────┴─────┴─────┴─────┴─────┤
+ │ string literal (type: 'const char[446]') │
+ └──────────────────────────────────────────────────────────────────────┘
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ v v v v v v v v v v v v v v v
+ ┌───┬────────────────────────────────────────────┬─────┬────────────────────┬─────┐┌────────────────────────────────────┐
+ │[0]│ ... │[445]│ ... │[499]││ │
+ ├───┼───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬┼─────┼────────────────────┴─────┘│ │
+ │'L'│'o'│'r'│'e'│'m'│' '│...│'o'│'r'│'u'│'m'│'.'││ NUL │ │ after valid range │
+ ├───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴┴─────┴──────────────────────────┐│ │
+ │ 'char buf [500]' (type: 'char[500]') ││ │
+ └─────────────────────────────────────────────────────────────────────────────────┘└────────────────────────────────────┘
+ ├────────────────────────────────────────┬────────────────────────────────────────┤├─────────────────┬──────────────────┤
+ │ │
+ ╭─────────┴─────────╮ ╭──────────┴──────────╮
+ │capacity: 500 bytes│ │overflow of 391 bytes│
+ ╰───────────────────╯ ╰─────────────────────╯
+
+ { dg-end-multiline-output "" { target c++ } } */
diff --git a/gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat.c b/gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat.c
new file mode 100644
index 0000000..53c128a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/analyzer/out-of-bounds-diagram-strcat.c
@@ -0,0 +1,66 @@
+/* { dg-additional-options "-fdiagnostics-text-art-charset=unicode -Wno-stringop-overflow" } */
+/* { dg-skip-if "" { powerpc-ibm-aix* } } */
+
+#include <string.h>
+
+void test (void)
+{
+ char buf[10];
+ strcpy (buf, "foo");
+ strcat (buf, " bar");
+ strcat (buf, " baz!"); /* { dg-warning "stack-based buffer overflow" } */
+}
+
+/* { dg-begin-multiline-output "" }
+
+ ┌────┬────┬────┐ ┌────┬────┬───────┐
+ │[0] │[1] │[2] │ │[3] │[4] │ [5] │
+ ├────┼────┼────┤ ├────┼────┼───────┤
+ │' ' │'b' │'a' │ │'z' │'!' │ NUL │
+ ├────┴────┴────┴─┴────┴────┴───────┤
+ │ string literal (type: 'char[6]') │
+ └──────────────────────────────────┘
+ │ │ │ │ │ │
+ │ │ │ │ │ │
+ v v v v v v
+ ┌─────┬─────────────────────────────┬────┬────┬────┐ ┌─────────────────┐
+ │ [0] │ ... │[7] │... │[9] │ │ │
+ └─────┴────────┬────┬────┬────┬────┬┼────┼────┴────┘ │ │
+ │' ' │'b' │'a' │'r' ││NUL │ │after valid range│
+ ┌──────────────┴────┴────┴────┴────┴┴────┴─────────┐ │ │
+ │ 'buf' (type: 'char[10]') │ │ │
+ └──────────────────────────────────────────────────┘ └─────────────────┘
+ ├────────────────────────┬─────────────────────────┤ ├────────┬────────┤
+ │ │
+ ╭─────────┴────────╮ ╭─────────┴─────────╮
+ │capacity: 10 bytes│ │overflow of 3 bytes│
+ ╰──────────────────╯ ╰───────────────────╯
+
+ { dg-end-multiline-output "" { target c } } */
+
+/* { dg-begin-multiline-output "" }
+
+ ┌─────┬─────┬─────┐ ┌─────┬─────┬─────┐
+ │ [0] │ [1] │ [2] │ │ [3] │ [4] │ [5] │
+ ├─────┼─────┼─────┤ ├─────┼─────┼─────┤
+ │ ' ' │ 'b' │ 'a' │ │ 'z' │ '!' │ NUL │
+ ├─────┴─────┴─────┴──┴─────┴─────┴─────┤
+ │string literal (type: 'const char[6]')│
+ └──────────────────────────────────────┘
+ │ │ │ │ │ │
+ │ │ │ │ │ │
+ v v v v v v
+ ┌────┬──────────────────────────┬─────┬─────┬─────┐ ┌─────────────────┐
+ │[0] │ ... │ [7] │ ... │ [9] │ │ │
+ └────┴───────┬────┬────┬───┬───┬┼─────┼─────┴─────┘ │ │
+ │' ' │'b' │'a'│'r'││ NUL │ │after valid range│
+ ┌────────────┴────┴────┴───┴───┴┴─────┴───────────┐ │ │
+ │ 'char buf [10]' (type: 'char[10]') │ │ │
+ └─────────────────────────────────────────────────┘ └─────────────────┘
+ ├────────────────────────┬────────────────────────┤ ├────────┬────────┤
+ │ │
+ ╭─────────┴────────╮ ╭─────────┴─────────╮
+ │capacity: 10 bytes│ │overflow of 3 bytes│
+ ╰──────────────────╯ ╰───────────────────╯
+
+ { dg-end-multiline-output "" { target c++ } } */
diff --git a/gcc/testsuite/c-c++-common/diagnostic-format-sarif-file-pr111700.c b/gcc/testsuite/c-c++-common/diagnostic-format-sarif-file-pr111700.c
new file mode 100644
index 0000000..06605ac
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/diagnostic-format-sarif-file-pr111700.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fdiagnostics-format=sarif-file" } */
+
+# 0 "this-file-does-not-exist.c"
+#warning message
+
+/* Verify that some JSON was written to a file with the expected name. */
+/* { dg-final { verify-sarif-file } } */
+
+/* ...and that it at least includes the warning
+ { dg-final { scan-sarif-file "\"message\": " } }
+ { dg-final { scan-sarif-file "\"text\": \"#warning message" } } */
diff --git a/gcc/testsuite/c-c++-common/gomp/allocate-14.c b/gcc/testsuite/c-c++-common/gomp/allocate-14.c
index b25da54..894921a 100644
--- a/gcc/testsuite/c-c++-common/gomp/allocate-14.c
+++ b/gcc/testsuite/c-c++-common/gomp/allocate-14.c
@@ -17,7 +17,7 @@ h ()
{
#pragma omp target
#pragma omp parallel
- #pragma omp serial
+ #pragma omp single
{
int var2[5]; /* { dg-error "'allocate' directive for 'var2' inside a target region must specify an 'allocator' clause" } */
#pragma omp allocate(var2)
diff --git a/gcc/testsuite/c-c++-common/gomp/allocate-15.c b/gcc/testsuite/c-c++-common/gomp/allocate-15.c
index 15105b91..52cb768 100644
--- a/gcc/testsuite/c-c++-common/gomp/allocate-15.c
+++ b/gcc/testsuite/c-c++-common/gomp/allocate-15.c
@@ -19,7 +19,7 @@ h ()
{
#pragma omp target
#pragma omp parallel
- #pragma omp serial
+ #pragma omp single
{
int var2[5];
#pragma omp allocate(var2)
diff --git a/gcc/testsuite/c-c++-common/gomp/allocate-9.c b/gcc/testsuite/c-c++-common/gomp/allocate-9.c
index 3c11080..3138274 100644
--- a/gcc/testsuite/c-c++-common/gomp/allocate-9.c
+++ b/gcc/testsuite/c-c++-common/gomp/allocate-9.c
@@ -20,7 +20,7 @@ typedef enum omp_allocator_handle_t
static int A[5] = {1,2,3,4,5};
int B, C, D;
-/* If the following fails bacause of added predefined allocators, please update
+/* If the following fails because of added predefined allocators, please update
- c/c-parser.c's c_parser_omp_allocate
- fortran/openmp.cc's is_predefined_allocator
- libgomp/env.c's parse_allocator
diff --git a/gcc/testsuite/g++.dg/concepts/diagnostic3.C b/gcc/testsuite/g++.dg/concepts/diagnostic3.C
index 410651a..52b2f23 100644
--- a/gcc/testsuite/g++.dg/concepts/diagnostic3.C
+++ b/gcc/testsuite/g++.dg/concepts/diagnostic3.C
@@ -7,7 +7,7 @@ template<typename T>
concept foo = (bool)(foo_v<T> | foo_v<T&>);
template<typename... Ts>
-requires (foo<Ts> && ...) // { dg-message "with Ts = .int, char... evaluated to .false." }
+requires (foo<Ts> && ...) // { dg-message "19:with Ts = .int, char... evaluated to .false." }
void
bar()
{ }
@@ -16,7 +16,7 @@ template<int>
struct S { };
template<int... Is>
-requires (foo<S<Is>> && ...) // { dg-message "with Is = .2, 3, 4... evaluated to .false." }
+requires (foo<S<Is>> && ...) // { dg-message "22:with Is = .2, 3, 4... evaluated to .false." }
void
baz()
{ }
diff --git a/gcc/testsuite/g++.dg/cpp0x/hog1.C b/gcc/testsuite/g++.dg/cpp0x/hog1.C
new file mode 100644
index 0000000..105a2e9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/hog1.C
@@ -0,0 +1,77 @@
+// PR c++/111660
+// { dg-do compile { target c++11 } }
+
+enum Value {
+ LPAREN,
+ RPAREN,
+ LBRACE,
+ RBRACE,
+ LBRACK,
+ RBRACK,
+ CONDITIONAL,
+ COLON,
+ SEMICOLON,
+ COMMA,
+ PERIOD,
+ BIT_OR,
+ BIT_AND,
+ BIT_XOR,
+ BIT_NOT,
+ NOT,
+ LT,
+ GT,
+ MOD,
+ ASSIGN,
+ ADD,
+ SUB,
+ MUL,
+ DIV,
+ PRIVATE_NAME,
+ STRING,
+ TEMPLATE_SPAN,
+ IDENTIFIER,
+ WHITESPACE,
+ ILLEGAL,
+};
+
+constexpr Value GetOneCharToken(char c) {
+ return
+ c == '(' ? LPAREN :
+ c == ')' ? RPAREN :
+ c == '{' ? LBRACE :
+ c == '}' ? RBRACE :
+ c == '[' ? LBRACK :
+ c == ']' ? RBRACK :
+ c == '?' ? CONDITIONAL :
+ c == ':' ? COLON :
+ c == ';' ? SEMICOLON :
+ c == ',' ? COMMA :
+ c == '.' ? PERIOD :
+ c == '|' ? BIT_OR :
+ c == '&' ? BIT_AND :
+ c == '^' ? BIT_XOR :
+ c == '~' ? BIT_NOT :
+ c == '!' ? NOT :
+ c == '<' ? LT :
+ c == '>' ? GT :
+ c == '%' ? MOD :
+ c == '=' ? ASSIGN :
+ c == '+' ? ADD :
+ c == '-' ? SUB :
+ c == '*' ? MUL :
+ c == '/' ? DIV :
+ c == '#' ? PRIVATE_NAME :
+ c == '"' ? STRING :
+ c == '\'' ? STRING :
+ c == '`' ? TEMPLATE_SPAN :
+ c == '\\' ? IDENTIFIER :
+ c == ' ' ? WHITESPACE :
+ c == '\t' ? WHITESPACE :
+ c == '\v' ? WHITESPACE :
+ c == '\f' ? WHITESPACE :
+ c == '\r' ? WHITESPACE :
+ c == '\n' ? WHITESPACE :
+ ILLEGAL;
+}
+
+int main() {}
diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C
new file mode 100644
index 0000000..0e2909e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C
@@ -0,0 +1,21 @@
+// PR c++/111272
+// { dg-do compile { target c++14 } }
+// { dg-options "-Werror=invalid-constexpr" }
+// { dg-prune-output "some warnings being treated as errors" }
+
+struct Jam
+{
+ // constexpr // n.b.
+ int ft() { return 42; } // { dg-message "declared here" }
+
+ constexpr Jam() { ft(); } // { dg-error "call to non-.constexpr. function" }
+// { dg-message "declared here" "" { target c++20_down } .-1 }
+};
+
+constexpr bool test()
+{
+ Jam j; // { dg-error "called in a constant expression" }
+ return true;
+}
+
+static_assert(test(), ""); // { dg-error "non-constant condition" }
diff --git a/gcc/testsuite/g++.dg/cpp1z/fold3.C b/gcc/testsuite/g++.dg/cpp1z/fold3.C
index 787bf79..a256141 100644
--- a/gcc/testsuite/g++.dg/cpp1z/fold3.C
+++ b/gcc/testsuite/g++.dg/cpp1z/fold3.C
@@ -7,44 +7,44 @@
#define MAKE_FN(name, op) \
template<typename... Ts> \
- constexpr auto name (Ts... ts) { return (... op ts); } // { dg-error "empty" }
+ constexpr auto name (Ts... ts) { return (... op ts); } // { dg-message "" }
-MAKE_FN (add, +);
-MAKE_FN (sub, -);
-MAKE_FN (mul, *);
-MAKE_FN (div, /);
-MAKE_FN (mod, %);
-MAKE_FN (bxor, ^);
-MAKE_FN (bor, |);
-MAKE_FN (band, &);
-MAKE_FN (lsh, <<);
-MAKE_FN (rsh, >>);
+MAKE_FN (add, +); // { dg-message "" }
+MAKE_FN (sub, -); // { dg-message "" }
+MAKE_FN (mul, *); // { dg-message "" }
+MAKE_FN (div, /); // { dg-message "" }
+MAKE_FN (mod, %); // { dg-message "" }
+MAKE_FN (bxor, ^); // { dg-message "" }
+MAKE_FN (bor, |); // { dg-message "" }
+MAKE_FN (band, &); // { dg-message "" }
+MAKE_FN (lsh, <<); // { dg-message "" }
+MAKE_FN (rsh, >>); // { dg-message "" }
-MAKE_FN (assign, =);
-MAKE_FN (addi, +=);
-MAKE_FN (subi, -=);
-MAKE_FN (muli, *=);
-MAKE_FN (divi, /=);
-MAKE_FN (modi, %=);
-MAKE_FN (bxori, ^=);
-MAKE_FN (bori, |=);
-MAKE_FN (bandi, &=);
-MAKE_FN (lshi, <<=);
-MAKE_FN (rshi, >>=);
+MAKE_FN (assign, =); // { dg-message "" }
+MAKE_FN (addi, +=); // { dg-message "" }
+MAKE_FN (subi, -=); // { dg-message "" }
+MAKE_FN (muli, *=); // { dg-message "" }
+MAKE_FN (divi, /=); // { dg-message "" }
+MAKE_FN (modi, %=); // { dg-message "" }
+MAKE_FN (bxori, ^=); // { dg-message "" }
+MAKE_FN (bori, |=); // { dg-message "" }
+MAKE_FN (bandi, &=); // { dg-message "" }
+MAKE_FN (lshi, <<=); // { dg-message "" }
+MAKE_FN (rshi, >>=); // { dg-message "" }
-MAKE_FN (eq, ==);
-MAKE_FN (ne, !=);
-MAKE_FN (lt, <);
-MAKE_FN (gt, >);
-MAKE_FN (le, <);
-MAKE_FN (ge, >);
+MAKE_FN (eq, ==); // { dg-message "" }
+MAKE_FN (ne, !=); // { dg-message "" }
+MAKE_FN (lt, <); // { dg-message "" }
+MAKE_FN (gt, >); // { dg-message "" }
+MAKE_FN (le, <); // { dg-message "" }
+MAKE_FN (ge, >); // { dg-message "" }
MAKE_FN (land, &&);
MAKE_FN (lor, ||);
MAKE_FN (comma, COMMA);
-MAKE_FN (dot_star, .*);
-MAKE_FN (arrow_star, ->*);
+MAKE_FN (dot_star, .*); // { dg-message "" }
+MAKE_FN (arrow_star, ->*); // { dg-message "" }
int main() {
static_assert(land() == true, "");
@@ -52,7 +52,7 @@ int main() {
comma(); // No value to theck
// These are all errors, but the error is emitted at the point
- // of instantiation (line 10).
+ // of macro definition or expansion above.
add(); // { dg-message "required from here" }
mul(); // { dg-message "required from here" }
bor(); // { dg-message "required from here" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval36.C b/gcc/testsuite/g++.dg/cpp2a/consteval36.C
new file mode 100644
index 0000000..9c470e4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval36.C
@@ -0,0 +1,22 @@
+// PR c++/111660
+// { dg-do compile { target c++20 } }
+
+consteval int id (int i) { return i; }
+
+void
+g (int i)
+{
+ 1 ? 1 : ((1 ? 1 : 1), id (i)); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : 1), id (i), 1); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((i ? 1 : 1), id (i), 1); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? i : 1), id (i), 1); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : i), id (i), 1); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((i ? -i : i), id (i), 1); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (i)), id (42), 1); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (42)), id (i)); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (42)), id (i), 1); // { dg-error "'i' is not a constant expression" }
+ id (i) ? 1 : ((1 ? 1 : 1), id (i)); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (i)), id (i)); // { dg-error "'i' is not a constant expression" }
+ 1 ? id (i) : ((1 ? 1 : id (i)), id (i)); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((id (i) ? 1 : 1), id (i)); // { dg-error "'i' is not a constant expression" }
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1.C b/gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1.C
new file mode 100644
index 0000000..0051307
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1.C
@@ -0,0 +1,9 @@
+// PR c++/109422
+// { dg-do compile { target c++20 } }
+
+struct C {
+ template<typename T>
+ void f(decltype([](T, auto) { return 0; })) {}
+};
+void g() { C().f<int>({}); }
+// { dg-final { scan-assembler "_ZN1C1fIiEEvDTtlNS_UlT_TL0__E_EEE" } }
diff --git a/gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1a.C b/gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1a.C
new file mode 100644
index 0000000..dc7b012
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/lambda-generic-mangle1a.C
@@ -0,0 +1,10 @@
+// PR c++/109422
+// { dg-do compile { target c++20 } }
+// { dg-additional-options "-fabi-version=18" }
+
+struct C {
+ template<typename T>
+ void f(decltype([](T, auto) { return 0; })) {}
+};
+void g() { C().f<int>({}); }
+// { dg-final { scan-assembler "_ZN1C1fIiEEvDTtlNS_UlT_T_E_EEE" } }
diff --git a/gcc/testsuite/g++.dg/debug/dwarf2/pr85550.C b/gcc/testsuite/g++.dg/debug/dwarf2/pr85550.C
index 35b0f56..c95f752 100644
--- a/gcc/testsuite/g++.dg/debug/dwarf2/pr85550.C
+++ b/gcc/testsuite/g++.dg/debug/dwarf2/pr85550.C
@@ -2,6 +2,7 @@
// { dg-do link }
// { dg-options "-O2 -g -fdebug-types-section" }
// { dg-skip-if "AIX DWARF5" { powerpc-ibm-aix* } }
+// { dg-skip-if "No debug linker support" { *-*-darwin* } }
struct A {
int bar () const { return 0; }
diff --git a/gcc/testsuite/g++.dg/diagnostic/static_assert3.C b/gcc/testsuite/g++.dg/diagnostic/static_assert3.C
index 5d36388..4ec53f1 100644
--- a/gcc/testsuite/g++.dg/diagnostic/static_assert3.C
+++ b/gcc/testsuite/g++.dg/diagnostic/static_assert3.C
@@ -5,6 +5,11 @@
template <typename T, typename U> struct is_same { static constexpr bool value = false; };
template <typename T> struct is_same<T, T> { static constexpr bool value = true; };
+/* { dg-begin-multiline-output "" }
+ f(0, 1.3);
+ ~^~~~~~~~
+ { dg-end-multiline-output "" } */
+
template <typename T, typename U>
void f(T, U)
{
@@ -32,5 +37,5 @@ void f(T, U)
void g()
{
- f(0, 1.3);
+ f(0, 1.3); // { dg-message " required from here" }
}
diff --git a/gcc/testsuite/g++.dg/parse/error65.C b/gcc/testsuite/g++.dg/parse/error65.C
new file mode 100644
index 0000000..d9e0a4b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/parse/error65.C
@@ -0,0 +1,10 @@
+// PR c++/111840
+// { dg-do compile { target c++11 } }
+
+// NB: =delete("reason") may be allowed via P2573.
+int f1() = delete("should have a reason"); // { dg-error "expected" }
+int f2() = delete[""]; // { dg-error "expected" }
+int f3() = delete{""}; // { dg-error "expected" }
+int f4() = delete""; // { dg-error "expected" }
+int f5() = delete[{'a'""; // { dg-error "expected" }
+int i = f5();
diff --git a/gcc/testsuite/g++.dg/template/error60.C b/gcc/testsuite/g++.dg/template/error60.C
new file mode 100644
index 0000000..8c2139b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/error60.C
@@ -0,0 +1,37 @@
+// { dg-options "-fdiagnostics-show-caret" }
+
+template <typename Foo>
+struct my_pointer
+{
+ my_pointer (Foo *ptr) // { dg-message " initializing argument 1" }
+ : m_ptr (ptr)
+ {}
+
+ Foo *m_ptr;
+};
+
+template <typename Foo>
+void test (Foo val)
+{
+ my_pointer<Foo> ptr (val); // { dg-error "invalid conversion from 'int' to 'int\\*'" }
+}
+
+void usage ()
+{
+ test<int> (42); // { dg-message " required from here" }
+ /* { dg-begin-multiline-output "" }
+ test<int> (42);
+ ~~~~~~~~~~^~~~
+ { dg-end-multiline-output "" } */
+}
+
+ /* { dg-begin-multiline-output "" }
+ my_pointer (Foo *ptr)
+ ~~~~~^~~
+ { dg-end-multiline-output "" } */
+ /* { dg-begin-multiline-output "" }
+ my_pointer<Foo> ptr (val);
+ ^~~
+ |
+ int
+ { dg-end-multiline-output "" } */
diff --git a/gcc/testsuite/g++.dg/torture/pr111773.C b/gcc/testsuite/g++.dg/torture/pr111773.C
new file mode 100644
index 0000000..af8c687
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr111773.C
@@ -0,0 +1,31 @@
+// { dg-do run }
+
+#include <new>
+
+void* operator new(std::size_t sz)
+{
+ throw std::bad_alloc{};
+}
+
+int __attribute__((noipa)) foo ()
+{
+ int* p1 = static_cast<int*>(::operator new(sizeof(int)));
+ return 10;
+}
+
+int main()
+{
+ int res;
+ try
+ {
+ res = foo ();
+ }
+ catch (...)
+ {
+ return 0;
+ }
+
+ if (res != 10)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/g++.target/i386/mv16.C b/gcc/testsuite/g++.target/i386/mv16.C
index 07f4a2a..ef47831 100644
--- a/gcc/testsuite/g++.target/i386/mv16.C
+++ b/gcc/testsuite/g++.target/i386/mv16.C
@@ -116,6 +116,14 @@ int __attribute__ ((target("arch=arrowlake-s"))) foo () {
return 30;
}
+int __attribute__ ((target("arch=clearwaterforest"))) foo () {
+ return 31;
+}
+
+int __attribute__ ((target("arch=pantherlake"))) foo () {
+ return 32;
+}
+
int main ()
{
int val = foo ();
@@ -166,6 +174,10 @@ int main ()
assert (val == 29);
else if (__builtin_cpu_is ("arrowlake-s"))
assert (val == 30);
+ else if (__builtin_cpu_is ("clearwaterforest"))
+ assert (val == 31);
+ else if (__builtin_cpu_is ("pantherlake"))
+ assert (val == 32);
else
assert (val == 0);
diff --git a/gcc/testsuite/g++.target/powerpc/pr111367.C b/gcc/testsuite/g++.target/powerpc/pr111367.C
new file mode 100644
index 0000000..8f9d441
--- /dev/null
+++ b/gcc/testsuite/g++.target/powerpc/pr111367.C
@@ -0,0 +1,22 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-mdejagnu-cpu=power10 -fstack-protector-strong" } */
+
+/* Verify object file can be generated successfully. */
+
+struct SortAscending
+{
+};
+
+typedef unsigned long long size_t;
+
+void VQSort (long long *, size_t, SortAscending);
+
+void
+BenchAllColdSort ()
+{
+ typedef long long T;
+ constexpr size_t kSize = 10 * 1000;
+ alignas (16) T items[kSize];
+ VQSort (items, kSize, SortAscending ());
+}
diff --git a/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C b/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C
new file mode 100644
index 0000000..fd10009
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C
@@ -0,0 +1,33 @@
+/* { dg-options "-march=rv64gcv_zvl512b -mabi=lp64d -O3" } */
+
+class c {
+public:
+ int e();
+ void j();
+};
+float *d;
+class k {
+ int f;
+
+public:
+ k(int m) : f(m) {}
+ float g;
+ float h;
+ void n(int m) {
+ for (int i; i < m; i++) {
+ d[0] = d[1] = d[2] = g;
+ d[3] = h;
+ d += f;
+ }
+ }
+};
+c l;
+void o() {
+ int b = l.e();
+ k a(b);
+ for (;;)
+ if (b == 4) {
+ l.j();
+ a.n(2);
+ }
+}
diff --git a/gcc/testsuite/g++.target/riscv/rvv/rvv.exp b/gcc/testsuite/g++.target/riscv/rvv/rvv.exp
index 2495305..c30d6e9 100644
--- a/gcc/testsuite/g++.target/riscv/rvv/rvv.exp
+++ b/gcc/testsuite/g++.target/riscv/rvv/rvv.exp
@@ -40,5 +40,8 @@ set CFLAGS "-march=$gcc_march -O3"
dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/base/*.C]] \
"" $CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/autovec/*.\[C\]]] \
+ "" $CFLAGS
+
# All done.
dg-finish
diff --git a/gcc/testsuite/gcc.c-torture/compile/20000105-1.c b/gcc/testsuite/gcc.c-torture/compile/20000105-1.c
index 6f389d8..1917b2b 100644
--- a/gcc/testsuite/gcc.c-torture/compile/20000105-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/20000105-1.c
@@ -1,14 +1,15 @@
+int
main(int na, char* argv[])
{
int wflg = 0, tflg = 0;
int dflg = 0;
- exit(0);
+ __builtin_exit(0);
while(1)
{
switch(argv[1][0])
{
help:
- exit(0);
+ __builtin_exit(0);
case 'w':
case 'W':
wflg = 1;
diff --git a/gcc/testsuite/gcc.c-torture/compile/20000105-2.c b/gcc/testsuite/gcc.c-torture/compile/20000105-2.c
index 7689395..74bee07 100644
--- a/gcc/testsuite/gcc.c-torture/compile/20000105-2.c
+++ b/gcc/testsuite/gcc.c-torture/compile/20000105-2.c
@@ -1,4 +1,5 @@
-foo ()
+void
+foo (void)
{
long long int i = (int) "";
}
diff --git a/gcc/testsuite/gcc.c-torture/compile/20000211-1.c b/gcc/testsuite/gcc.c-torture/compile/20000211-1.c
index b83d6a4..c6b6c24 100644
--- a/gcc/testsuite/gcc.c-torture/compile/20000211-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/20000211-1.c
@@ -46,6 +46,10 @@ typedef struct
typedef struct
{
} printf_arg_dynarr;
+extern void Lstream_fputc (struct lstream *, int);
+extern void Lstream_write (struct lstream *, const Bufbyte *, Bytecount);
+extern void Lstream_flush_out (struct lstream *);
+extern printf_spec_dynarr *parse_doprnt_spec (Bufbyte *, Bytecount);
static void
doprnt_1 (Lisp_Object stream, const Bufbyte *string, Bytecount len,
Charcount minlen, Charcount maxlen, int minus_flag, int zero_flag)
diff --git a/gcc/testsuite/gcc.c-torture/compile/20000224-1.c b/gcc/testsuite/gcc.c-torture/compile/20000224-1.c
index 1c72b6a..085c33f 100644
--- a/gcc/testsuite/gcc.c-torture/compile/20000224-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/20000224-1.c
@@ -21,6 +21,7 @@ union Lisp_Object
}
Lisp_Object;
extern int initialized;
+extern void call_critical_lisp_code (Lisp_Object);
void
init_device_faces (int *d)
{
diff --git a/gcc/testsuite/gcc.c-torture/compile/20000314-2.c b/gcc/testsuite/gcc.c-torture/compile/20000314-2.c
index 3fdb3c3..0ec8181 100644
--- a/gcc/testsuite/gcc.c-torture/compile/20000314-2.c
+++ b/gcc/testsuite/gcc.c-torture/compile/20000314-2.c
@@ -1,6 +1,7 @@
extern void malloc(__SIZE_TYPE__ size);
-toto()
+void
+toto(void)
{
malloc(100);
}
diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-11.c b/gcc/testsuite/gcc.c-torture/compile/920501-11.c
index d636357..fccd0c4 100644
--- a/gcc/testsuite/gcc.c-torture/compile/920501-11.c
+++ b/gcc/testsuite/gcc.c-torture/compile/920501-11.c
@@ -1 +1,2 @@
+/* { dg-additional-options "-std=gnu89" } */
typedef struct{int s;}S;foo(){int i=(int)&(S){(void*)((int)&(S){1})};}
diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-23.c b/gcc/testsuite/gcc.c-torture/compile/920501-23.c
index 291d93e..2bc8b32 100644
--- a/gcc/testsuite/gcc.c-torture/compile/920501-23.c
+++ b/gcc/testsuite/gcc.c-torture/compile/920501-23.c
@@ -1,3 +1,4 @@
+/* { dg-additional-options "-std=gnu89" } */
typedef unsigned char qi;
typedef unsigned short hi;
typedef unsigned long si;
diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-8.c b/gcc/testsuite/gcc.c-torture/compile/920501-8.c
index 68e3393..bba64d4 100644
--- a/gcc/testsuite/gcc.c-torture/compile/920501-8.c
+++ b/gcc/testsuite/gcc.c-torture/compile/920501-8.c
@@ -1 +1,2 @@
+/* { dg-additional-options "-std=gnu89" } */
x(int*p){int x=p;}
diff --git a/gcc/testsuite/gcc.c-torture/compile/920701-1.c b/gcc/testsuite/gcc.c-torture/compile/920701-1.c
index 4302f9e..2a9c831 100644
--- a/gcc/testsuite/gcc.c-torture/compile/920701-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/920701-1.c
@@ -1 +1,2 @@
+/* { dg-additional-options "-std=gnu89" } */
f(char*c){extern char a[],b[];return a+(b-c);}
diff --git a/gcc/testsuite/gcc.c-torture/compile/930529-1.c b/gcc/testsuite/gcc.c-torture/compile/930529-1.c
index 8ad358d..4e0bcd8 100644
--- a/gcc/testsuite/gcc.c-torture/compile/930529-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/930529-1.c
@@ -1,4 +1,5 @@
/* { dg-require-effective-target untyped_assembly } */
+/* { dg-additional-options "-std=gnu89" } */
struct r
{
diff --git a/gcc/testsuite/gcc.c-torture/compile/980816-1.c b/gcc/testsuite/gcc.c-torture/compile/980816-1.c
index 5bd83b1..ae94214 100644
--- a/gcc/testsuite/gcc.c-torture/compile/980816-1.c
+++ b/gcc/testsuite/gcc.c-torture/compile/980816-1.c
@@ -16,6 +16,8 @@ typedef void (*XtCallbackProc)(
extern const char XtStrings[];
+extern Widget XtVaCreateManagedWidget(const char *, WidgetClass, Widget, ...);
+extern void XtAddCallback(const char *, XtCallbackProc, XtPointer);
typedef struct
{
diff --git a/gcc/testsuite/gcc.c-torture/compile/pc44485.c b/gcc/testsuite/gcc.c-torture/compile/pc44485.c
index fc95925..51c6989 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pc44485.c
+++ b/gcc/testsuite/gcc.c-torture/compile/pc44485.c
@@ -39,7 +39,7 @@ lbl_28:for (p_25 = 0; p_25 < 9; p_25 += 1)
if (p_25)
goto lbl_29;
unsigned short l_53;
- for (0; l_53; l_53 = foo)
+ for (0; l_53; l_53 = (unsigned short) foo)
{
}
return 0;
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr106101.c b/gcc/testsuite/gcc.c-torture/compile/pr106101.c
index dec6608..e307afc 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pr106101.c
+++ b/gcc/testsuite/gcc.c-torture/compile/pr106101.c
@@ -24,10 +24,9 @@ static const int yycheck[] =
};
-int yyparse (void)
+int yyparse (char **yyvsp, char *yyvsp1)
{
int yystate = 0;
- int *yyvsp = 0;
int yyn;
int yyresult;
@@ -39,22 +38,22 @@ yyreduce:
{
case 72: {
- if (strncmp( yyvsp[0], "~", 1) == 0) {
+ if (__builtin_strncmp( yyvsp[0], "~", 1) == 0) {
*(char **)&(yyval) = *ftpglob(yyvsp[0]);
if (globerr != 0) {
yyval = 0;
}
- free(yyvsp[0]);
+ __builtin_free(yyvsp[0]);
}
}
break;
}
- *++yyvsp = yyval;
+ *++yyvsp1 = yyval;
{
- const int yyi = yypgoto[0] + *yyvsp;
- yystate = (yycheck[yyi] == *yyvsp ? 0 : 0);
+ const int yyi = yypgoto[0] + *yyvsp1;
+ yystate = (yycheck[yyi] == *yyvsp1 ? 0 : 0);
}
return yyresult;
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr111699-1.c b/gcc/testsuite/gcc.c-torture/compile/pr111699-1.c
new file mode 100644
index 0000000..87b127e
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr111699-1.c
@@ -0,0 +1,7 @@
+typedef unsigned char __attribute__((__vector_size__ (8))) V;
+
+void
+foo (V *v)
+{
+ *v = (V) 0x107B9A7FF >= (*v <= 0);
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr49474.c b/gcc/testsuite/gcc.c-torture/compile/pr49474.c
index 0368ccb..01fe0b1 100644
--- a/gcc/testsuite/gcc.c-torture/compile/pr49474.c
+++ b/gcc/testsuite/gcc.c-torture/compile/pr49474.c
@@ -1,8 +1,9 @@
typedef struct gfc_formal_arglist
{
- int next;
+ struct gfc_formal_arglist* next;
}
gfc_actual_arglist;
+struct gfc_formal_arglist*
update_arglist_pass (gfc_actual_arglist* lst, int po, unsigned argpos,
const char *name)
{
diff --git a/gcc/testsuite/gcc.c-torture/execute/20001111-1.c b/gcc/testsuite/gcc.c-torture/execute/20001111-1.c
index 85617c2..30c8558 100644
--- a/gcc/testsuite/gcc.c-torture/execute/20001111-1.c
+++ b/gcc/testsuite/gcc.c-torture/execute/20001111-1.c
@@ -16,7 +16,7 @@ foo (unsigned int offset)
return i * 0xce2f;
buffer = next_buffer;
- data = buffer * 0xce2f;
+ data = (char *) (buffer * 0xce2f);
for (i = 0; i < 2; i++)
bar ();
buffer = next_buffer;
@@ -33,9 +33,9 @@ int
main ()
{
if (foo (3) != 3)
- abort ();
+ __builtin_abort ();
next_buffer = 1;
if (foo (2) != 0xce2f + 2)
- abort ();
- exit (0);
+ __builtin_abort ();
+ __builtin_exit (0);
}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr110817-1.c b/gcc/testsuite/gcc.c-torture/execute/pr110817-1.c
new file mode 100644
index 0000000..1d33fa9
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr110817-1.c
@@ -0,0 +1,13 @@
+typedef unsigned long __attribute__((__vector_size__ (8))) V;
+
+
+V c;
+
+int
+main (void)
+{
+ V v = ~((V) { } <=0);
+ if (v[0])
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr110817-2.c b/gcc/testsuite/gcc.c-torture/execute/pr110817-2.c
new file mode 100644
index 0000000..1f75917
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr110817-2.c
@@ -0,0 +1,16 @@
+
+typedef unsigned char u8;
+typedef unsigned __attribute__((__vector_size__ (8))) V;
+
+V v;
+unsigned char c;
+
+int
+main (void)
+{
+ V x = (v > 0) > (v != c);
+ // V x = foo ();
+ if (x[0] || x[1])
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr110817-3.c b/gcc/testsuite/gcc.c-torture/execute/pr110817-3.c
new file mode 100644
index 0000000..36f09c8
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr110817-3.c
@@ -0,0 +1,14 @@
+typedef unsigned __attribute__((__vector_size__ (1*sizeof(unsigned)))) V;
+
+V v;
+unsigned char c;
+
+int
+main (void)
+{
+ V x = (v > 0) > (v != c);
+ volatile signed int t = x[0];
+ if (t)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr111331-1.c b/gcc/testsuite/gcc.c-torture/execute/pr111331-1.c
index 4c7f4fd..05a6db6 100644
--- a/gcc/testsuite/gcc.c-torture/execute/pr111331-1.c
+++ b/gcc/testsuite/gcc.c-torture/execute/pr111331-1.c
@@ -10,7 +10,7 @@ int c(int d, int e, int f) {
int main() {
int g = -1;
a = c(b + 30, 29, g + 29);
- volatile t = a;
+ volatile int t = a;
if (t != 28)
__builtin_abort();
return 0;
diff --git a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-17.c b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-17.c
index 6920e8c..d46159e 100644
--- a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-17.c
+++ b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-17.c
@@ -11,19 +11,21 @@ void test (void)
}
/* { dg-begin-multiline-output "" }
- ┌─────┬─────┬────┬────┬────┐┌─────┬─────┬─────┐
- │ [0] │ [1] │[2] │[3] │[4] ││ [5] │ [6] │ [7] │
- ├─────┼─────┼────┼────┼────┤├─────┼─────┼─────┤
- │ ' ' │ 'w' │'o' │'r' │'l' ││ 'd' │ '!' │ NUL │
- ├─────┴─────┴────┴────┴────┴┴─────┴─────┴─────┤
- │ string literal (type: 'char[8]') │
- └─────────────────────────────────────────────┘
- │ │ │ │ │ │ │ │
- │ │ │ │ │ │ │ │
- v v v v v v v v
- ┌─────┬────────────────────────────────────────┬────┐┌─────────────────┐
- │ [0] │ ... │[9] ││ │
- ├─────┴────────────────────────────────────────┴────┤│after valid range│
+ ┌────┬────┬────┬────┬────┐┌─────┬─────┬─────┐
+ │[0] │[1] │[2] │[3] │[4] ││ [5] │ [6] │ [7] │
+ ├────┼────┼────┼────┼────┤├─────┼─────┼─────┤
+ │' ' │'w' │'o' │'r' │'l' ││ 'd' │ '!' │ NUL │
+ ├────┴────┴────┴────┴────┴┴─────┴─────┴─────┤
+ │ string literal (type: 'char[8]') │
+ └───────────────────────────────────────────┘
+ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │
+ v v v v v v v v
+ ┌─────┬────────────────────┬────┬──────────────┬────┐┌─────────────────┐
+ │ [0] │ ... │[5] │ ... │[9] ││ │
+ ├─────┼────┬────┬────┬────┬┼────┼──────────────┴────┘│ │
+ │ 'h' │'e' │'l' │'l' │'o' ││NUL │ │after valid range│
+ ├─────┴────┴────┴────┴────┴┴────┴───────────────────┐│ │
│ 'buf' (type: 'char[10]') ││ │
└───────────────────────────────────────────────────┘└─────────────────┘
├─────────────────────────┬─────────────────────────┤├────────┬────────┤
diff --git a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-18.c b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-18.c
index ea0b880..f54cd80 100644
--- a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-18.c
+++ b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-18.c
@@ -11,28 +11,34 @@ void test (void)
}
/* { dg-begin-multiline-output "" }
- ┌─────┬─────────┐┌────┬────┬────┬────┬──────┐
- │ [0] │ [1] ││[2] │[3] │[4] │[5] │ [6] │
- ├─────┼─────────┤├────┼────┼────┼────┼──────┤
- │0xe3 │ 0x83 ││0xa1│0xe3│0x82│0xa4│ 0x00 │
- ├─────┴─────────┴┴────┼────┴────┴────┼──────┤
- │ U+30e1 │ U+30a4 │U+0000│
- ├─────────────────────┼──────────────┼──────┤
- │ メ │ イ │ NUL │
- ├─────────────────────┴──────────────┴──────┤
- │ string literal (type: 'char[7]') │
- └───────────────────────────────────────────┘
- │ │ │ │ │ │ │
- │ │ │ │ │ │ │
- v v v v v v v
- ┌────┬───────────────────────────┬─────────┐┌──────────────────────────┐
- │[0] │ ... │ [10] ││ │
- ├────┴───────────────────────────┴─────────┤│ after valid range │
- │ 'buf' (type: 'char[11]') ││ │
- └──────────────────────────────────────────┘└──────────────────────────┘
- ├────────────────────┬─────────────────────┤├────────────┬─────────────┤
- │ │
- ╭─────────┴────────╮ ╭─────────┴─────────╮
- │capacity: 11 bytes│ │overflow of 5 bytes│
- ╰──────────────────╯ ╰───────────────────╯
+ ┌──────┬────┐┌────┬────┬────┬────┬──────┐
+ │ [0] │[1] ││[2] │[3] │[4] │[5] │ [6] │
+ ├──────┼────┤├────┼────┼────┼────┼──────┤
+ │ 0xe3 │0x83││0xa1│0xe3│0x82│0xa4│ 0x00 │
+ ├──────┴────┴┴────┼────┴────┴────┼──────┤
+ │ U+30e1 │ U+30a4 │U+0000│
+ ├─────────────────┼──────────────┼──────┤
+ │ メ │ イ │ NUL │
+ ├─────────────────┴──────────────┴──────┤
+ │ string literal (type: 'char[7]') │
+ └───────────────────────────────────────┘
+ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │
+ v v v v v v v
+ ┌────┬────────────────────────────────────────┬──────┬────┐┌──────────────────────────┐
+ │[0] │ ... │ [9] │[10]││ │
+ ├────┼────┬────┬────┬────┬────┬────┬────┬────┬┼──────┼────┘│ │
+ │0xe3│0x82│0xb5│0xe3│0x83│0x84│0xe3│0x82│0xad││ 0x00 │ │ │
+ ├────┴────┴────┼────┴────┴────┼────┴────┴────┤├──────┤ │ │
+ │ U+30b5 │ U+30c4 │ U+30ad ││U+0000│ │ after valid range │
+ ├──────────────┼──────────────┼──────────────┤├──────┤ │ │
+ │ サ │ ツ │ キ ││ NUL │ │ │
+ ├──────────────┴──────────────┴──────────────┴┴──────┴────┐│ │
+ │ 'buf' (type: 'char[11]') ││ │
+ └─────────────────────────────────────────────────────────┘└──────────────────────────┘
+ ├────────────────────────────┬────────────────────────────┤├────────────┬─────────────┤
+ │ │
+ ╭─────────┴────────╮ ╭─────────┴─────────╮
+ │capacity: 11 bytes│ │overflow of 5 bytes│
+ ╰──────────────────╯ ╰───────────────────╯
{ dg-end-multiline-output "" } */
diff --git a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-19.c b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-19.c
index 35ab72b..6af5c0f 100644
--- a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-19.c
+++ b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-19.c
@@ -22,24 +22,26 @@ test_long_string ()
}
/* { dg-begin-multiline-output "" }
- ┌───┬───┬───┬───┬───┬───┬───────┬─────┬─────┬─────┬─────┬─────┬─────┐
- │[0]│[1]│[2]│[3]│[4]│[5]│ │[440]│[441]│[442]│[443]│[444]│[445]│
- ├───┼───┼───┼───┼───┼───┤ ... ├─────┼─────┼─────┼─────┼─────┼─────┤
- │'L'│'o'│'r'│'e'│'m'│' '│ │ 'o' │ 'r' │ 'u' │ 'm' │ '.' │ NUL │
- ├───┴───┴───┴───┴───┴───┴───────┴─────┴─────┴─────┴─────┴─────┴─────┤
- │ string literal (type: 'char[446]') │
- └───────────────────────────────────────────────────────────────────┘
- │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
- │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
- v v v v v v v v v v v v v v v
- ┌───┬──────────────────────────┬────┐┌────────────────────────────────────┐
- │[0]│ ... │[99]││ │
- ├───┴──────────────────────────┴────┤│ after valid range │
- │ 'buf' (type: 'char[100]') ││ │
- └───────────────────────────────────┘└────────────────────────────────────┘
- ├─────────────────┬─────────────────┤├─────────────────┬──────────────────┤
- │ │
- ╭─────────┴─────────╮ ╭──────────┴──────────╮
- │capacity: 100 bytes│ │overflow of 350 bytes│
- ╰───────────────────╯ ╰─────────────────────╯
+ ┌───┬───┬───┬───┬───┬───┬───────┬─────┬─────┬─────┬─────┬─────┬─────┐
+ │[0]│[1]│[2]│[3]│[4]│[5]│ │[440]│[441]│[442]│[443]│[444]│[445]│
+ ├───┼───┼───┼───┼───┼───┤ ... ├─────┼─────┼─────┼─────┼─────┼─────┤
+ │'L'│'o'│'r'│'e'│'m'│' '│ │ 'o' │ 'r' │ 'u' │ 'm' │ '.' │ NUL │
+ ├───┴───┴───┴───┴───┴───┴───────┴─────┴─────┴─────┴─────┴─────┴─────┤
+ │ string literal (type: 'char[446]') │
+ └───────────────────────────────────────────────────────────────────┘
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ v v v v v v v v v v v v v v v
+ ┌───┬────────────┬───┬────────────────────┬────┐┌────────────────────────────────────┐
+ │[0]│ ... │[4]│ ... │[99]││ │
+ ├───┼───┬───┬───┬┼───┼────────────────────┴────┘│ │
+ │'a'│'b'│'c'│' '││NUL│ │ after valid range │
+ ├───┴───┴───┴───┴┴───┴─────────────────────────┐│ │
+ │ 'buf' (type: 'char[100]') ││ │
+ └──────────────────────────────────────────────┘└────────────────────────────────────┘
+ ├──────────────────────┬───────────────────────┤├─────────────────┬──────────────────┤
+ │ │
+ ╭─────────┴─────────╮ ╭──────────┴──────────╮
+ │capacity: 100 bytes│ │overflow of 350 bytes│
+ ╰───────────────────╯ ╰─────────────────────╯
{ dg-end-multiline-output "" } */
diff --git a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-6.c b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-6.c
index 25bf9d5..ad32096 100644
--- a/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-6.c
+++ b/gcc/testsuite/gcc.dg/analyzer/out-of-bounds-diagram-6.c
@@ -33,22 +33,24 @@ test_bad_memcpy ()
/* { dg-begin-multiline-output "" }
- ┌─────────────────────────────────────────────────────────────────────────┐
- │ read of 4096 bytes │
- └─────────────────────────────────────────────────────────────────────────┘
- ^ ^ ^ ^ ^
- │ │ │ │ │
- │ │ │ │ │
- ┌──────────────────┐┌──────────┬──────────┬────────────┐┌─────────────────┐
- │ ││ [0] │ ... │ [445] ││ │
- │before valid range│├──────────┴──────────┴────────────┤│after valid range│
- │ ││string literal (type: 'char[446]')││ │
- └──────────────────┘└──────────────────────────────────┘└─────────────────┘
- ├────────┬─────────┤├────────────────┬─────────────────┤├────────┬────────┤
- │ │ │
- ╭────────┴──────────────╮ ╭───────┴───────╮ ╭───────────┴───────────╮
- │under-read of 100 bytes│ │size: 446 bytes│ │over-read of 3550 bytes│
- ╰───────────────────────╯ ╰───────────────╯ ╰───────────────────────╯
+ ┌────────────────────────────────────────────────────────────────────────────────────────────┐
+ │ read of 4096 bytes │
+ └────────────────────────────────────────────────────────────────────────────────────────────┘
+ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ ┌──────────────────┐┌───┬───────────────────────────────────────────┬─────┐┌─────────────────┐
+ │ ││[0]│ ... │[445]││ │
+ │ │├───┼───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┼─────┤│ │
+ │before valid range││'L'│'o'│'r'│'e'│'m'│' '│...│'o'│'r'│'u'│'m'│'.'│ NUL ││after valid range│
+ │ │├───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴─────┤│ │
+ │ ││ string literal (type: 'char[446]') ││ │
+ └──────────────────┘└─────────────────────────────────────────────────────┘└─────────────────┘
+ ├────────┬─────────┤├──────────────────────────┬──────────────────────────┤├────────┬────────┤
+ │ │ │
+ ╭────────┴──────────────╮ ╭───────┴───────╮ ╭───────────┴───────────╮
+ │under-read of 100 bytes│ │size: 446 bytes│ │over-read of 3550 bytes│
+ ╰───────────────────────╯ ╰───────────────╯ ╰───────────────────────╯
{ dg-end-multiline-output "" } */
@@ -81,22 +83,24 @@ test_bad_memcpy ()
/* { dg-begin-multiline-output "" }
- ┌─────────────────────────────────────────────────────────────────────────┐
- │ read of 4096 bytes │
- └─────────────────────────────────────────────────────────────────────────┘
- ^ ^ ^ ^ ^
- │ │ │ │ │
- │ │ │ │ │
- ┌──────────────────┐┌──────────┬──────────┬────────────┐┌─────────────────┐
- │ ││ [0] │ ... │ [445] ││ │
- │before valid range│├──────────┴──────────┴────────────┤│after valid range│
- │ ││string literal (type: 'char[446]')││ │
- └──────────────────┘└──────────────────────────────────┘└─────────────────┘
- ├────────┬─────────┤├────────────────┬─────────────────┤├────────┬────────┤
- │ │ │
- ╭────────┴──────────────╮ ╭───────┴───────╮ ╭───────────┴───────────╮
- │under-read of 100 bytes│ │size: 446 bytes│ │over-read of 3550 bytes│
- ╰───────────────────────╯ ╰───────────────╯ ╰───────────────────────╯
+ ┌────────────────────────────────────────────────────────────────────────────────────────────┐
+ │ read of 4096 bytes │
+ └────────────────────────────────────────────────────────────────────────────────────────────┘
+ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
+ ┌──────────────────┐┌───┬───────────────────────────────────────────┬─────┐┌─────────────────┐
+ │ ││[0]│ ... │[445]││ │
+ │ │├───┼───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┼─────┤│ │
+ │before valid range││'L'│'o'│'r'│'e'│'m'│' '│...│'o'│'r'│'u'│'m'│'.'│ NUL ││after valid range│
+ │ │├───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴───┴─────┤│ │
+ │ ││ string literal (type: 'char[446]') ││ │
+ └──────────────────┘└─────────────────────────────────────────────────────┘└─────────────────┘
+ ├────────┬─────────┤├──────────────────────────┬──────────────────────────┤├────────┬────────┤
+ │ │ │
+ ╭────────┴──────────────╮ ╭───────┴───────╮ ╭───────────┴───────────╮
+ │under-read of 100 bytes│ │size: 446 bytes│ │over-read of 3550 bytes│
+ ╰───────────────────────╯ ╰───────────────╯ ╰───────────────────────╯
{ dg-end-multiline-output "" } */
diff --git a/gcc/testsuite/gcc.dg/bitint-38.c b/gcc/testsuite/gcc.dg/bitint-38.c
new file mode 100644
index 0000000..1dc7a56
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-38.c
@@ -0,0 +1,43 @@
+/* PR c/102989 */
+/* { dg-do run { target { bitint } } } */
+/* { dg-options "-std=c2x" } */
+
+#if __BITINT_MAXWIDTH__ >= 16319
+constexpr unsigned _BitInt(16319) a = 46809856770167726127621548193677044225438364376699537824160022717939628343291686588133221586710648915925157749537208566348709231774324477059728763319900537499845533358728035749014999310181139205148376149598710826496473833711815515586271543891072166123033253318533558175760051184685411593263726196963313436586869536391457057811006447186847584134858936693364541098769997908014021284990908118817091046496748623135893521289709626062603305553614183559928449847473785848765847011514477192311482631228386303550370060014144072442636469963633024041427127562602129493942248325061962900595999224341866123012213266776978118379033875934588490382169559099157722852052372530204821544784157311384081159363841342505493821326296144831789857414053309000499273268852511500478297389324409142700039689042715222530866107895467106606923445375759318175390086520343903540248030641357223961046714259192080918736743807117010096956744004469142748795978563733838165130991678206367028604654758524083789230717092884948587718679328070760084086678347179914817925081838771618312732334619953338746336344235621880377969700575932441037647685522242087626242598557198281818035387041014982421454431301328519954419349662422321998640294484962248942200767856494617479789279508933089953562472777752533078949270357456411225295514777094292976154560435086940424655827475235351037015722948500440213104315345429039792938727637405493857897687860646721735939868427505051910441391428602410680811634071227305942736229370315135549833621317069889444840536939875718852316046029271487585787996817357832819135821597249351327129787563440079330192925005282225863601565085768302390070984541083848793677853325040788618095457604634069790858402095129504884493804786565702907285079744297614689529418499373699950548566574281131379540553067419984805580275990178637682206952934297126196311933247650406428586936204966208340578982843313215493324281743280941581054818065875039369227272958623284206565849097120192778001425881533311545
96951179422735518766468448210767236640402827728345114198913512781690171039870948038295942863523404683466187260887814926268161886573313591041718198226738058563178284990390880882231372582973739290433076735700903969477895987999229286438435326170121648110746188817746226289435390379748838126891308018609150900358702440610058194181300683909864703146778536050801033134118379043582878374015462574132404669398935275089315410652419298723072038764438821061932625446522901323646916719103320061278641469914040153666835693172480579495960703549293611583269555516002360752684350441058801627983807991616079873652824586620315990969218251762027078907300236987068557629326916882593653589640765958245777752759911831491183720472060551184631128646040638538948204072498378713689349414381196806055285468872569343342460755967464102979544586323581714287141418209181833844356813323793175410482523917107121966234063387020611952137245693032854022428536713861131482115356916854618364582950375380343783180551082400824144412053004015267323999592283469265285868527433894909787347879267219998553887947118371644230077196261091790054661137064507652696875808198227721893010845036272973896751342282223372868676411105110619802312478845334924428989367434296419583141353290734064957763692081580321158838506910105690489839411267714779909760922523919728126916698474467985072441061216678854230256137692581027738555375097332958050133139374022828048972138472210726471116051723494645640899149064935081338553896271776634260577632520862863253438112547576818030682762780487579974252843347131902268184630230744619001769580100555724349831351711453652423392733269844651810642872646454708320911151006405841043755773040569519694562001384853135600092723382281036377638632892616732587267367534070441436640794794969725805605344948061708104693047730058735906262800723879996685225467479857015996139751011885438578521415592516340586767183080003248698096281994426815656156629126260227960644144961063442364312856976883577079929899665615571717299720935330074769478
62215922583204811189015550505642082475400647639520782187776825395598257421714106473869797642678266380755873356747812273977691604147842741151722919464734890326772594979022403228191075586910464204870254674290437668861177639713112762996390246102030994917186957826982084194156870398312336059100521566034092740694642613192909850644003933745129291062576341213874815510099835708723355432970090139671120232910747665906191360160259512198160849784197597300106223945960886603127136037120000864968668651452411048372895607382907494278810971475663944948791458618662250238375166523484847507342040066801856222328988662049579299600545682490412754483621051190231623196265549391964259780178070495642538883789503379406531279338866955157646654913405181879254189185904298325865503395688786311067669273609670603076582607253527084977744533187145642686236350165593980428575119329911921382240780504527422630654086941060242757131313184709635181001199631726283364158943337968797uwb;
+constexpr unsigned _BitInt(16319) b = 99354435180574564299271266552222578172075113116713358325600655730552766787479906529073488397418185627579390846490733481721083971838270203779417259831075136362874065305263582535084372902419372769083862829043530791029045356756086045764861629983194277028512784082136414548372230796164016158756724532501484216792238294178342275181330910551802702492661616766771761496751642576408123442979356507296298018787580599440901688627305198172033523414583103638114823180832702324343293173238228189911345006016698689223960135129694778394564723458123123219242152418497721476874557602245592409527373190093485408949663635681583495013552292646467700180715905024417027872690979739798998376831221941031100897284256766902460911469939550379184257728400222882228329325425160915011494771608565644643769102932300919635731192306480266678963993527909826119575699789720381785195702784475407075028616785026579051927432258932256639948075689186448982737022854836763857176511040420021053529931765121664200850644524317531813658058335489226767488904124203326946090968197797656003452163903943072575567782237434439589839621137231935512478979954237623480921038936837113738971391682894202676606114099476445487150077878329592511675531750966391476747761179731004479032436269028923822637675913280382357085934015637930194181244531663864717924684210038558942065843547314893636681340779462035460672372356577464802968316517917903859813975584589059046413942462797827467360091018623668680683634119763885576979219143171793712064440853907796348313697233700507646788528467793694972323747806919052809923680797627473522455196072641541971489588969556619042149091849522899961420506048216087499004178451377275969031004523500675513058409982804827752098832788730718955887518114623425178257534938149979184184374554749924222439195499673719644234574402872962708556058509546859126443033540190587169167355225330653230577554798036687825302503819882110750346557601232502494414406843384509538232903469096898225276526987235028723
12570305261196768477498898020793071808758903381796873868682378850925211629392760628685222745073544116615635557910805357623590218023715832716372532519372862093828545797325567803691998051785156065861566888871461130133522039321843439017964382030080752476709398731341173062430275003111954907627837208488348686666904765710656917706470924318432160155450726007668035494571779793129212242101293274853237850848806152774463689243426683295884648680790240363097015218347966399166380090370628591288712305133171869639679922854066493076773166970190482988828017031016891561971986279675371963020932469337264061317786330566839383989384760935590299287963546863848119999451739548405124001514033096695605580766121611440638549988895970262425133218159848061727217163487131806481686766843789971465247903534853837951413845786667122427182648989156599529647439419553785158561613114023267303869927565170507781782366447011340851258178534101585950081423437703778492347448230473897643505773957385504112182446690585033823747175966929091293693201061858670141209129091452861292276276012910624071241165402089161606944423826245461608594935732481900198240862293409442308800690019550831630479883000579884614601906961723011354449804576794339826056986957680090916046848673419723529694384653809400377218545075269148766129194637039408225515678013332188074997217667835494940043014917877438354902673107453164275280010251040360040937308738925689475725131639032011979009642713542292894219059352972933151112376197383814925363288670995556269447804994925086791728136906693249507115097807060365872110998210768336078389508724184863597285987736912073071980137162590779664675033429119327855307827174673749257462983054221631797527009987595732460222197367608440973488211898471439302051388806818521659685873672383828021329848153410204926607710971678268541677584421695238011784351386047869158787156634630693872428067864980320063293435887574745859067024988485742353278548704467544298793511583587659713711677065792371199329419372392720321981862269890024832348
999865449339856339220386853162641984444934998176248821703154774794026863423846665361147912580310179333239849314145158103813724371277156031826070213656189218428551171492579367736652650240510840524479280661922149370381404863668038229922105064658335083314946842545978050497021795217124947959575065471749872278802756371390871441004232633252611825748658593540667831098874027223327541523742857750954119615708541514145110863925049204517574000824797900817585376961462754521495100198829675100958066639531958106704159717265035205597161047879510849900587565746603225763129877434317949842105742386965886137117798642168190733367414126797929434627532307855448841035433795229031275545885872876848846666666475465866905332293095381494096702328649920740506658930503053162777944821433383407283155178707970906458023827141681140372968356084617001053870499079884384019820875585843129082894687740533946763756846924952825251383026364635539377880784234770789463152435704464616uwb;
+constexpr unsigned _BitInt(16319) c = a + b;
+constexpr unsigned _BitInt(16319) d = 20129744567093027275741005070628998262449166046517026903695683755854448756834360166513132405078796314602781998330705368407367482030156637206994877425582250124595106718397028199112773892105727478029626122540718672466812244172521968825004812596684190534400169291245019886664334632347203172906471830047918779870667296830826108769036384267604969509336398421516482677170697323144807237345130767733861415665037591249948490085867356183319101541167176586195051721766552194667530417142250556133895688441663400613014781276825394358975458967475147806589013506569415945496841131100738180426238464950629268379774013285627049621529192047736803089092751891513992605419086502588233332057296638567290306093910878742093500873864277174719410183640765821580587831967716708363976225535905317908137780497267444416760176647705834046996010820212494244083222254037700699529789991033448979912128507710343500466786839351071045788239200231971288879352062329627654083430317549832483148696514166354870702716570783257707960927427529476249626444239951812293100465038963807939297639901456086408459677292249078230581624034160083198437374539728677906306289960873601083706201882999243554025429957091619812945018432503309674349427513057767160754691227365332241845175797106713295593063635202655344273695438810685712451003351469460085582752740414723264094665962205140763820691773090780866423727990711323748512766522537850976590598658397979845215595029782750537140603588592215363608992433922289542233458102634259275757690440754308009593855238137227351798446486981151672766513716998027602215751256719370429397129549459120277202327118788743080998483470436192625398340057850391478909668185290635380423955404607217710958636050373730838469336370845039431945543326700579270919052885975364141422331087288874462285858637176621255141698264412903522678033317989170115880081516284097559300133507799471895326457336815172421155995525168781635131143991136416642016744949082321204689839861376266795485532171923826942
48650291340028696394030948450748412942357615679804498519878015905578852553831087808939789517512916209967189433752680123528042742832120532153073510823984859427872083931792178283135236354119991955757759754687670446261290492469443190307233286434146574529186671806760104140421243094195617740776348184556833917022419619310646303040908007313660543386977586097493999100859687497850624568972696671520663943825972468930101969225811699131769501220503615717703953690549400583394838439744649291812918527435980614545414824113192583856206999193487232931445201690072894818647738722316199414555121615603221103831947527085381866007906589511992337331749677718417731534592378770080398696517503322437543524922494915119100657451151905522074117463116587929968811813872838021955014300689481752227033847241389907975191731450575480205298862217439213520713971596021234685888242254322262140843381781718159520108640336830183908059245511546382942570813234581127091145692896130126522310198952448152172196983898020864752803850932850170542895074982008072041877671808414208650126741828424137039886856128227784839167384793724787311771990610344101557824515267318471953889607369727247525026122768566005894410708733378610476162439181617541433899921526019016255148934343633249264588702955196457882643215670087245921660584346388422834316715992479275242981606484147943813466274962163956020344387132681012987276353911428481133080521318871633347106971027058394584162633836170084641092775091666390836768318808419325838493512223663993433528416052204206508892342192866072409572603964283634354221147328239255437197307410877079744744865442832584525330488906202103159953143660677502931584967475621398893234965164055257188078046145218709440040840330980650769823007158480986163459600042530048580517485340677496132105508699566551386838228504834826425017438879318409352467562176255853776374723731447388317368663357627383694650723788061962763254309361928109667564387774921758849538329207871323025399352532620973285930184201644018901002773323499765774835
1253359664018894197346327201303258090754079801393874104215986193719394144148559622409051961205332355846077533183278890738832391535561074612724819789952480872328880408266970201766239451001690274739141595541572957753788951050043026811943691163688663710637928472363177936029259448725818579129920714382357882142208643606823754520733994646572586821541644398149238544337745998203264678454665487925173493921777764033537269522992103115842823750405588538846833724101543165897489915300004787110814394934465518176677482202804123781727309993329004830726928892557850582806559007396866888620985629055058474721708813614135721948922060211334334572381348586196886746758900465692833094336637178459072850215866106799456460266354416689624866015411034238864944123721969568161372557215009049887790769403406590484422511214573790761107726077762451440539965975955360773797196902546431341823788555069435728043202455375041817472821677779625286961992491729576392881089462100341878uwb;
+constexpr unsigned _BitInt(16319) e = d / 42uwb;
+constexpr unsigned _BitInt(16319) f = 26277232382028447345935282100364413976442241120491848683780108318345774920397366452596924421335605374686659278612312801604887370376076386444511450318895545695570784577285598906650901929444302296033412199632594998376064124714220414913923213779444306833277388995703552219430575080927111195417046911177019070713847128826447830096432003962403463656558600431115273248877177875063381111477888059798858016050213420475851620413016793445517539227019973682699447952322388748860981947593432985730684746088183583225184347825110697327973294826205227564425769950503423435597165969299975681406974619941538502827193742760455245269483134360940023933986344217577102114800134253879530890064362520368475535738854741806292542624386473461274620987891355541987873664157022522167908591164654787501854546457737341526763516705032705254046172926268968997302379261582933264475402063191548343982201230445504659038868786347667710658240088825869575188227013335559298579845948690316856611693386990691782821847535492639223427223360712994033576990398197160051785889033125034223732954451076425681456628201904077784454089380196178912326887148822779198657689238010492393879170486604804437202791286852035982584159978541711417080787022338893101116171974852272032081114570327098305927880933671644227124990161298341841320653588271798586647749346370617067175316167393884414111921877638201303618067479025167446526964230732790261566590993315887290551248612349150417516918700813876388862131622594037955509016393068514645257179527317715173019090736514553638608004576856188118523434383702648256819068546345047653068719910165573154521302405552789235554333112380164692074092017083602440917300094238211450798274305773890594242881597233221582216100516212402569681571888843321851284369613879319709906369098535804168065394213774970627125064665536078444150533436796088491087726051879648804306086489894004214709726215682689504951069889191755818331155532574370572928592103344141366890552816031266922028893616252999452
32341786906694157966730634716135725407924180964450068154726716374260155511169937692369050001417229433768100741873591034179213137774130858622826838582557977398538233985482172967031392545672486960791011495704081037767139477983467522518153656544483055192441779413973668659455766048381304552508985028537375640359490039222629661765618977456701990023764432989128019277606734010975110002581847315526750349062814642930649352095367766061209475830719048007203998057532342899400998241567687578633834368185076972425872471294712984486518252270050986981054114751598895570978479024826659358153241409198367037642653428907909874254950512769416052111070003549665893272400762175950009122759547783120032533524261416262421801075358630679448273250076513629954805295834587248844696903297387141856548457009644060912540143951634906195107334477275381716873153318674044920653318485840982433126987927675230281907593889419176460388066905980491470520293222011457476930794593844635574409305848346609874102967113330530845160151012409733666804436214099484223089535423200793619361066621523635138333071949675857709510246623578270082057593845373627754644593213511694799340435697589005171730412869312569995144579132884366864724543979793369135501578123803814859733983134834104975195720468081385513827225323421903045816417919536888887898936264050948644053011233768789016564682415233888521861166556793342365223662116883349759476292258652315155424431628407536492331622345779833699544022980163824904455584178665286877833385762620171269482394514620841257256794740307865515944817846748833567385388698214360784336910350490583704914700641332408720492396834716240637214630411024743621070432983803396754929609470890904235280794216538905439121760908467676546499780390041565327804122058643413369880265872674895012298018361509102904924291929842806674593714859387999453925424007022090069466220074179663268737341495281700093809393049733825916843964997096377440683341143111392219408276539024116171510614263868107283976403597687722315272782924847563997002
9777900589595383604989099084081251802305001465530685587689066710306032849298712531664047230963409638484129598076118133347670029704549206295184751171783054889490211218045322681317529569999778899567668829982207035948032411418382057247326141072264502161892285323531743728756335449414720326329614400327415751813608405440522389476951223717685562226240221655814783640319063683104993438443847695342093582440489676230855515734722099028773790309518629302472390856918840009781940193713784596688294176313226823907143925396584175086934911386332502448539920116580493698106175151294846382915609543814748269873022997601962804377576934064368480060369871027634248583037300264157126892396407333810094970488786868749240778818119777818968060847669660858189435863648299750130319878885182309492320093569553086644726783916663680961005542160003603514646606310756647257217877792590840884087816175376150368236330721380807047180835128240716072193739218623529235235449408073833764uwb;
+constexpr unsigned _BitInt(16319) g = f >> 171;
+static_assert (c == 10403542085759133691203342137159028259461894955438331210801665800234672962180907518788681055608925051917190662144445433835595489501570265148539013616306519011285861864113638610998587283343748668959870044400340187367869274012726759732348878437230149364081610941398977036594823591463255731808309715219781556045092524781748798096243155527048746090614751043610821560662864236720952557147844731917800712343725546175449104075627616077829385396994452199410766816558008090921987787438967590914249326913953731957899714113110918563882837045448642562338486517475793442626878243475178869958697311252767202125088496235928130685145568023992654921893286093433280015789621699281948053130963767216950901322064090115301029360256916486236324346980555378227825665231041206505932451054100655891377307183657244188881780309602697733965633806548575793711470844175477213922050584861112947113328821094578714380110663964395764964375008963336325761662071121014767368961020824065775639039724097407257977371623360602667242992626829630277589757195892131842788347638167481783472539736593840645020141666099662762763659119482517961624374850646183224354529879255694192077493038699570091875155722960929748259201284457182471153956119946261637096783796538046622701136421992223281799392319105563566498086105138357131671079600937329401554014025354725298453142629483842874038291307431207948198280389112036878226218928165845324560374437065373122000792930554833265840423016148390974876479752688661617125284208020330726704780298561478529279775092768807953202013307072084373090254748865483609183726295735240865516817482898554990450888147008484162850924835809973020042760450232447237837196378388135483084055028396408249214425019231777824054821326738728924661602608905318664721047678808734917923923121217803736039325080641571812479260200189082647677675380297657174607422686495562781202604884582727406463545308236800937463493199421020490845203940782000643133713413924683795888948837880891750307666957538835987772265423203470320
35414574284186979547279918615463138528857373012909422873337985543251481703142588458496225428399958685025040640668104719182054435234204666795014637429636465589191513531008252999490487456244155152708131163812176636766180791464709291728778401761311579569137381404108683872031696801034926377670277500977166273712460099270941863047012857961274813880798361769748750007950283953226647831778869968028339523030866861316819185255723412246929027776300025653153107176228096059741657645212457588500636349217131455102636923732511984414715497258261712763724042132378125212581931326849887204868306878922887098308630658611179300717869357056255497576238443123666448936047810969252018335604211279458975692203610202538088824608276391191562203757073696967785062170828190965207077645042211077228565992138341353272513710762151477095836158124047196854299729444640258484491817995688121997840577278571340204647190310340487135232427710908989164055898392215935947996406899492353849050050179882511623818838126733061802609316029020559666979598183484235227101106393963262392662996011392632602995214345235464061406104943893266546792844311323221449810177452317812902015501722880222190146954807223407333468105246132783226895592370110973287436098400249313002547075386196743249310239576627971781511313576381088621649177026572416088768888751528229344728712103954532377792828687671126704913554776077365584595062267632797228062234548625308462612124788589175745830897425946644128496776582456147835142105192308184259479161624968276859479641318474200750454038214177355609892946123384279797856646673424043603226912290805743831431941048957524484573932069376479868739894227531433336183856035827858376698321012608104602023146970583654461125207518773311256077812556022556580334995315188080060189038264821637573707701574468414213230386449408323768030689813403357075840113173581923773028020942423195412197015419557507072887665318792842391889421161709356709485792607969400395014296276348072890732240933895427749371183436342303230929686208137192306115
0409402403668284066920335645815769603890931600189625120845560771835017710222988445713995722670892970377791415975424998772977793133120924108755323766471601770964843725827421304729349535336212587039242582503381150992918495310760366078232133800372960134691178665615437284018675587037783965019497398984583781291648236566997741116811234934754542646608973862932050896956712947890625239848619289180051302224085308716715734850608995498117691600907423641124622236235949675965926735290984369155077055324647942699875972019355174794849379024365265476001505043957802797349447782453767742359446787304217770032967959809288342189111153359045680464231699344620995535326063943372491385550455978845273436611631962336651743357242055102619760848116407351488643448217122169718350824452317641509534606434395208225350712889271762643740106849245478364448395994915755050465135468245061369394410933866013068008514339549345174558881983866497072827311379042433413uwb);
+static_assert (e == 47927963254983398275573821596735710148688490586945302151656389894891544659129428967888410488282848368101861900787393734303255909595611040969035422441862500296655015996183400474078033076442208281022919339382663505873362486125052306726201934754009977462857545931535761634915082457969531364063028166780758999692064992454347878021515200637154689307943805765515434945644517436059064850821739923175860513488184741071305928775874657579331194145636134729035837432777505225398881945576787038414037353432531906221463764944822367521370140398750351920450032158498609394040097931192233762919615392739593496142319079251492975289355219161278102593077980694080934774807348815686269838231658663255453109747406854147841668747295898035046214722954204337096637695161230258009467203656917423590804239279208200992286134875490081064276216238601176771626719652470715951261404740555830904552686923119865477301873427026359632829140952933264973522266815070542033531976946547220197973086938491321120720753739960137399906970065546372022920105333218600697858250092770971284041999765371634305856374505354948168051485795619245710565177475544471205491166573508574008824290197617246557203404659741951935583377220245975415117684554899445620844502922298410099631370945492174513316818179053941295889751044787346934407150836832047822816077953368388724034918957631287532906408983549478253389828549312675591697063148899645182358568234280904393370464356625596517001437115695750865735696271243546529127281196748236370851643906557876213318702947945779409043920207097980173253604088090541910037502992188977212850308451093143517148397901877959716663055881990934822377000137739027330737305203072941381961798582398137407044371548508882948736515168678665314156055539763283978378697347560390812910312112592558243537758659944336321765948248602151244471507899974214561619241705438327522143175018570171179348707944798029574180941726592337226502723788420039623849392735910288582594856812800635227346505171247207005920245031905445152
23883210597020030815137180190010710761614323584711553699597828116523308375030752880874260556554000294114387482933620314650175025771392522447314485551886138769369610366952361799423237511161120110145929743974864738826745920081301367926634932873238343191479150224275280335181781391801985516720046712644395959621209541223001293778518062136890474049665922613930058497554039694096818913871363021262147545775742140789927383858341942185009413548927144246178186761296784028125996493895191939393844819317125199657635712365445792693917146881125940044399377910276665272750289560960050247218922683536623490495015689314267469837499232662899360796648520881143806420279769815327484583148797416950239660597980727433509803483610923642782885271125804814178605477832099410064366302955690257083789836787084476679283005279617175049318979990526749252114862510291100335341385194567046476449143659119485495379155979872340339454317225193159740823078324119348862643330839162267076659485471478249411437740316309929864035892814304933433042075734319544405063671020057469142587752686256630569446154270773303123266644310343098947201226826948742747356208023160113154824101829919061653358830317568120181339140908613193890237908395283372036068891294364879201401673702848709244388608738302966480144248443781959129325514267807798197575253533685580508253035624199895286534255077811935683991318836734478888286955521122936540730883397758082343244366276595439621649464503967597230400759067665061520222648151580936746496228695724301211648433792538267641839533248294367510050350781522036755231684311612094630344917721029963155548783110005007523697961096851197456154684465765235460083250390607755209709633679092165333430572216620597071007159901145205151094285815547734715517822239708324124060734998967979492471972630559110535755806855520022267779909943466318515177913646303305517544436565779484987263628066814197055367403242685975398962828035527997260805545733026959584284172696716603061738533813438140240482793627380394701988393657062861641475
55864933364363287875097138128425573909904433183795098670203800533548856219174579901097084123411402160448390274656216062207733804522678116007830485911118338137291415500040244636646228465275546613185451215477214924093897408659253897872331630294361379429268082112519489979283826532913282908147824847781517964779380824918394924322420104717839012960422523766744397106063463998218416521947089619846125464833145312281971994057275917591591279145274837283273569411904875883590818927011083766111368623876288661469697856984023924541117354584710728162060928747544449729071086406072820826707352705098469570212430005031769870770984490147544922541878582516496026055634218534739829767044431114272772863484628968800592047985977005687260574374332608765746965647976405949709304033414442630581488362251756922883517287565772653346189666094175256518980878632057889091042584644510374477219106080358138511257658994752983022904583136418485544787844335722425uwb);
+static_assert (g == 87791074236971898375693906050841211797859249085219857442103255912236679245196526258183737200195092459037070061326325721733862550642013557351987594406882625147809841117910427395663017848973163739949221920509632722884340603422885119715696976800265237608112255164300526997540446828188926798191319956002162809660627367323847324113616574443996958838650961034287596228138677355472599785293194368898640136872193905676042833180111007999534515209684412648660318139544886280584751143487292754141431589178747095995562471836958538385523210889734458760880425568104799106614493746619996750828111038144533532941948866129614927372632772715518890386107307604784595692561493219983504140230663638149893111097283117129890229962472801825879214491853539228859378776045004007387742400087099452897916050111777396577201816014535122598820045644624158286527149042897272352105372777213898166876433661452000011777121121975156955788874837929887554354013884561458544888805370883603979946432160148284956624602056864485481132298410979556139584409013754162565328645118522986963276115172332413247990709194912864261597887926317238337174515380434373640171852377431824028356700876831256026403188874515966503235287201281881985472704629716121576034879585267050059555804094416707718493880164380358501945858703270134092362367309142177220256553194722311416667902879556857136362746535655774542758385903508061686391652646764704409303516129925189046646477158058659410384237683768466978175431224095175917172922387459403459005304585514685192457678645317421021786288543765245133679832091869745757657072739737753868400812388038803350957408363865272082673118089735224503911890557398289369373596931672405246606249458569070420412573471920869840096409845093226225038902560463247683416326435464557790353760020616911131212342731649379841717742423277699156887425640494541631583181218185827647752680912924708890884455751080226880692716971982831514696454008705070066637993306617027027474432542204783110564072207496481031234354733815835208
73055218734115120978678440455896458852497569989966723235965608706826593607128847630137618509151255834742636438796285569873869967729341871213521030011427372987388572674228441333458857512226049283243347521457804912008781036966786374760325341492033297848368160903260470019067535330611645909560888797451907088389764190403007998305168673029446934012245138838180596098559442570696150011296218144186387024615885302290744905340666905921743970013779813332493771192048043297281423248489056841417013807670308191095732464221451376997270745468459702152796818222745730565721202663103043121160101459833683249558684459108862536961994308535039970814557821268170388745941980378838969910592895670554291811739768771829941043857819603751246957962236091154755893962038363120690483862423001038948620681611253867149296463690417828034303547922792249098522404751428960713875050463906134150846089705714470303918299012691600285355859412924847760497076978432722446602521825089097454542343354847347396045079587757210635356999268706465425788833311190517623061860675230010994127196459030322166751571656642321690787471906609473496034789643710478162255664092991251446787887635351852933826820719781733754578161073401362668109819113924252291125741395271474342305574536974918273938513597418963787308994593434191890687730302495910686072338836413159162281072263542758257699588089838677469397467899348065293581751035844389848387161847435160327276066603683131703246410409122832793376751512688745195564021646069245992363396468100513536211651450610523315211697125774638845313243973083536417692075962486918844667432144353019722959653638632948294049984266861870151255315023346724671430499257993958049088066160870545025276597975154855537620265690354041028742742755074396597631965320380782500944568424053420038357524917125099241334990032189526465838192972110970861380060986802081948044345526414857158569939005895236672306344348212805851269920711043891306875873016330601673973249327072503571873518366750575070091051288590764788630190966776854031578
939382690709022667421734442841784680826494146620589862829612704279521637740421694195051400095278084716974624615208392585573200182664157066813849346058321763156523965698465901396025152159642193562900743812715885811057212579017860488539960334406702752688595217360219470968738009774067915037157027492209108801337707562571266897723911401203374308490793226200974353356835311756384895692909802720948968131504604855466961987314701846460342135201914356152591684810924688350929140120187693089324255924634578576427004426339299493833434502951593902551451002292839635000904253250021884625417628756439862964325562720709528784964868687330847894476999577326582332350213148861205413652337499383416531545707272907994755638339630221576707954964236210962693804639714754668679841134928393081284209158098202683744650513918920168330598432362389777471870631039488408769354863001967531729415686631571754649uwb);
+
+__attribute__((noipa)) unsigned _BitInt(16319)
+foo (unsigned _BitInt(16319) a, unsigned _BitInt(16319) b)
+{
+ return a + b;
+}
+
+__attribute__((noipa)) unsigned _BitInt(16319)
+bar (unsigned _BitInt(16319) a, unsigned _BitInt(16319) b)
+{
+ return a / b;
+}
+
+__attribute__((noipa)) unsigned _BitInt(16319)
+baz (unsigned _BitInt(16319) a, unsigned _BitInt(16319) b)
+{
+ return a >> b;
+}
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 16319
+ if (foo (a, b) != c || bar (d, 42uwb) != e || baz (f, 171uwb) != g)
+ __builtin_abort ();
+#endif
+}
diff --git a/gcc/testsuite/gcc.dg/bitint-39.c b/gcc/testsuite/gcc.dg/bitint-39.c
new file mode 100644
index 0000000..1782abd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-39.c
@@ -0,0 +1,43 @@
+/* PR c/102989 */
+/* { dg-do run { target { bitint } } } */
+/* { dg-options "-std=c2x" } */
+
+#if __BITINT_MAXWIDTH__ >= 65472
+constexpr unsigned _BitInt(65472) a = 65874028414853418769511496113653036945710467528674240836188871502976748745763974525826911024024988631776491096261751992694895296214498585327384601121850230477827515082843619768846499801840836477230687590770231681718268338714887172330888785411211246357940293072535006266916583089772232445667160886208448607927748489013059344141951625673199213945267073377588652374198634016718719257659199541018970117033192039851653491638992986532515557336069337719046396088060414869427270972708809318734227046883806081011951941853679439848321781173016681200685359617693806920032604807641414656615976627881030905633504995608500926313100449123752592887306455273713086084720226711163858729450345307982160835189085198339193053545528326363004277216519521996803653047233696840937893926439475024000274633924716672022043462512673682343650888234611610847904449303938980580750614829479521364941526248038103556791184166137132891705498670203747450648138794675915927809475689266431674447401721770257807894864574772637777546833262526951307916013054159424871918674386303422478917660124313218179561460069927914109853781325554791226906771269776791232118969314470480394982481391738558649576936680153086613697764141243587961259345656781466911451311347049773832249980208428530724233103217369963103642890811580190014250767734527431460062555356767163656837728658921843437422091225040343040245536763579475902218857185893576553568913202523567943581185934074140454243365677691321402599868371075480132367548064644179457549106582668003278353839811901755098775042915492192962256439240717885562228560807300368546591938513564249673935563354224669505426548315283725161174366021701375396395313621732692448055488113835736519133847733806933661608336123237017767589792413707136130610229516496362268872996002408911802982404007464241164929849341909484070765678046190680486057484835560562448063002010171248868062856794398851279347048705720960974749673146662296765976038110218870942100630056574273343812309347877934257
04301380376694013768075496510030299098928181821138713738383260067531743492089373212010540772373604163595808567651919030580936151485065606637476538462995435930708086225610569141989816895567980615644300446640591314212362852423155511539971132119347505297637423037473991527576399052049964165314428444895933892439709073156812686000669921810021280841895159397334625925875499714000340122824959204673364342963384292743767400563030946633547548897604497081568033779670525178567917631792196925025591464414392730114315694075072840969119383746211841145114010345646664291762179403796380415455856356378454074462528604532038064714950589446560207470971205240321367251394412058047219690887048100860575350237825486919551082247684880788699821021222456881766533138228268218344301943691093451825490557040301786134347698151682221304356787841609406199748436609492606098968478618871985336137759261324503478825310605722397799644286739263488377622967522322773172180808302173694772067817272615756935865525145376327214755619700958623826404613672597720244053245936352115921689300587594365089135568379328512248183980922304244617789901832088485837885842848200488735000224847565412669543738179103934606052515959638819919104522081062226046162348895154830004514303194622554862735632249407263524674169036979770375000174452765163524804515916522799090407679256359908673601314368900245012156079186691862765626424954715637968751484182197960088494697337938605997084952477782254227988483406908875699383550333516330843981894040755028907645440441000252626974932340367333156100585597656416311235962488444972207680674700205090006048655788898838919056149183765391341757699678207339288767554724390872162071169679909638549709072600089853684126964425110294955991933035972568009558347188050531768084466670927484096639041774814033602812818046683700234020027592208351995398890608925484332033956040544207790168883087040439336104730246946644812473984649871128666389086162212530385472524401793526580195496678147880723095553707957303580050880919369677655865
93800414746739477196148915914833358269616747628675996398024124736486804410690620296764653170143396081264884862395048187798966082891435668140743441281634749272489963009580432119931828180538228875831882707155207568550098770386344255396572658812887150619898182293958785733167181537267508263558032652609842666181447518188381294319275600525247251275827655494423724616359577905074285422962126604828838307781000322626283356852683610761106530039337207536586263424985981082071135916285232239313449379622574340887455686853814732228137547135573011376851887075777717357199073858759259453612159714647048082149274996037114873441320913267311847070818846305658329115324191301635149667785624348478610213622189934694513766677024907458972481650738014126137104590598233420048012661889922705184745846905841107967750401929645531281553212787354004182223012745095546994832429062488425286439274919160503341156012140820618342862515737977714716104628902070700389901135173843148749883892189295935735957699929579344394723136486088722214175385910999095476817130551978519632178063942719322837864189637370833782638378518667707922945814303701798128038077668121347350089440942549241330001811383414447541465925542402449265304880857971616074402963120323756713847667995463142949526394291160534443052041799549617376707549281809512113272096311744914385667047487759574236476311069318723530724117580395237013553992459864952561318999395781286379061939878615156962676240227688556773861450407506769053860762715940750126450178474568438610679073616037854820675176077113970784968933914049084660397634744263569957874127289363020777883210376220898369441859202149486674920763722115079583837259931475688280225205846281137430690720980776817376466168327442826833010279401110089494875594894002801668121623497694175655808412590740136821802005737111867777266596899754901191633574731839191172915391406780344764583948007688066836731322662423438808522437491838543355277708132163919972629748858108431144293141602374632766566642189157309254608287607113780466699
02410716387688714853282313680765227107400697209645981183025449661912571103890490913039864188897637949556486744952841792160388971558166205393610932705442433631027753841798811000777130265365602851965515681643984169325435610742993940251263241305792519709217930230875279761194839959584662947191743403356700820347334268850390650844637405501531301921096571026263943432066459294971843089035394861220552532758517810050362565876624718715634556912646641949968260622504116224630219284301623921188277675205384056375364778072061649987278221213051902862594711798265704318920329526303415071651490215159169436665502967051371583199355417397040849618260976239182865708899876580220885148819958098892407447384776849540577596373076572931387032303318606938946805395609756474317173037851890190086080334220234266081872276699590277311605725492019621206245108261399321173649438883031907502103004618343981447073270063780499728342099870986514585907891949892308658040261024667776373424978390909682927910796351757483974443865702371792838882771005747543496062821233816175876149479890010870200215632229210706418785910292789638550348725797240661293132307251159131784402794068309831964186085953199517245335059417823074979114015927625438852575505515547130205018453380351113945679912359116614886302675918646948307265762938004110601513658895196700081824586705020282825093739598881081540708217418131914073953341509242444578622435984145983493347604029272854899321609106849286714028483886414825128479818859724734438344906945298606455406699287820316660695989544913612976004949026573679384907850688667479190435572179237006408685754560762141977321367339517301097820953022755318954649838568359278646708109095740043295665248217530811189495831329465924529060656867586984790794628582957651073768585090489278140555182834690421510195303839613545873092108913694594537183285417682903243454311391369375810654906662974651031976593576633213427096627363649424527674138186816206523669842323707281027980506733634096495865602720885328039147475167492532627097
60499770738550339399145679267245935293682012282246955659694567639690359054673968239372957545449394480705734581906918127156990464417555180325914744531723136182069238232182242328937696592051776007276213102884854083812373391210684365530831175049655081503370305669279879801191800501134819972793399706916454254484312465153469447875385562530962996067886362381567291527492672696571436485825761122110638507405069991828094817714144238711579721319424287059602356402957798949893848553405802969053719053474425227282485732749396587661897664367798434616987760171929868339928104299733880444387423318694680804059850836455752125557501357981568214606886354933724830300124807880704737810892076214381122146175935462087536958906502431258889806550797156102992934552785815931312982032766963313229316056599940046697614754712500325316488664029077712528701729236172709498554701475641751210231797026238464105107927742735172557407413111610714660039923440364781932360075958667128073372598220912476426799922114748300883914060267025720507878097062448012707139654315854295352213611046571036824206408484089020508218726664562914035795249476568846091850787848642604206527864758401529527965115598262325861637748560610415763204371606769315634057358974837535905972291334305951531084622153235384433979475566884840230908077442229329565117008478284435407062152016893786831608604023506914605834528968819290768591890038906679811853080045028608800242761604354984213848123671377601388353162859059530004284732919716838189193696810196777601408190191203433226367067782820442842238443471571870459512046143037557417576570201195309278600853161884156246362154590493859344752001505239891347374716567573912822772986052283785649335940498127562040515608840772487389681430691752715030014298247002817192076974442917932043174161087585004399967474318849690816012943127210646467945420199689733341425654378836825989675963786233826849892665864419622642539328223266938340104341004162213183426933124905325316326380561729223201455495376660336694444928193643616334800
75502648813137344034998668859104220968472458689838373953815099053628145104952944172270714755167504385389149225045348345930332050809007954889121487420191195490140015543669111190675849947442990183080392082104174669064151850767067492058010127993293999862892049862947864375886396127825719715077237719379297011637102441630434045648262678009505896761525335949894186180142886416145203855701842408845861308708707418863474686418298571834537404915698425656220649960279147994459388154146120454899486098940487193584645895994986246448099621277655639810654157562512581184214369491424466066264641504360467453549628309332316170871910366345282827639308645906729890153258083465409170860182854822162947766304076044440172098363868135462513691845583931808627278241336182311054075653777268742010167168747471932981102265487131130558043863355422765847853276421414209797891286075753891260594490562312216555043229751688245479215987542125802848603413170672321715030753186591421945454970161458031815120263434212862976384681033522230537048968682651162062565446794385758979664267628747451065582923471348757285721273253377547337652511054701774152709662068998612672523694480689272661028359750205574379131648401021359851782318759901893407345446547446478274412860109022354389825649980684773901215802680678525173025009584939277271556698873354548022324417668769597272960322138838721414513229002610050244064126389255344473092971097746806533968390689819823039594495859196019517846762693385097984444922840271985275347292648168632238552054808273921766741985335419986522852089508443687721210159387758278687183480536514666609458428228571359507014694581662444460365604106964085987680087962423267888171785644896682760798970207857298625480252166834031451624675456417295186204975960280191990392464478451341328554631337351743237886498420187584194359063187432060533568000883709178523182945271055333452304770012223103834028281263583977125034135916456110546018664973864694126384917737286146433825902861863083300177232532406837814064199198188135024140
34869402770627046602136910556735340620577006649826384200614066464173557086745764379665102794298748986565199421337796563169859800331533497258203854653725133871631985474523134061290217790871839012014060652170431386706390526266713354909666532894989712338707426377288105959729380930582675275553400044698473843175010204608057871421993855679325795511343440323540375249581001966431109313820512103729737415876492397855350709356620428053728631513279657137136751691815273101807269203767363250781488995922177169506473238033222632470640191052132325977988500916888797348877819383961059260577892204584222324373872871821748974613401628203875338762555211387910748411152327539619567997694542831475663395895555090404922654858513105597129946139446385944075046977527087443129745077809595756576233514427417022666744060487857898390789948480719931806881956529624868523931326204018924742455510988728560620886881508902552006781252685182126521983698023601752690226917489683075686040459944761124984353820735928331227975669811495025536004835557357866887952405925158206358648460053744971810759800516971341251283976744691594110219822376535068050425606180275339861939768053672977590839860433707369056754028768873245370748243872093908425321452790676005744239063004090812287994171654371534280527461204785965744686899037505526748205474829861508775633341610106216825084287086663714002411976655574453853711919148720158805545980786494280975922278491841813051023374114075744194873828536865132926198728339588170375472796201352742410432853435729363112226201941628255002535490275056766586373821459331811284350772302429368659539685224295962240105465692745743117250558134122530552966969305908586250082284865580325406190790343864287037227316258520732020059190355270848306662653818702002897039308630618431243314449931945150585878901504845973215101937953823928461721757938572553687288827606914187939448631587847341567152264836920949873736396479054462466993545281884314888041635210657865924492733477634678092762053049423724420978749565535935202527
27273552553885785653253582972740375824851567637773496427131989560373336949928209061937064439091270311819096769973583942215011867609081258919795682843288345459177550614207299199525547716268545617991784447781091611775836652246718388551780056551885083656746326057993657040437890508563188012683595764708832153436559530398395749565257259064083076383147903071379494948158399286185462612225550763665172869893804029639452962073455622256531140957075421479552053500334640055393425486895146974246606308128200958736528824873111604000126879349848515377963051690827603076426379559582983476529843017385988603697411421046959290082252249657839380702236413585586861779197211581767675105581473669115727771907807836372299568235278745393139854422588861515524551317284224407610797047768508734743862537888761930241386646661911700833479274865607414130028470232431410715857502109202104520121910484956804401431816321664816716710254556759019205157281188275939917181509148224332188956161987195331383081292582537624161237663458732243584604479905353663155092726916903294858651462608398973465494248778607312833970792563599457191489248330368576208384048780613193073138784917068534907986402020717938268961859401016207065915944788934428370720428625503672973012341525379545338139236352573040966296351293046414656770295341388297245289068740232482367643462366860741745171325268460811200777074337803212417806807662765559170578314228946963644964567409318385483167994310248005433442233600464253447213780229395959156404778716574228171126204591346388916661497578536728512323184848996494716825925985965133180629410023351460693524087622438828690729579562352514556548526407620368569692130124415704922239969426112351767738677151816490573734460397825299605258086890292312447555291401173052881230017902123030986725376988085101359470332091712351596749359771316300280691361728873495173185962675711257660698933941520591657075725947736742500398082041540769028403756944341462127153108785771154029239511894032793008591764896141385488071353368337936568311
83214401940532784946553215319014457962241682640567711417049760046023473712221023397256743272954245849234389727298016041608816063711294847525719565507598958664592666345993189343311329922684194923120515152446346054897265819817743726258908473906632226396777785991305092290365029414381469037404211297457654094637410460417241097439955418329108908100432376372508385124212662741592698380871629668773394153209184458380846079264824873268908086033314549370717568725825415879508696173748674972919424341663883407361884265226467781262601874744593052733170483692231943053925924292697960654463942242857062458371869913868079727681349454068048436779861213636182839946230757868205190306779245246312327730535663270688369140080688805085938928144135999335121278462564501896059650077224087495993271056333601916692268111257054641668659326851394990431884170584081456194751374077043090570962534539709434310102088059588804416685366366309845280247383837269525793949122661557661940594965730341229391793547449068863174472998462946751458017614298673319208603206894361068618963870560423010815138182643192941871703631227041065261383689156356033164949407996294555808478842553016297078741278198753287403092453680689253868450811465277518982819295191838088846947329229293818599980685615522877966386301535571075176039768857374089613857268941178177721243053842757737526815617725891628967265438481182922248547350179115527715988796721954199031424671897582888936177979813326453084630003801999682911726622863424334075309923993115823823710370513841077216936749350160594654732821776801484554029594654722129289817532385963296146018554834133988338422705682952966407435985907797529895513016299957715230002319116790502794856983050954233214840218043608837902546594461613868011945340792884995456904689105777466112997978645631548634908326391444106527022921694886125193565873161527872295439063554706775545024385396022684267674735655155364241699357368754152736208340814907302131503989517366508421084508572407427745942178528791736726887512293055311158250
14850231000546945150122964624749643195594303796058701805941210884086651903723484629069594503110445498020891082678423358251793036052092074949457345902128190663782180041297332240668738760754685732067836519097118357790131727679935781054122263807112935237022613351668926147398489883137771515688789600851256388568716495659230435230035583859074576720408679091908831650422755099588394054135377265310497551564802196750405263555192208913371509621178676044203357752939238134233705209988483613418357068790410292407284985250078216801955681819735640889915286539424767699291558260497936451862444377162411777198512727507682930531190062304184267580007068243872442587201835784461931300341180739516168341521559240873860541511979663086972805839349071094889623734952370289999461430319855751397225742175395567834824532612348799185498233668108399584932908791423527338415192594890797161640870428514483903241405192766090688820461474694129483396941450655378070502596902012960005925071407505634409039374591587356995126256441595045666991786542475413106212634440243252914105680546441834800933952893940680530366893360583164583102871341878638954296317852245050855362654227742890577697939811035528084823005072060163844309859018655879693761266441850506107247499887845837529072734963012358189223535309540488630641402694119285809766132525455640455074277103890569921925511571780900429074779519535887352143224202076526307608773803900609314478528807884041223459718478713147780482731067298371645459172074879456665672144659257063151846218815521853521538039105580092129093604550652176702149722267464711925176295825259195451897982973468788292910834248649486634164719847512355589622486356044931885992391641340252366296529307289311708336501951699737694422661506981734291820308445302217945166uwb;
+constexpr unsigned _BitInt(65472) b = 10011471773350127904772377618295675711534772113872960091247352509273431319374268258929367634898009064669849957818561758551747068407335239183154946400376533047710256118775776235233182162408418337969178102039883376981731223658025537440671044823865321526636601129973544846823321004419807436027949327253804540655795899116699942963655425364248335328772373662661407617377438903093904703436830941616992579467639060969764083941928417292310639874782270325099306736091478253709891025469532566338681568419149286066609178396751957879067900224495283087793214624274010143027462486081373381373762151834638565485723866011312242910484390517130698073466117293067937347481586597764953983221961112447857969595463073766311919476666601736414826682932368399109260766127848922415105999058380728188015095851969921151220672990748223756122008638725827552936214702986590952101631274247875548917929424317143009328280693101696666155951745442200457237922459007371619224770394396535675952030738273394457707374111186525905983785343066643294645420109680084133843669652536664864561727503252810427425572043665597543332026572634379001652923094896452173125668563081613343209782328817003873130236286325909216686667079529097698688293050650940585959256188895389164254073621965503680726313158624230090232492362958940815890204460626959085250182408340797795316979088042038742416570153996625824859018183830204265724271635070198168238094809780001457839246091060646222857493295052924742384588044424696273282059579616782932878039008898454726195262592219313990191661022949286166109274772120275034070495573395450128477474209596334876139808165366403822112860570731460100013748967730144209984204891170006150379180395591283112226828730460914846431811494603306799741176044632826989908962981895040869872825779416757466628010688964679755442233901244467512118790419118570896772251451022526638420778417326111882799043120952393950388813957814049577823711685950876974368229774827338828863816136303821117104217834529773440
09615127711464997516505770194849184262108812338629021331507790885363089176756757200231267265699892631424130365115888840042193369341444578747220170650883070335025141993547666515304512056645832897572740816624131508809873300911337500672308349205337437289750553892633447309486695400358226801011902225733464172377174277501913944578142103426877267664980000138480362134700018248065238103909176766354622261341918509562146989385940382601198235638529444774123777144143272524397587807539340636158709374086244866377991471595980403932153272153074426053401172460258003998585569631429786796119722483962217005034038040803528296951446293729617221824298736971109047583841112650719055680361825493437016665499578340113254758229907994547160967463376396316717758844903604222430116011891860049550273068949768235617655019148580391334599642039069053952362865435573707325925825459237027060823882989812924405543265231414121917688887867749853545602254772751590568769622709863751731817990878657902432191207681565147530580753788042371213872449652293650897827191360443463546397428684575477186846810950305523623320181909998496814958875921462206339624944001534487317249163451333631871760148617546668358197860796227111315150951421108194775997339465260838958396024427385202154056106611558759633339026467528693210421833148338486004253429240295144020240579405159545604140872541326382066555427329162988106453544169869688726653470289694394693428921093692748684583509673351181253014724326604040788357206359297845384433365112399849685018060479418325620313276077958933867466912259533838352343815731949672356354746430949183985174172641669218064415282880880454434023558787238160949228449562406499724078154534341398936483961730563494658874423862112143837502792152394605822990142997854577746158817180620102906961995917462367152323179011663711363966372676133096887128749533460672593509363958682675246095077905182693950969512146373540166664879721497836559801284308597719900042035019294847877784444575786830048837701563220920590773415367223490325369
72114257275616406902699749709996773603933319820907854024772479536315883844669093925821283494708645822008101810820783195140641716134000243606844133608185824332332112983249839927840564670970228432433825221961975915060990749635049584212248419151095056965424787975733630152111814352148292787134970830277961594621197535656320334231667471538323175715856706330175623720414446772642564136630153700983082843271476124222334675026760575553850016767126350165338075849886189816039085512959708505905638997418660622260834740918236636522752771425143415253634469316035769539924429279861161253628051885191242039194300643294989477525263496369270524045342208132906359550473104053196559789720930488483001375834491004104923938881376193746134243314219738399375604805971031464688704734674695095464521297128496245034914707312779753888535864711266092705548466197291426535574121014893754119787123401584971937623911931812654824764450770311232342468580959470259239272655961272418899162587910421611618524261985997750542021074779455005156374688740346435734305248614024687700564612552105741190172364625951503127677278930753915708544368773914098299676590937380612660233945555661226552469943730447311202454996701689733595767235544191018887562427221716057081123244243028862206939620020318416338019272030222673535761415091243824899344220213821826909682871717155546406870595084403092112870623574388474066476479073028727456441291272134518133325916707830017953156301354073289349821072945405562455412800081903376134990084929058222810213923609047100884815489976321023259522929519598262743943702392819103090239344152227378627363985053187604499448761547106356313981994575416149460255944497986615098021157845050284356535450362706248417187285042024526055312009264065881371591682354632169802604427262083598208582250627831915427796250182367158626300599669694399212026109416298724633822409185174039235022054762927142269080686246280318930901190980203350485911624668496546994063270528145522420719526397809989544695566596843503482731492596788167816454
61036390713192806224847226057304150089316606296578885618729549870409818186618912513755782694867563960049335468596926685529762379456797448889852303747268825234161798229501539287379096208866449722537808619367154431087966983878597226014290322301992724509577758084454901063888410463976920920030766547760409909105530007827036251911124647799323990443073199692246268631475282646450575878403999966672317175762591945306590612281153725130549721701151473741717721376321459376406714875266442294814601232851973129090379030824409855896985446919090493128269725552165538453871081048030364458647637405854060044177491229457253454515663642673387453603639049958611657863389360894348332062497033150252693733103308336463721370628644781283543861019409036019384725986521479729613383256618131115577643989410239235482022101300361134402869331613526679527662268574821555734658459640785619834962724552600261334356662759604738756665020524586891042533475422012226877466349915332254724344665520023697147722728680832584983232997766898902515561569065573495870012104967831425988043327451694131374036844897806701256493349567961419982563791051678156957916019915583056056066329642135457851059444251249928719537957932426833627736131636009157284379801463896219421301271626969882246875859787830031113382362994906189766412564093235103205436682939655196825120390084967104435885128217207224083825809894327201643308362916389712459778422452093309041353331605699322618085835056304432008694139901155241235467405579521436617270948962235374390270964684235376691872804957407985265849059889685695872656465466087667781618890498320405695694229831628680962866632311107430202691673765091394595193697831819861088268710980939122826177699344255256885684208629998512152835457735812037157321005854621099562872141048513691420556177550763440283012662505309297055042092525396672042489374005366446127329261557875244408972641973264103239372696266526468511694468907923643299291633979113347520107749471602478372726679396597160430631636790947759255218973962359952938683
59786064140892081193646384448530031635171303872284348990521852636680162740795981752254181617912420224357140691204452120829261819405708504844261823395264589788180659030603787114670883115856458987417299616992394716821447025681038708427207022166694026808602786358453622304340664278691442318591597592629869558264399366157912749071689688552422562549013430707367498580254123029017601897753585817434727618364538261718589170328484557524857619087590165832965224836753893968240664045145077223306096529529926780754745673916574100461412567079069652867400387981791536175105905714595716896821893253178142008628254133677987536851670974401257049095943086095437032309029948675445473666109070207187156794658567496272177527229560210237927193756189492579493180295514714064632372790973860532564581061657844028147185869543209689530152329442321145550914812185135058788111870313251894387158903148460033927598223918159075933571518541768967188004587127762315508403925625281655736896691736413148778868540234903727445099013351807876962948173307961619059700533941125254439078676074336686528833862586778209629966858861155628540755743397419712167196613738281565597805292824587359114594944015594898642848878124192364395453418761358856514294000133723015832388609040284544895247081427225963068039490548545665971582179570390818712972615846047778355893390801307532770655392246374729187679460858160573845707631256214005917967128078586593610144959374257184825107598784325188051784691849113974705821027921867171351999709920546855424799857822314150834432470720021830815584121704307061501814619327861746322000400604866201305560827257755505971535038864917162737658117832428980355080627762893883603158974938486447835804016713265130231776095456611750674688652312539626077149952095792335104638002138950370066850227271115306789469606475694920419946115778892892146041870030674936070148435943752597670093933255070982493747312012605341978475356753150916279361457254454744176470070779706516010136691202440896388200590081844795269152725464512387015293
88378073107543820882471482601307693781755204293593645053848731941506801855573871644105106448156348632476694394590996668647499759732769324337813750839939368228482585677866822996075808897285141101045024299327172559063303643204635678891840174765990600105457475619319212057398172536274173734952573235629349657388284834808930989298181672653013231489218326906675692420865138508313678035493530650052541000425980302390672463045262535367221549932356256061179214984213805565772817169806741774108671645134326581312200396561492691232279136954082549326553800132522590765092700931199260885443006495417472683709218836347889341416556713960955521447189825173045189970496783139865264307350984445082592658446239587119944913051760181394482956533054827091401320864009373362414310884501656463755878688896941858827644902208323530519392191134363209222093605875804363615582532971999495724877910275024940970123598634267867983449057004158327626031855161673492950985924797420926140300957443148635248305948119123302159711096542342778234754508269492333008731157030461874784024110145169185011713946906205781125735443682935337828885687738693695435870270328989801090441996815657595446621053110705087986014161869591796501434716102994924894262460919853691777292604060667837892304215870967163298790188945493292708730676014635415208899184754264208175638073501979475201586463232950692133256168040865636663291310390611433019671304871706407766956219378791473587162260146941132518775127141191761778833574287972156449744699932079204525128341757126237578255202780294168272384946263960753848846747580898152471820579481210461228692617403001743334010595558586720070582170906860414819065030941686631580882169249479850751852749787576392806501914519459448284809198919325234555597962666877036528108982758949155823331707970649668751754759983077311413542378611369389126365438077851402102549924054446450867994447577497552929123805136418269089766743806652808714281299047085598636180446664259081645302803118870447938332134726962881831933268902895367605961
30565474268255768194259546869237658571040716796995544514492885432595845518564090287728189752751527250395717391675236870242265783019493024869078880607292615070025703008897355202879428691900128756446756516926716827636030786889664532335074144765390209309767510963947068655455852288926129250684886328559828512933900405266810707680011317681516396134557004156705414620933314146399613512198793734577966835635647178853888190436994741278681638431727591933201592466641222829873097706074288719741974931243764237318012210686078803320755541625548697009379439324965802967814110661861875241834453067549647511290229528296097446783530534564681599345129396248342193360361929844340921218906724214267204681098593965424609557558363610603488670486514905612834632853086979515206579435434410403344482274751776248997716055353300163537445941323529384680114557486860082790343505746222323201152771597168135719357847538418950109732370871804367601305763561975363340169998743324203056725782952464898404229752274183691514846969896076062057983693810597542398755133869488056210498196159240260586838704671363613514625549009058771365706301320683434216976520705777139581757515117363906004920169865620176588125768750406015098212873383856304979603402387684061401196357218931471584827550448009490821291172715168235097247826519150753183125048912398302226643731365392668305015846251118824180995813004933977042756516041383557413598632351029151354586241806320507433895107946220651368544314502986407392134502184484914574550616206272803826678024981862532566203934009023749634011947561445307123242129708081536176591715065449806990337686774806270133651727546120262474825302069772650003186466494500124511184427069380463394829366095678784337385184574190213155557519743747835461457254119442278260241705810531848016824679966237780153841231478295854517151189412723193868681420796790982411867890342351209145233093879796012891041533508186265196085603273483532985540721863798041835839095678423936389098465643952735628243332473383216060392000424872172633868
38366560330474191372254152920661702859848936616400838715447204131896115641452039283771349699495484748887621630680003977576552871957396518127593157235569485794344546693966320371292990241508724018011173699416155836449748768565021992071753875564166415125030416357747493147465911558337397831230802611100947820762094782015548187234469688366411132215640502832451918778625337110844286401308644884546248026807430729756203063101126197292113330691043572433250187789255203390619440986284653626512066170750024160480557954733914924343630520253472663788098516942998893172223705690138322449946438664090700490451187749338683683846032586359396156030911488045708849626612444388659360674718936682503088425667627883194115395614907815037631556534794601340438955324269451561289346126960258285042521512833751345109284599030967347151537781094031301971312480193166517596939738726749557332751034219823762527873123857976582207705982759651460033561818569999089478647700767085355863494928507578992324231438443960848440400780050902212960546510288669976359409041559994270387479192256197407011231630612436711204154871244894115007158281914996014092914607578893806956914743086831895229030772410703803670891326436116505233078347427453033104878879669514971175106473493742734563978979122117578074648690343820683160341600438534495435910149541365824416620955068582858647815850652936586025090608545976490676167957378122315980107340679414215656844202019927738356230362044181048773054717427521093979203808349669967107220288197100354286146007300868315969711002315908839899447525465360161725464126840400335595212369672462148537356454132542992534107276905758679542144161405580813464511639641087624394273239700516062897761658584164243578914224671816500626849928771662867405894767527037016265289522881634230078892773313526741400211428459105160766622322166747180293986387158789742371520691370507554281551307936406621661175842024666082629875537325247147243888033004882292330448456688878008168244389680808392169691812194480445474439410519299254176998
22554422881421994087240655174880773473548911756902998184900735695948490729748321004382279340016278094703572364133125564281642719607796887756471802835333379278409552784648073731419635698518935992786120804917676305536605060009182587611523462994989304451899943175196266152948994385451605471158595872957687826054582153472712542715255302609621888638651371697502021672915772676513343000790978308183287967785349465835917873022653042349856876549193764678016698233332898698798473764345254002662653507564038184374035002721176644511741088639865091304530245168267837700955655779122874902659518767696535040199203301461538774050612808685039502336859677745894076135500061581363176842799625473551806048893186087740946866607450388261315547482318656946969696550662834005919348568293728751372059746250342038728413637702999453641030986923514088735238246185727389302501769068311178111654423584088243660081820882572402391549407625437880913790459621072887481182872099914351803243142744092107764205010581529784581053259035664288135974399803998383643582622960285003661540002883844783064606262959936669169329402394659415036846067768525715376351716220490464406707920642698245918808294529084826031771071469485012891059110411484906779808870511481508634839218367918630524749909869651924155084608745088196942032592596539527999961140782450000510003527503750358791029922462482173677027356392350857214893824774827323978108285364541487637924703438313854030156086904163066939454891376856614795154679714792914673806642019152560027271392215087552179465448818060415061519146801092421368810715449187690775243640394007336532567401444394739650896078150242023594790877415956923548671954483306470894638272194818356709172236128340332770207851698971571587319010792604461565714677618438475569161104417573197556864349189146849724838083993133408251465883761728542325249315486515628013094801050375617956346751016492408161056634016092819014841853897267006965481937343839268114290444523549214856748090163208413527478840072549345380110648262081120319520
850811775071545119935961470512637911989177087460006474693931889487742359706755672077087166572689858419021851837802780193208434024282299532942483000144195002022520619980583220590077215711664509153314666589125687826629822516884195708377252997538578332258307932873989956586374981442291372854468403699025042572235856717915388770530168587331179797655658071103524008115236353756549679170748759469516067942438616716357831153014255228670162534958193004943600542870528708805213389429304896418304635289448124302710129897769356418770523480407389315328572306634238009504382082021165613590522029840102543328930669970500692456407511650187852937349195602608014357736751059571489603216826077415809980979995711283335891962795940179019435439507734122753192445481086439094633796179207995025246597613838245829524552778303821902522279173224245471199657550553825642492239342948135017794983743597654410313919346933663867165680179522453998439173845369808176275332816850180170341213378782496307778975588398912577730974746618617131469815557973549303574003815147711173642475398410677966310429335960283636647175743582296638000833820970775030964223374636373803454955444189296936840037446363760362609968685074787335071844649451957697708868264272628943789964979104588674822545864503338181273898348523408711384734508949115868795361526655558615590780941372174574518038087185667131948876078387967198148041613418973289441521934658231375183374956575695869332017751718627982355393837019680653556985919193671847767127134568298291883035015093292336382067308787956277908453081687665703784475018000718697708635284076006695671158759597412687872005027545848711100708185954993889142620615392711359879239101030375288401894286247542619281771596083146956639572008458511055345530112400412859347687uwb;
+constexpr unsigned _BitInt(65472) c = a + b;
+constexpr unsigned _BitInt(65472) d = 65733310712974309367288359480609626206791873887406120614733566482989916799556257112216000049296191860946928376952770914306941899605485940771755684540650442628048273063270873248514555210034196075561139066045840499492361498226073231697006377971536721223952154393046315830776038940449691541319740696961883087824017261681376524927620653768554379673373641639381277483868891587662001863419124871001009079537921523665768337716200951896815969924745929795329212934170312485558464946777503351275156853431957611568920789844552631853447610968920496077738692498000533852760507563967103841050666987732076030100597343456816635403239960433806006856418152716540091693637161283246177641292173543574512366902095853676443736576147420571058987232518265841796862433718664274349920633906531393769642675525093914711153973922447983137836456165193697235052551805400298086748645418276187545002106540916940395119175073042452237626925202060310568697433598981090347253584305512444157902701459227814962188102586948892109610299149051502729974628908609809839821822668130317727334441794910982323288113687167417312366330251518202778678924819655933549572677766276304430538124570465930158881124158535793551498860046868088427643048679300639925301593466769812387105615012578877880942705932034097020799850477256554157187520410799244192086355131496764490623151239802453953907987512129264476529989486245112709372298713617598130001106466276409258479134275433778100370825627338116103346660142744281838852332284703873115994712754322103199482327643844821444620969063070231305099748945990177354116563708994207860979807869487982086908579214553457839339156863236498493901872308526293149758602972887939740766581996707422324863902544850256976660695255552868047320215405536704136182963653688512530881389442673688153779168928135272667235282714117619186610756449017782957105316404901015149425682458716235839749012551636965653227602758151742238268928989092641393035117605197064846256509919642113594994851398265224351
58464686467127627228717020635083496738221097760311463713502458366595792625768658605876581087963055015513349795767951869770122420134767378481355675551665227863922204962862828766761312100405012036339622891921155075164490990858489232541905479857169063078152892012710689247654622378840008745023663264835333280546397346474003078358459799233883104658367451986960380542537335256543076794051247720482237753062557018524804610260794368732685906483587702676810949754088694268268479985416186292211914624936659174620642859313017478230374365486102575494001354027596032071283158860990330128796068496118562900049237732868436140042241320293493192812820575897769304241347097944063054622797834146743219671393665989052463611366491665134590553252244627370185858318648748658124134694698299665194011281066946405228076886340179790242988658632949243560314905973809681261430855204161054596255473246051738230277870490031485337883399452649285934008258607006187418040103553107294735612053322333059623695181945867135328461029090236711433923013003847465306019413417344186523111336934895365235688842994522557329796812020480907946644396576937401783539535502376861250004293068298145803621075790443311808025222676319633617403801553891602525304746532778066374531628404218058995063741091024379672217645299742245203286984947415580156258740699449433880639769885791919225329750049378434663922616415856770615367244757685450150865664241631954632750335510161755285871240413938530209505329897483117053351005541873523116395289167327844663240435225192663341829391775685115997764974565221926134740077728809623779698799617829251823476905543776778528129992946197582320514557076647707102515847055750353931862016322693564223075032551935140733867188179177742194726647712884054420574416742139836819604597476028147784490941043515508652364100621720296834014890373316686161074512856688212506310443850567601947386562270749247302143833240564037050899558543506593425261673028719001408249017934957915587817567309020205857878947073994713915063893403813885486065
95807372944273460844735157610856563746943032400348493776386038196279016917465844547370759876967323578641603926840977341965069039510303405685662100739385450207347746725391364414315964943998217605995418017900023157170007905016187667262700552595711454368191020700770968936404985101700827368988013198665802322196281928593261346449622958629623945032443787904977513333747996371805629712926303901522754447027902537290699197219906686320552298114558857394895650765202579823263238755162729818485485146461586793615081489235613577780358885020518610078804792040111104862455748698584721377383008102701394359358409283738367844779965580846376365991183203991375860181707657785435617728521084607798501114845321785603016085305862422552181704133912359754041248132489210380239681245395911622002544925384778834607994012517222303891528306857800627707350011292186582254021458696894453298320761588655440125463853799523508480713634476890842994371283931297128542691861588846234407850801278285676741429551399771423141733277441017695783859498876262252325674312924648381778810531759331827956102765281650394564327137442278202653479237976729989245256668440969779436115394982807902837186959871118909723099165485121466026936735858094515694389252524603990156062526945724516386477239838601763223918244115945873958869293787698885257652729038043872965300260603151341446168250341504739791814734333509308263797049434819169641165133502509386785852951898076909638148766732990572006016714731847847643081348234513867653894544093468928920747741910755126466147012167709726108687722882292603692389703144950428115531648429785806553162135979390139074869105669892816743241602407380649967659962373325503002743067843366622558398159491097022997439809271231464250978208929036721531964083706628006898881880193790587412853310973012459729261122761532279475599859435466865066388106134249031935427044604041715895843981902459896407797544956255617238492149837372877828420001357188969897525376898640673983782136874592424411949719232204815415585705624425950858510
41944799808904544506901571173692831540551165995774595397470400090562683562794797697451285136828058143410591944789180427904555456236941912215085454981071205525291216441004333912838320426472156279271774015558431587150544806781108944739721492705508709807242677552414468497054010543374648176577420059610855075742038369243252137693046107454287811873987736356821716492639864472332281808643883488191858800704652514964663233933121073957190441066389920544329130284858672534099201695727803342839514123378350647709393690280907650354940575107480283474589624431744112216519223017797921728614302139082329594274840140955169096182323453926257960018135659648490966464611127419651150020298022851672795958613733226054374082818376066733051312368282035038368221284863628439006578385553557427882694598740435172636552554976931016786384269073745219783967315992539194327913100155235209949808181873346418916844796192276741603170233235579826937741291735884125962583664120694650682226451066086977052680862907173073986350090415076972216568662204777477676181236054073998491942400434664408501191792680685898213614962161129478516160664228754814099785893942104387816797251376921851749252728130118789568732271069448494645703579466004131124122290810249141494796253053995420122535297581983215063363025637799574748502335575866748531406891315967131854482010827928515900145183933802038759837780856621841566650201051974277965566300742851625735938106960447092085629446949143070976802036257426890656404430664365438022478767335206522837018867309001375559793404871129404322039002227149686606800054203400638173907211138919574969418122927785423106047254614638512456220433124655851487811812724404028021271603490636522676990657913071077769637444715725031465662119446538311847732751542465191912628172838715204335863864671263320123569607180813744039784794557850275817793745624539689727591059618207995917822692190607371341378619667008166326946823971150816170122919732927420005317989039847266860660838953010552852504872862201752813773661634566079866536
42271170124240659190789901431185985756128245852599297734479542136870576542349190049467278260907162285688167122507522844107366029104327009053284871785295558655770109711665619120605481039005004897471172941797273069525369892290590340283946144894712290978843763291883910983385536287038202119933798463602421123844510011962650638100196914260282046390649083059509101458714136378471227452903118627553269854263903069073765965560073297097885162931263213795540010978912497767562089100479603056174230994298274872637278878784402136580220690415411582866846927064729906597089249750611117753277955923510749583664495486070651947373813533395067603118401133413115382739167904393387022606555581712057816959475085948196413120384853665632185791585701937323011360475109501143018183884919674591995245874787379865171722853236846162610920552322563969633098588754332970248006506979802196967610583222294527814191878518088225425018109871440691425741301677336931186992915247312503753883651314724264020927024462165986700928009788106309547122775385714337760153804190875072000620969087618194102129364281198689020228280492382945463430418107590072861323666438358641361644169796254268957896406592308981773379982540157363181404186618834182224390475933392788910425034582267180341443410520874418615592636567602310888273090773341617454884290473809859799005488985738175939040548598551829211596481801927113928311112714480051776979747368502596707594144974161515369818150547681021096721280315681643523100567311042367451346063690366857182478815926678795048564366110109751902776722775581116753972385854144590361386704602742579026158636457642134693794971863766209312187666421126065929266544945308046722396922154117903147883964201109318755857476242499995305226968002640838899989417010841691653237015931082230660252724224153046949205028764456039027716318256680338432364709969470751405024186502517299017173260542210125235776106312547614574650899185307804292292315353278486332589724450064508014994335968226376115049152675887544298558355280680844862978
79955182979673149954554449943799118350223165629278515015067146480336219877666050022736188054278207256552559773742513389157267173014369306995407535122707955874565592473249293581796820639435259302914785123992881750429095367753698295055340832120590672549223523699061159929532398684812547017064344505899681752599252248211817041860626711318632722036672883506094876442222198852669902547046678181693767691550490174439064981724148169168900994097693556987011144332115831339206805566705433348038127610758535039269793279586770282670115868246802441617977490636730652542402312603614827506937842878533818480656609246864590346698800245819402636657962272718405819988900593223215929146794105946467419240227344029672649229356662152418079105708272210590604640094394318482751722798073416608715396268999367344871263127937531125357216040379179849563729843442554635031714383824696877848081032125166966564658637648729650992546156139492983133681517707938850320856550107036993028018308372339059919928469712319800833574124433747908494517152483320012004464089549112925903455069800596892204195216225736951428007781966419638193477797814648983359451681423822886676036314447859214905195245579429644283718588870485577628831018001146369753747888471397680985327087947349637000511715536058542247387498392281865412032334789527542256490567324967235372247707871402044853440976945815270857457240738538459317491450687051193554697066668143773041943530694736789017212519981251342773215540400100854434332144370959621381686148461320929602495325027904756607733573224734566337407635901773293026516675585159020261923122707133404646645593793498816544798348485949907059116093778505404658142479818689194710884826101810532793959565416681671090331369965444824553044962364708533115168826964025928607764025285244189746542291718193833989468659047428039056986637935816483614281961797533458533334615966218371358253526327301415558387022017517069120554314542542036347742378369232239092071317731154749323647301639185802037111409389216458424725623997151223643247
43113226904203354131670331853202038093577891624766074225030436696861691740417975054973861487048797975263899852389518576215977484746081088024774051070273517721500772048423662760209959853087623187023361223489991661522345358861206297468468414945784260252914834291908620955283686325996635431556840870693679743205870610728458779027093769106719141828673885454104445364754600645382978058882615962053396551668687164543905638237796969580255230943486680489869121332795004537548997958551443130441850722739586205099809291944934718981644248935989484265304140216346392928217091155186286727301198704575313127693210570021741771212340344380118361755793120626113896692172192970189671397465989966599783019340692909894111707602644029868632108619976201596557347762747397731091893518315505744672739581394668327454739383784324388450118021530254822019582999210141345431573532702238498543425906437633146511039286213901077094205025792484777193934176232100087114080513255075225408076979438243093213287700754638428973401727568608105385331184818413415120264785903581965112782059251324887970218309029080956611905522342577055153160171391832815735671709202835679972770969092038116768344576380562579680049697638639253099672180524573592630287801479783485556178767876566817915901890128280355298145494997577366648043196995306380682131376412607852915782853539523955091939553876816718814425681881438067157998112338527064687494784079462240951494355318688764824947876348896064548609507089718328708924674408381256787459001120621378520391261981333895190200338539428045048703611100501650848998205563213020524635315289615363581537690207298649864096548701931841128460211285121616079194250546369128047149913251323864985018928671318833677490034910282824468791738951513347250937598596971700525262274478694023742540145205474422785034665365205356639213190996217496924563848079577314325157667502656165232690283638427361378701996292077524391724943161508199500251567352164469451944314191201830196953638296355281391315214165221286987409934439741065567042
55518215521997733460511705121116200855205053533749242725316108661968994486551476730007443453867591267699928021653458790562723819449495326048294056829340018854630050205104639699995724969596466390136340770622274713185101182119381124333736682960487545541585159581793662935182682955058113647653943779501021868291176584457771125322847067161878993279467649038086576703871258826905801366905879256299600542384380379148438708541189863062569612501354641816188272139645268484129449326950119557866434023489146616426946767738563787803429345580994244643422549535233237895473069747394917898460649156813327698730190584135529740005357738617328518661137744774912556181328941647508271849643289738068756295448526821605265660498217516663659536831681618403741786939547585462570706081297004555325575568276500863705861125503900798170058144552649556771656115139867927939291322493098491472274608871274149620464114524034333726783243989494601079490214926932781503014092809553713742441946995719174758212597737397441601036240006466205820573506850665362035802009159407705809370956330563539436588895170371388668781559567297568783629716397830341438039834019625688684305714514808625301960625195917601273652305089098288316722453149304657615316620298475491133080530212287767220930413780653492633267203028060140237182066098071575631630697193842626636413601439775516353794443618330668855752114907194740106524666537431498629951496812510534913439547293914640086060959858013585053075635305253136908961157160200971605741163924036526523121699369426737852042549073631016452536726298235474548612974434558853292289405435303722047037549315043938183305694497088889261607730977731634179588346814331014623939306679654015757942790290510557539618063733932982296235144270469389399058529501217774930778562967498418689408472284982346798878066475067969355953462577452852299348864713202774675599323847603075101440514965380918090500441574467655031122141126730837718468684498624900748747878850839278253889148639794711270491006297788806136206583609101330286315
58569658560967264587939239147953600619255847652062051714347562245186497186091982786866905701149736633916810839005613003174220327633463830565487442551196902016924990456602736744399680258777692741883052819555079822788445126233913554024641204011171480985110298989547268892323851510203037265061979525166375374196847276447502224040419297365124834978794911164844632593160536403411522643834008288606914017491042310101852747731997037704942913622484307350931718837549986400796029626590790400793510160717111911116801441914379028862262185420042881189852735928520735612773594952837850128998218039600564217094383517660760349888857137629946575770697233171489510703022722232387545262602276284391702387974955738971625596936761804315901279153918240137740257453447434897666551876836131968256879390718925593958784825359578006620637227658714807960504523636869581598059639277177180407001304413322948995302002975196150707674474608754571732696540531822666801238947299727095735706987066272700344333232099290971346648237884846667793125901254367984450664101557626509424850349978937872240284602126082714098888911018323397010671511903764312104706776978538102399053403573185430885960334370874410477443572228506690658685284460467404101095578250526716976087040081150437544276117902908024422421971788185832024611501016682835958274859778452845396265837187452606001444532813082104429406096916383698298253939545839956983662667201589719278557180081535702894196386910556926091935783933048326319719335119413368736791083946026406025620975566941441994087029859664987107174891646326408987613687526871087051405897578202372812128124736531366233982878067990992629083944840091094007799363566768866799729457285346898300842924203377330837637073873594601073028904729550541869572914016022611669794276650222106992255248109909041477227772932538302990435379948185264941286059258173509250883234448080963385131457382796965647318503492720083078205051146726316910864245405395439159241162575858751655974663731178447142547115740266612234403929240784516167130
51920432311403871515113100518727577685639388320691776988175874708086550198109171131010071911106276959402931457385608422763988528718814476397814403552115457745096413840806325350585181288716521662012116913089656556297188715085982902221449994681630218437964936442320787391478216687908217903058261221128100585640757158282791292148088017396304829182083856116576696914842166432097920147620232111356401551165153489559661815514199982279638812679376483348319464548905923211042034064419484777348455290858798391526838102301379345168508032358556333110301315859339863690258679059572710256382248585394483092186058190443279674645328249674038450532729040355220463492502022446815663125456493901673386199636489640049010666677368429631921384546418957523730405383304428565397535515818187652093874534484245027771357268667721098366735200537472441216821380420620954504368236682393275855126238039776198621321282477911713680118719544465367875016153686707320695964258722040822975809759593976906308805244188511548948755947543912746457738061565530716519713167241081134918501973826739021484172593069513499777141328929736443889220619862691489015649243210929597568564547430948813132671040781638452369284835944133923240780767953175287558722117901106695398470566569210207177272401106331295413031614114233240842652452039520120577789477112213113151681038394702186301416182218247167584374421196913821288796001954550153326787772982152472873940191552643973037381956032576392763553240937704068420072583921057053471467372933417647188614551861876072347018962154219975121743480241605411751024300986478918975618499472073146934217998545512642937199165055642656035521248653183517787719217771774678113026792233128320134509150459329079579221641634322619683342322639111798635465124959513826815422uwb;
+constexpr unsigned _BitInt(65472) e = d / 71uwb;
+constexpr unsigned _BitInt(65472) f = 81119048438667678167087681061547454956381140139384237245583550530711468333938645409844208679849133854327057611553489468128164328035984847463655845071788960362320831883540865379868695908177633856753875928042242813143759736902234615262784603349439052388136548958388122154196710982058319581972751161275633244818776656878408368619251890145307362029151951656665307449952950058545413678359706906934219861128762057927752878526888300707475052550567207218048464686590414329826011899493877618251612171176743649929542208704371790742051581061564924686728630317826783167436874223826889941491613321988011685915234856911937130079954611950479372405086492241563823732858570387693419963851277511423963831485548548300094183848777598065592477874361374791093134495858258772895350156173763904460587088304626080634782396169079853496729981933220728341579433292565018270340924969893176586137517212163566854668149084854194514540512972367539162773979546948405367825368816551439864599197623727447961973313852940189734097264773581223296564673086780437949527333234617991653039436427470089995629538677536322616344087013847245825889785129307101277587565790413807740056128153459347496379119475908790478674607311021353394024326546901226384866292029804856762549998378275395607862546028767887582384886893835304122473034112598555482961034975502226294476263955953414070184072692312251960704831523959513116199602910182193757720818611785611751424872435670755617225210397949250736289480496031229147785822280915073807228690260594124922546495633080711556467302590257107613805828154894362406553065122318032375945758669501668531919640242996141135537856152350704071206074158995802163298941744429285664265346600664813581320330085506182810969682362910556467384482527355881041049958578983724682270051978097497573988595202306543328228899366535303141001836130010179223955484310671691315675157890298161246314699906091629686102037118090687227643323983661402317966793329148608990208151816103891551217336257589721797
27951167867760796298746559939844434620666060767561266938513399433933400104720263720707015830463416462164901808384636497285484054208544550327318539863596312753941210474444323673225641006438684293889718029350113224276796543332744640771497943206710730499684894755396545931285729010229263377807192510385556196164268971217106632629631075101491235081583822910205928280100180700754686238485243117054887572991341731344931784784626813060443724522294702623704638935171864203334791955763486968987293948799449772131999612105347986930121789160200137746381934809621459576156004206046775750578628065108468188016304790522577284907270785005745256582240118149849555336899537157222539146396037932836999885078193715104760753666699756545624469761097179491812172425547214348972228985395781936539607845570133970555088283366624865490169164694151617261965975862370096676126448783904559771109011160904311346608140479709442883418400072249496236125914292721526065232064667430237629174133304621875379531853393413425902169034749765570438641161559265087042234222961898781676932376925627495049324772584121269024293871161606017864253664558103784802474406772202389026881866385069240486668271875717728059133906111497420012792608173373301810214102750605339137142573960925909533671064168673979819241233125210171019305050610063057404080756629614779067879309282150362646618899739318562344445968475204072270318456595263789030035734407823677370304161211968875371095173798308882140452239332415228371575772759915619828352186016552934119714817546639739144504448793430725357287421145444641337832152699147202967456329621065944494501543857843353002330331764617973075748655133354748416880955311627568154355476979485211144717246432482023019523557057410122203805323141553113040353564210101425004382931295263577562317161539615119016082446893897902199663582394653009287343544837133835646749498052067891910399467997187119057497911617686108297868703411880482867140500545896755208996259376175348576519258696319733304294410805608350787517337730327582127952
83606904209151039032775555177391761180623876627265319244278862632234674727564648952761086858770116170746048705033297833764662941675612561187894140432908448904677212159809247361188026117174690615539854644032831051200585534020028322052257565644805904640410621700542054084352076737177644628786674840945853240106159840027529652855983251422964047546718926242124100929154442224509228075024342605095179365306817194003851793321020789629609890533043079036309870683289415726869138844372117718061072907814222400944045179259392086190214986447475957536675305348543961163086939667249010349103338823803231055993360399922893967175233577703059651809680180916830385604185139895936713154949520055756415890660964245142728932132808072035804254048881172376422052577123073035209764805859906314935588022495972159906807272357276804647411062004579405147072436324793746973109492598062935956054265041568752656438524861316964134696514977706256140555616955208747341325692801841553308843035419272740994445960754603125001406274143169212457825515083926945679264587259221565251341319126205656295637454244776010010028345770037643017572750065728989418273836420877399613412333168020792862470502408641904364852418523675999258432223548345081894242675205371486464356696796099167020804550505182475643336962914264607283110979758963112057948732633652514284401808205269765142519650601117809188112909323245145447753203725156448996464453862970300211704514571240958437759233236486413022873495694723808724689813156753129345952159171857422545507857292843849944684743620210711567400955073401265778338829969288792971179571341106978235578117467682972569013014698473653665408721762665136662243127973046451178014707741613007694142064401066476815051788617261951780705044741932015471524408791401473953542911180920977660877076291608011771505692126166690503396752777832785614190704715099581206278581868692861804008101796217574374639503735702150269722715237769048983758648726829576912537342869048208909122666214211345199126094093211584051532783831210899316630
76724394859447175761223776998781734439316313779725605736797116945518910959312627147450807693644698768186421543176062568269126135364923107771703835943600424121438954027487338321722570442329551738760412623469610835330959822855143237859319003755140931892768237651598726557124619934447707062427209188221064149521440640066748619847807496914428090027290892686426829384522012606207518375267107272024760472127103492224665241988704409301564171603029155477114602819662906864324804894233432183235006361334660519980228744254510540041914250111138836989771057454654361547808598933184456396356506645221284696351504236067684263932447980859845924807299229049024046473245673203436521710994303419754660467297271707120629690385943875053778343184455773738237881501082462953248165955808898084784475632662320255655065971913920219022035914633324174732177874088438600417796719490628693272667960011196066396582965342176994548255439142090512459388501942746352045185426148002634943204379246479992639232921830427033287155498919776545033968687621234093949952909800026995695079322733909577094852037690747955182561930828130036202145179371869408388913075896545961481134374398000320119002595527049537405648113593028246809564199535473284898041084967534286644400118310477565602342339149979928025711422687497006581756870241948832788965327620211873440249958591482493647480343674661352705657341579860982847393587969445835000305914486839931462621936881997112355704793534251184118769836474684593365406384472677451238145980115233631239247091702886622282776765570550640577164254082902084481046445184457510053388880609565532493436064593184865796251942514270217511141402543672756335480006142305884648244473831525460829363634009149069107713792592957352156655644528612153857814065482670082433717647773255802209129658062502706566850593441730666442893080895203992967625531811586694873773329205655322405359033341706687324384018317037001782168879764687102213649107684017881114046056973626288368923831980412157547214430507911000409494544506938619432275
41447843906415246228537746122329541875239783743648256774287276439751213102434267250752673336026061970064318647746378269427560995490314325654295294424477812115294128917978848063057507973724611574773871073116239342543684785959074839844789923317220088516321033906928257312850774120174665241091489259884323255671486701609659803632720440485918697794401885215429295047960170772655148906732483072594898520001082277186599404254952814313753723489702943904004209925859305734770083078246800206094300870394353923316256416192528335116359356400053438958766601296752968980305304260126851116041063718660645982537486279373390955913703578336512098610863375618523528610391185461567628159226478845491533264452700111858218431720578073842550729661045110395324560966052622445195203819383690979548783891975429327924698041241227746966751457881569340087269782615967103338790006049135086362569088694960740780301748953645654354744957545220501787234788397388505303137517822865567644654268867643048990533689516358987637096405376155223459631331866176264543633432249803618073111380776621335292041910918156666224698874517055034636969573627706659518420627892754027217568986060433987500131757163210803314334051028547675998634761020960364673049106647651832750291596400973877343739071310689765804907992102591039708635260063014047386197005932594088219870859830180379306324094122664904805573960247571032864767937134372691439993392729411274841501604857831043231461265580067844366796178171896774965681289784765780296716929941513155407249412079562287035992385689160507161698103985833295985536918862599355068166339957001235603229079538416435620173112817253058354405544657158846149605799598005136226504760994978213442237799295528911464141000066790832479751505927143085694207644040410688351153042715738133709958868215094124036369780580728039181615877823876228907081836760519205210390883015964209629761526998025585967125020061073024934413739015120703103525055827403582500219579361054938199195843185238716337725779955868181452341655799707206003438
31075735582760405724718435768306213891938608136976024182936533865067822120763373999032093072144701003172357614128057108509883592483185981654626042473858739817516943467187284574179323248186123809162041585397925788068882568051266765184354570768496551518569363579486647840200954962229641854303239763614500386024118310331439817821396889996038066978959510648241319600931594808683439240573438264388550104761197674393483624102377250831209204718698890139591035176160515376408181489247387543540891584571151655370780464255368488147728614559283012597764625633656257205938118055414334600279916684772457343808344909791899468193753642651141939075308434960794995706064076455468164519893801234502836118893769474119710830069671718543314835824091163008647410329589311524222559072292727823700633661854557855614631407144340206168923215713083976885617655682185900946633889204746974957541158276769093014618965798676951000286999404827467310978615815118670125903019189048484234734820446691262126813532193692625576747921415590423077819253904121608306180450869372411641656494476507236343544571422346484204692538130761267359879585586937032243459047680902304759310549615335254658959712689453849472338160591216486538513534539128009616329359328617298348190696349196355352247301533240578336734350776536981291404368604330805042295486126046347267874990231450266390403026047344511979517502105977363304556516220230337975385385383880470387174621862925527007836810329513220296604318549887186470423101412479965946839752416742872718664962560887582520705499702121939343029122895105303032038313453326170606890848664765641177178480755680016740951452921849974355153210668362868424495246189611208435463315582473501737668253499312965602416118151670645642220771094556286136657516229100832059725623749904919894401356106746320377560352012205314892147432335276798962834455393537959375829404645668200436568233483301172575616170213150140494499507394223993711302898483222103487930264452666091670203694721921529331946039817111487334685826221326852247073
44961686050084595182669371775863549703595858015823029362059923704401908771829719157180336503430116716548633754689680081817505016700333755593763797689012292844579379020029127375863019884877689032184615650546270273754978419500607831225684508776090376013315145720095774198145351621403429352951733285655442381483133062462448870877207237218277952979379182757247788452029508696570488503322564296655547291293159396498587812247048810855422075907102519074830482119974646632454684466511785100872254577824923892646219715283408023229043837722903387825185619904121450017633423962806734053983934885694936245030629211896809296229178870359102587984788832764476132123860284993319553254333377839285141565016492722252221500663685398500037288543684486768039325272434073801963463430942131045379202104691431436998216258579637703636865411008126921082151389068001167345018683753020006635937917955530346905162334685025167618679504177188090790942946213285536894146220921886876894907949473675184610288598142017504617242654104935121705029703374353045210887017164061794037910162870959118258745823059265397125649060836584712804805218669996212063992598515582310270762188378921310553219011284838436786157288363931529250206667998181297158027685071735143864929936838618916161453511377304799441674298890606473463305374663350676450748359348725937885062403443723509124094601154953488320671752278073393621467650971847117076922250477511784298673658861500721612064575807608998895607851644295453391456454531215271085280924936192363348006487557798534384978443971017420514310413011341257910074101557496425650045546328569522751909127274474723854148277045201115758974699352866898705983402808577306498669325143473424401614778819080047903246862154326670516656908138050620886701703196982970872862366004885871348263125410814963357269942863134475202775601444469305395643930624301668737708355311375180156253322096137444456043933566859036382939967257430039603258368723799047389083839037860709580897208871869627268202249353732838171824296209296294548516
74265164339767638474903691008062042268670366974229228522480514696385125016714762195819200524452311778015463835743424567593502656646974137262793384005856302274397777003979294192336683164867205000101033340402161407016817489157796121054748108733520402986043007462340652635216887154529262165437907976822637101963219788240508996524444109150495704107911110967629322605863108285360508909675014581586627865372533736360167862733807685285413159411044606784304574819602086409889796060368904890913472214622415921485862004451722740901860529655937189411832782854673439836492126849409691876760521213417385035661288049923323587519221656143904875406197540114630250757316275635151883649190454364842737729067240137760794174320201417813324705296891676330889865805936680411259314274777280287287230401124115694233091734201666482511482413124388103895956882522571695529706256079104452566283361767332846592940747613181700511513101142534656764787275146308772349771105799303506762053169448406379921800459563712115071721718329989678556280501598708372613109893854166535267872979382669154288671225445423073569580723693889138287347430982110879180790285172449778772869430146949129036153973573573402816986174151434355149712683150067091070815593081213191088738558935835542255528537525254377382141506223876961503610711691634613089492610431338015432309364181014499408408833459545947497856927520085071502812982378842282618277655018988899418902857167161422776300032059797091733021548663782546185295001221567707572841666938981738807004248332847531653761686489217312878152911423177145737926270671825823446254237402274940652248269331862391531336150558213859047591480391358455790183572756699610150485126942825238874906974160372098196890395376618426833493827619600413296249138969254815464106303112704638992290141166083412793936377420312073913335961877818967108118991861357814363554035447304960279374244877930336407195250039446931375620625291248088741999233096001380238199021041063148407290290237599845973502257343428655048885440422549941043744
08997655486753539560271317536476602447191900711211800462350653812580938643292553003910617225187984657206816179485061327858916146963239041686972554181854148403208269427381822110491489918655614605524752577285143760046267398589581127832188225819386599952580083663906740244175596920511281190197552485188263356160044339628287015834751680649682871886462142454361161357427009742411929527059013571733353660724273829536497987753101695456137509210353541072378508067731704298683059774215347051422397893998173584893985284896216711009957854761425903616421676014192946393405350735240031890215678920645291547196117803847949651028771856341904176785549724766975924179451007629582347951456341838411985756050366933618909140666386889338047831220032061408515178453512844379012542595179639921320474408993385764612729214471173711527456564640266283102888853689600980963954837586461726671604527026527637285444150197428339714258233502978498788910815493762743247543203774709273752944034615714455680471782272286497802874076297160708048337848295318271008663062857041083817009087146886797921700919071858904827550566637836532858503693908017341595372966134130889522446350651036451915798540381580548058101265165270002469405595395537788924497717331561268074096451054737680088089720720082175060822205274644213973854822915296286104276977260364062688300995608825932823370353707092873506739336276634636102028388048301355762029902731113502256019759624410830154311050486463437590448777707083883340991674173622808218032625434713604663408309788893173533777502183295600190919141234455433971623852027527676692588743239190919158672307440113503234019434981338731257419353000691508654713386732183186996820378488429607042047267120367704031356452420735353055941007872082253875954707609745989413672059637877874519218726375512098375155469840335011376501059195250197827740834289474865625383085991078163861914386902980674320652742851526194556402083298731543265562652181249534221021040611647597394060200077714474496335066910535665675547942901372135515028
41022803374066829191255804461926337382291971071657619740152638187836068987044078311655693090136890581847972811444227785568598697490213744389532539849062085793434364140569384991142262252002341633787983042085815862009284930092119956533000012942039931918424874436052309809612076045438352507919301315931370884026985545405614314711742990866407383288963891689191987720090661237584147466664348258734606093419545193907923873057096186600080087829790813417721075180297238453473507102656390704903022463507389818244022806666755905180037866359102638089631182462270862768787387368570734224286771986934557843667679671999977789368503351679562337657410015516370171000888537444667506161377038374275576286219252222288250058986308656559322111651560979152776988991823243693590987572629615903838137358071995845670867227876752335931359417336032690435719777074026099760429651423260611168595239702289345958194686977765546787012516833388024556329306161090878811254191812807181148398358492263376089635636576122404761772834195976106082533669359788834602183992961701195820952116398799174622232396129037427479764558336883762314935238157985291895843414254671829878324339001987738619891291427323314592666630164935825946743743891716827463547008617450634928010342360487247773526521316117380620849244670899334798193689224088577441539145108966591940250014425667966508636518668132105272814102989073304527222247595005692471942155566669672508265839552263578063972763273633393197633385588002045734487191760501135491526337629695993173298383444778411111350828500653946760200905423307420627645624295420909895627482832393225261330169329637488004609091139880602114081817083587800642684902092333638498363749375561841720781868464322821196677454133629871922182954402660834327698585486213839832288uwb;
+constexpr unsigned _BitInt(65472) g = f >> 131;
+static_assert (c == 57377170989012975523235298775628163096786299329052925173791514226739126998299132487187540275332383345633202443026572283244195003730186546233931040576254981863691366198241646738582443836938118929157834265839050149448112591983102709209299379515040179869431043477239668452738506539490676389953277690877463630052873929146723173084770756201506988565759265503385683595422822915381435833997954584133804797693715376511393546428928468053144514693805025551921210093099095632179601519025351007239644806371537347664790877994232302065208364877919439667571152385500660502806582630919320611305765872100746931444315248184766234359238222381740447636693461661322019586656052268540800014118990950583495903380068020786002928240326898431693869995996718398424733979832452475783024656559268509404424289861986444627782117438848327446464800529729964729718097296194135885144761895936794304601810062573606092269764133696154408339619752939837601410428702087613619070704348681194015486008116310922595347812974165062342557117212169975803171297939841250867923983149268543209999522465395376129489798925020742018329341237926186874665814453600170581435306674792594350254610887598332942194741495882331538862369813458167400580253173723189529350263994469744239652974208595409783715833572820797712603002019716179307138276060181701674177428736049429149797664004611079956017295196252957890844127128867677075049650430396318798397551429060988758309683253767978753911099615434970166708870436638501594171804068269923661969833755536739123593776058456130540504821096837471150421391045389049925726624976620799424653072940119200833302562438007537447404734386131521034628578787348919469268813671092767748222787598717204014902891198311288659886453038762669021123791782745284995695243470021516088024702004980924874633482158825760584623238586376051678961742550579964436226750544260417107242109791556517723299596710714148703737310949180738966617948021698482116874864385210419740339388941376554729711906521354048083406958192358775355
32534550707255488423674016047120025919256613709437285332222176222309463224822054818545694514112586507764582777232202865299048681116489714570343153342929061518781833439138382807713909431182013341182586437529694867341950839290873530953379437630548240448626215384515130424567472952650585083027781673466045964852346041180622363201806014082289878753782663823673675142373621655981974892368139878359885980956313052321465200568392708874935521713114905166349958888588633091913434689030787996132434424543428459440089892111198487937866166830610793757872533579226317655715387894250143903379903556303911637991915961816433912659467069164672927604596555701117638342284366447995572847722304542204830314710636479957900883220380531233478614133650200312110413027197495933214637601088484192591802946834102398331977285185166434253428451259145102098207405489778258853059744880738058479025117440340023899778744635108367203085132702208753595850615928513562973418609566286067503732370374716087994980110582104848707462118225705958639813530812640958479455837725612438623783046732383734350336247368038157899970752677592109338903818749048449731217953443988904213688219231080739454257042350398697024003879153931445958802166164893351035117226067791579587549785078104998489159281345398119459611497848037242147011865251511405041151311464918739292739097527922224340904463639691260830341749505203540022507555378194632904509498507194132648999601691682332521628904535444735414405053650908384178571089072104647885330902116709937518801816423980169632963307913342328746861785201584840319438395628827006556364331586642059880750250533135773046167133071526343503482450355853947552468801989680544483626281856407335599332273457590052473127300004015042530137912751100766041199604007944245859430249593246755574459101793821851699588522813096302066637476037890819380356133131745356269136336135461166805360774408118290207003962710188010764065969608094950265872503119734837147560257864380112770670913436701287519513163923493875579961769176659715734900
93022566483158769425334941159623235386494590203295980008624870443953371554311310135586958465541058841764174713470652937771525945753964335768211106616081576339018905805758847515672139837715385543472154493379129028616151020406955233460942418926684835329726919467328066773159793843503014549671390530893288433957101753634234046885297929586726028716256694735124830235744561707265312602609461036223371749586787118502294592655371169214324910809357018985529132552239111351807280691718728910624293037832870260090163157169446339193789288257238831656825444286150427795349229255026491777992135811585215681585024463975744891000848790414971109133232916809112348106441903547946681721653789535058131163188744519355881450786329800313537285079798255656659203152765018356187538834965742523865285505557528430274954412057104053046386111595582597857180712608374631379138425970057767797335579239597537820077975292135111244693203945286483970770714534763821893304568149472624432382836938182781387421305748460195528229856045466124837448872079120678452790674638154078286567896274948810309851076943379305459265474335090122964265107168887705790768201492793061212993816550549520270229773355241270913524032923730087859907163114882610804903092545835358119876732940434610705819130918930809366361159048450419021917035232010717872596124475926767925057596779667804525874319841895143193471758912297100970963656864738703946686075580380515889361871796121437984757052780753843499301911773226960486574000147632609123314122792027221103861368940832154939226290766061906483255664027239546646924550599761577166205807887540459392676037899648100447064159687575692305108412268631883356262232597063564900364566489241810522237704158592799167823657385947355179430352539362844581602490892525403935771867119077272591642714168391385403761900556566000642572722340668862240491442734313304138296455789602452763812135583299973424187812193870977872223575940158987184267810925570580928597395323853943680007497671089565408187818584598154078186955729014247721180
23522678371353095421976241418691028000204265310601381761839280887252069818979764758504220831106501028185521829701363754196239360882890594202775941577975089261586955359977289873278823433293541610042350356067122216096386616651954495143585357708932399484513598457803800808749826728824363615001756504010336555895332804363920476806591766278431679053668388627603963885414289439458464070206722458205258166950802964072814303267777876740132506958968087233409171300526778517733904003289371434899975981736840997906816115741902851430181786713326181575271955416680098293696540574046267437039512151785873275668761543850231612184977894694935246302815353707626637679887407654905090274043363509917967971055508277358228255261720032037021724784380712823284038643112418764060083535874850655753002234160156923784288459307336864286469807863209068655601511818538160113777808067018484890368284742927677047799037476345520137540192632669168740608456730741177178877910421413550084270220952817191104038929625042700621101042348018035592395814083980607333923224063214017706195565269113470819118710389614439662265116267024838305932242153012585666128852756106428935734739343899090102371508357315778430818082281612497811051081914600448559972827189212366348562276521096863748564522601522836467489096204107031943168847093756146432733254310510877058981575484478312475789678500465693680957571230599200379811794899000408522453208943194637533274877838620032444084054666122791538228798508122274105344560918421660909431966213065663391903510420571528276287513936427590149123153659453247668880384252580860685522708929772391190878613569747105616267557722657970871999598423567174785065483690942999650200577908282136131388209014355457877079862945319452307119868290433386447955204453003966215305376845062457862559139718646506841289184761081170379608471444998100858013325063048414873814363324034007806940715284690366133104298538705401413557371978650782670338051340279530280798691039878257086541595412171258015869453114209998969123972862084269553172
66736263596014300202459270354337450137551255728226027402869650442010984871072643687646488306570505099881079212067729367358399899346689916870026796998076489732069507584357322464675785618089038260653274203520645862354287226082714322138984864294855656507180015043451784687280232442512741743125842830807196362923494809576067493458962984571966296828565251369344866883585324831975960874071661458147651422199964054737648360203147130781377413516376349117343401465097679190532726718699202248342873858685593536772372007438447494971234132123852030102559737565938565601714898695712165712983411002562104427135789526567153489208019909854288216900991059595894843671338034535247610669639536025816968221163775012115634958082524866006124411067990212874505530013826957595844900214754508355995603176605288822379633545019419679869917179563320390418995260463073402192192122848113013265472654409515097782118768518337906404535358539543026701803666073490916340824519235924477520007620619208361208546983157794186188466045835028399237709141123105539754274637425973204201786122250531411194040030286072464275457653824145672275702083063442577811225161740499308408841704774781609541500860344040326565842322697348673918442589662106813632918608053290660851423416676030019930936474288801562121171036104970058084578343243564961415460915015496551645717047471028384215966549812406070406346424339436374802851148418118556602193060460844207909858280312644632565544008238207117845542511707395883018029010713979166586912861626335473727323237440144925720813784831319186840468689328549867366658911839912943118291686871829556760864429171250353867383536556940396054807155111271237472402430057955078896132325295775590732260292535217322384181978638825872450863249313318052646205940865935818444642467532037998139996212717088170963105519770060345053087272307375438961463591639124322605114192225322334143526213956651260587914958549030121853621849243430980411553418318308301850358974479938231810545613718124713328592286496612722528525841293218105525110
35523800830658482951605246667651059540379385601535125460405720046550239812537312673486328921400221332115337055641048837584678744499790157393755891925134245641753257965774447978807307481189247182862153639538342478258838435954839563828401391397482458126771660281547867150489094421940902107357292915100425608168530389610428247252805651505155296009688877642745052821719656664147455780322470090487571751068158367259183933021527183456195691719071463622474705200559687257608587332233076390981637090967693986407126185272420531799324419561049320540817038912983901071402074097240421063121149699770854870376647922036031950920864482817092234006766682836105046986086364403966060261896974997081521653504395895517664550308017574029062376972459598498407152316687917685006353013130364227576677382814421996302213460414929051258631555578175731144460562868703306182830619804635631190556719278989718657957440976839070799980698885358187620622680927443986220421754542259570044352128145325436160973389384406366337600684448241542990433053400195561093182025223627018129498887802992733886131369758227325563427158081614765961218704014707896752777662424527310422795977262091893886385787324870457569062294140745354883993589876879506379145384135792696811449084548834984147960248162825273085575767912271346053028383383070961821034107908703635309441790542545173309822687649295623871765625723732438328760727296026771517030230533486946531485096102010470715227026978620295324020077025871049683602787366566755380151380007112265301856400984991491436840891849436168003376777476047902522182788827217635823765987233681834255624532785783391413365891358115873761520404028536157538440899391557909615198318820995329895379939271999338220335927744962266796961909897262227331917826502311668399420774254379515025695111847352362661917877484362902370351603517349830848406589655384229781026310578751659139718699632225244480957610549813572431078803007895678165656210632459875386736264761986823220430873783930112754718333310321760417836457961226795151813
84124221256382917493817352948297883200879490361107375062945755165012792984991897216784334286583032930127727950977500557539764397169634593736811624033437165187826915800733863530025110164137835837143858119026253128528131272606689212604891831174209747753264967100079332465041072368950204986256700096244652876683043311570982466386738966312407703227763474714531165388711277536518197832749220599331961515896900293431422369835847287434772881229984625732732809789579395508115983718421682952414035889213306134089078201488106234745234057614038446948235855188876995041443055843357934050591293112753129809270691464471312231024917067466182223492930537547372916340919082770793302716953086664515281644394476430575935749121783120312862884889459242098791265183906337098873988028596572796137063072081620722095516813043638849671182629461743606866372989070069852346205995688805220455004116397068106330678416424211165171518259229056915374927045569723919448383668491014996574632602574252467664205521471365613967081427059374476089018459404037302368871028085916061759010423892746796285211966092710008134819655582457138439023666444944891530723560011557085541565411827972609059229111127397226427629116698421983897391398596219832987156368738782594437893023941042701424644414086708556238888746645033835725629988268773048943265547341371481032993137143805554814496307470405895876302058560230883482430776645465119255648809144708948636412538342277335275751572751372446894792228421604657166669773903269993295507978493647505792280997499789246419864086990167630347037708777224613731254119345052268389188171507033953296428212293624965778790948350360016421973394044452412399224266824898024158536148457434213366242733957796079723623977022985608286548054129644313785693790557934374287353233328449826608313051205376014241926948821299136734323901589644428629483929404221881510853170540833957501329075981265870145384780426696760450336298399024944684123954304186694350139242512575214055573533219078625595654506575681699072565203146347216173447
33517312337075573602619268983743792937038869941629715864991241351725001143224296826519982020128595414482523282195916566610789616920867149245837777624921578755250964657534464730742246542332642939215242300405236072115345725359294949240309752562991089842016314516618591156256175296019955578404872570924929179281037485956278664574227298997252546762283448703879646582801842671759489707674507446381155075408980484321918818575412434187996929685182903607996308257819979600495523253965296007411617985489485631331607322897184836024472351961657559236925075834162275317919089476470321132591457671030684731515935735927297760737572664167268504099453398622123432599642530743102260188407669759771877199408170840085041598543888567042761795175723627862868357763526611268153072452027809637592353382835291275299471760174203821456020982969010360194154569144183636623403645804081490971617469788568004493570086873458108357384948026746098566677524998211153908528054823038391270982926901207343017027619817225922254961311955524414406533872189330826049956169143725474449625846323688826518827730937977368241760358765508100198419202569604139708833084808918561418295506369974121830899239517169822037301651148314344391415769006309192715580413275005256821997890156522976364061293271999184216306963652826364282421285080552681377979717313468021539211490873822162093127503376191851587529241091724239711675334982522369108446630373978078521058533829976989469162098783210677140482437180149279787039104967130374932305045350694749162648541973781689301097118241247340920700077581263891407063259747199420986313735487985276964282870037875237357653048145194423749521116220112155086039543157958509647977855314606374701463773352619750849305724477726576242805667602007759357940653790953160465262240426782839397524602676249552436357507290457751076004970023205211048016298651684132726882392015478470211267567403304909999890707709690731640267486569980660984396175890137185048798099884909221043939059716354963121176888855526921545262033539025524968661
58297072938397684654253037886507923176015842568547500575317921931103345925022376054285275751107556345433036005745283368599101062074016380443868168633824842418062541058920479996492368265817479856981511709656394181208438594688837109095798647811696597953850628888351130599973740709330512544328351446514527304606904463384098809661094570390728964671975146035318946662369165220633398842166076744228205470618788018906444074050052972299133029079059891426689623993304779057917888409938593391894488427020918214352901619438469908388094655578022722453616843768047057309981962274222559984975310631860463393888850803153449245488525221563098174952463821820911701828133972071776482387500032814540207063059866318440828523821529078692765679452873770114078312482927193148369336579487203707255764633974779103307862101861105428469090595708377163573341968899925804919343864036461777871141259910765752155940717548445122553006720170063985204937632963913125461784659343176943642034562311752791182416018179943060538258517882754042075070297429403948852997066293147871781700122883016752213610885855400136528664593451226480524305444547389226237173602059602079883535140279960496379436295610108508222328574113343568326330502615199295320295643204212587330196113139145633824489922281407241823836812449536483426643196607176102518996157172648707799364769071762055052839872661192954389924222728794459079060156788833648119034130956478315901280943399767548869455414577006204759593367073696115892414764924616407684592673818523449604293031616992027105534666410452063934847430624612431953961645786513994350465007610757772027609826011364334116902438395828090406265946910276004011659367169348922784044466946064938579932046062320189442245547981582005234105644434881417667931727588442609987347109618335319183596616193964619263325296772375901991768082318159678769060771438588133925350130596038232601091769966965061161787666117618555549613766369466686601674486529544080605620702351789436883851790293264346262939559908179539194869515442589978322849
11755812248749136493939837707432235935529117788085255396132634824496109162108080060580390541248229902771512721078221592123055930108004427138094286568360330563318218084308662991481109628386467855831215477865163776871997519328151709013415780618918771478918919854914154296194468152273666077423743061544950596310904448403007425548897109037277480972163670835672539538449998689312373758669315774264509883124626980967035655029131398613792305479561124035819117691203742369119465589782234509863125831055563606828714918892781085447122203918667819446414276380855205213717119761037741867296869124268320452748990107714427341442676361925378642418301349722555482023301243849003842222838445281521322057605487804319041746447698920151954556062116305184313170709790188674724727302997759172800711920699360802063886569174240829804299999523206760958141293877384569405015559203473994467621937829863215769860655262603133191365334364626095943232408992590739192182246079010986217319903015010301534889217980664788324403254938505200168831699124840149090307724792109135193581334978980453959144421948517385726523748989575197758434898688542906068559717985212861609354436320557470737689343713752675030695577044546143840004678208388540426881739449913507701753208395772678143431446763872789610598910353393327933983049729956012931601687494173098494285210682640867822541915736893597305275198025701325854826155860156845842261912029699309965219373327871140745396567573273856307816111226977214979759176835322753978160811867041793816448967839795503872543666156544717824660368965764738108535644744259013242678154849116803726409914591785248541248970129110386875628233500985207949423219262838418057121695256044990188524077420479497943082095944724774879374625875467782415157uwb);
+static_assert (e == 92582127764752548404631492226210741136326582940008620584131783778859037745853883256642253590558016705559054052046156217333720985359839353199655893719225975532462356427142074997907824239484783205015688825416676759848396476374751030559163912635967212991481907595839881451797237944295340199041888305580117025104249664339966936517775568688104760103343157238565179554744917729101411075238204043663393069771720455867279348896057678727909816795416802528632694273479313359941499925038737114472051906242193819111156042034581171624574099956226050813716468307043005426423250090094512452184038010890247929719151187967347373807380225963107051910448102417662100976953748286262222029988976821935932911129712469966822164191756930381773221454251078650418116103829104611760451597051452667281186866936751992550921090031616877658924586148160136950778241979437039558800909039825616260566347240728085063548133905693594700882993242338465589714695209832521615850118740158372053384086562292697129842398009787171985366618519790848915457223814943394140594116433986362996245692668888707497588892517137207482206098945800285603773133548811174013482644741234231592307217704881591773072005857092666973942056404039561165694434759578366091974075305309594911416359172646306874567191453569150733520916165150076277728901987041189002938528354220795057215705971552752047757728890322907713422520403162130576580702413545912859156487980670998955604414472441940986437782573715656483586845271470819491341313077047708614077060217355074928848348794147635837494322624042579302957392881676306132558540435203109663351842069701383220997998893737264562449516708783800695636439871163793168674088694208365832065608446066792006850566964577826727691120078243476122986218881037611459412624864350017649128717524892518526449533701598975587655327766362843924803882322560257686063825922395795985106595012276388506688750072728120638348736279086960898970322519848790694415658598869105417262690027664948725344861124317217396598094175593346862
37629606528286615124255067268044315089440082206472947360237547332220536743609936641568098303898523760239609042443334335889233345847415813892848176004032153192621566338598608501886792805522753626992240832195055257101275323742491638138243509877057308799513552646253533443695300373007511662741404713343666307547126477794201555856855566178817437383855401769778981400072179888003151764876840871161696634170132913708008318990968595319452041907167175025730253500586177876354809330896703062845975858187348560887851218994510669709861062010698636720117125156324408632107138726927553380351039060378079070482030028582384778609606444926435639776545208371919795243377231192596084044216430970496427621642946007231817680636964263901670222984012322053695964358729999528442269409953445641166305473718563084849279800891477807831429445033534762340043458034029654360923201755002448211658972521171106109694928806548290047898955784515756926817047257897598219171906087793041723514340768826951166846519845403818921018328174433049726787906221611530314241319626613190472801187316228953985629312704902687248794924756249861666957092256868818661977877560983303948762078578089171560598905177914976925920847259002257077894009201095868133143138597490223936709494494400534749602317323580627046883077390726169234167240421759766029070098430692843979337676125885119244961431571629254387841189799556972043472766537509795257394838746706697585734141428082436914796239292229742328663388087145807206190548122996964314423148204496708932154072923500163377133766993270318499633912294098061661547605095534157497854796540530673983281680205912087775372615600912263524670207120775146382904248341821921440950750073148085540658011519968650978488236215329654113485094003013854675499433064828377161254846540162688242766338903831403991570268131431952339541567403786214797620343451503664714628960944717249721329342592343714156196561328882525951072778913688734377815518218250585334954320153549585715322460667598188612766047047625190825445011371449919344518
93993179733600794009778918873730272924487377741238051996430766968127830812630033209354371727945038509809956393676483669979805775689275281982179924965339949879318260484504531839430166222394398812915504091572095306003803595205227400518578902395452068924514063523522292068997201688261693411544059054356706121760032336098061894411892659021095089202461966720852999595670850565525006263419581038437604503126629135662749715654308249851965705303102225103187660218539056082373922741495192667063495055261427574338533640873969873350429284563536767411909505209274255461102828313663935717406210258786433584197134620910389247874905921114613887579869940542867067223269133250420424105415946176905656458177271017679129160751989239090198941031674944197520677810416776932397950599133491316349287341573552540692293389940564264376549311679136031343692195347738666617353185336134422711920920844399307719314978136465976048410259058164969778439241674431316067395494758148840480886692112354117101032785128193937723745773065158101867146919612180819406775396844129335149440514481197738822995968228560848807717236586943646824305166818267294980444218285644933573502760381857619328308683781861219756248923743795092527308806372681676276144404148907769558418647660002483413969357909782921068829991250280120081201026814146299951127127610072311896011620199302450411006668075365222906756407654674886818542441552830122233595706898699872729786995398578270431009457368799785412790631316216715005485273371082998479926397057401586572038045306633417054489750538440286890689722739366800162720631591247614863608642224493153626576205168863122173579721700536126715014031511726060567243757525868482476617126043798623940056069396100653874617195674699607791498709445954939294199831258859982271624381652130224970790313748637013345915294979530795868152262160914129486516964231044670275836594228816755503940204163740305969707253292074475814676058249561929433134333660057287301101601231820316073168618636908190726177050175247818110578055560560688796401
50565593202384269775424177458837458306296338155722089525063095348522936812433884709029002739139690743527542896108368890443782330959255219049705938649301878750476507642918529970805315514871030404437387051804378795408058440430293957474989686693541504925428183863907955178278056421987119824988786435553876250271895925991954664611250332896931995060056147144129974378371666884777265916485426077415018639342424047827028790234352000766660746880082637371970706613701131468788048061096268517900554493353391056837944471982366596443083978072879031290872145095490030603682115688848351541232169212612914219975103456234166140870613605138719671076917080872748098620138060354468726702272863698668132511011797277088804665307439485969532706035019194984516307585286700602651682533437232637516271204192860587023076435315134112371784952102167879335088873458077760492687580103304815375837741115882734652352441176872995686959565825188909806455864958724860115811902909981657820677271700243766177957873824052073544462904513771095318565121202921124636538592161499166903087031944628025652978819945252786642479808772894104365895625270569560305348241375946270069370039619885619110890522633026048796765604881695959329749705366548086612389088808444253693711686735187626849807078363097992603524416304037673987897030022488918104904240602716219577669273146200054591320818107918086713591832467569405585304648933593839064701559099221256458633023872032494333523994728770268530147085442768222566871503289023271601435024378875176844779443480463949749350707362182657192676132680845969259024241040731795731574821025049891698658105091040158389042863555853311954664524963949335241227610550192285460550222412776165872728795374260607698115661192307481475679931749950974524828412448894653994174457283469464962774094620864428225049006419507430729286965668365760179705717772123937912426327029024466677998055869025586375981442212888473479046365821024535218757731042038404029099773173254299792260384312960215230667129062769126572586510861551045643087
19422396381952092614265135003660982724619073432212079637111535280939828536488960965757981862686651893108966238175108463455330805331402195289817055931923407212120395829584513940100243579996159252852633651113507521526684711119313085885695441256174522511810614488786200168920842906482283273020436636567113574789009738394763445620632511351421269660160755462635531623807209334582070217273102914188402768394469432532232623846849596895126776026637320799383224085182539515738353513096161792446871660259721319127071394951283919530770319809478741686041196831846644722926698483817957160090838712775566123061709201964883948054086103398754060196011132948432426206416312270503967548556459081966779042795568543143923650120706953976072338697337325553546737866084048721120064612376591359396792751037811496635099369468399957229716322189201366546488037999721404179733215806048630724213913405758063978898746297803790740480706587798494629567486263820427822371339879893329606939471073473220705532408152262407812848979416499067267822259423767707043128125475518583242435724339716463408772226045609782342859743813507144875103755546984001917808689853879252053375220552547861652647862732616004480850967068780538344211937934356040718908499411643915973863958480104814951538924743101846321511395455410728809696183765929379998599280261603064702874012110636379171262650425249456237057905231662044756309503335919150292386119681917627281434014855888762108058141289692455666338827198651221762459244634319517158420867502364500068400515648041904088417919402226925005594909653724775156882682539074054966420614729073429146190101216572125791813614677642346379248654852546544679432284724109722743498541128322984163179544003521120148206997186818082957731573254706607962305656240960888253877076372046544998880322203459209898191997544620194975161563337282748457780544369742674672075014848183274979966346919081147288240703077898315904637063877857023003647499225442969035203290096093487485984722078714851124730077860113866004195492326520840466900
76837253441970589225666430463772556359249502084980591376994436651741109935603108812045858836257061111489108694386435643655503570595363318133190975314435624763094113576669379299879979047877298240040979391202391964866677228338860102182004962956424168914834364344806404249386400712534763031829932743960305692761446072279764397234750540149429403439749575848830848657812222791117982664155619963998683072850912914322395766334779657728184710326923706111558881079866807532448067057406387379253229971248972229112839251051834424814052803713705144425918750145936376770334989969765941522051562319527592037603944008196341741771777848899163126744930412990444970629287473164038618648207526802355843985009383313264900148884890437451555831118865237299650313800103403674247036998590658232213046659066945246981994423069267393751732173862753007086921667358727996969128214260798544457265686829196691538797403013370398779833123532308239888530352263432384546504659679350788619617562975098311033202992160208321823263595046929594372484633167764684371063478592389989019993262289770354123954622487914983371095489879787300506269973836370262923994809081491851979148233210464287723547308268289545956100025353727281343306885170982649275108574573731883099312275402899355278010561115889951284552584210602172192240235668967937144010856156904288525973206965937001207686254561321773686607677024015765569995868546681370481610606612305336643961295748241327849599352676198386527228372353464255467163589382142154369711725391415150151737427077090938503391036481370835248615036035435239805525524939624513586824779990843403584223026036549164871994498279585076983299267350266471423781445213817651822478261150255874775114605585133555708514031499589483260801164738064688179949331387667872882453931997456104210519238094969069094347377374389413541242199714835857089203684459462494870779614545494951490685946503545167775428612031749346968651237139763717031086363001626407498094791041106763432551280829882335809472709855142568511616100186294425399353
96718777258031039568419565668684611584549910840650270056926725429540649981758870387695633594914814896078841527811381814119400071929962701016198270490737553183394309652236088995807551018999988255665275153325642672490800584430682056694246245481561437965188290599995261171206818127737577103106860015110505322573371505784111467364329416132541472344724789641384476139271290087399150072772798823301275546813798548704584832314769972520942424412387330334576053462748511451326550313717727049436351115415401012650203167515478146852541042558234356236925480500262375672255209443063821306610155732424988410158225837155096284163088902290301262946721398549562917461219704178309553548367173112526917898031892999814974800873205917741630067271475208071960413406078177105172872649851645531626393498287780828802983128915851861999412674943861049903624693946814786680881558362863586635266600301269122667894402524626446752019966524084629738141567964894683673347858363722666497588310922025956300565813476912824486381950448469598760936318174059105792650787677922377422842688773353460016768341327573317117127001960327909486861562257515042215170379002870586997668417437412084056408024843153877605172085245878299479278593633497866980533475882488475937889986096169514504430274644475164293018587317332930538213206214940637909553314844399936749210639263122139175247439269631080504222693434545161531682794478115832326048387772801077218236445561825443026210573365800462970316442828705995475294367775523068338579242227230838296056814844264852181272691690847395561969303610159183837514528576923047297940408742674154738164153101312452293605931387495233914358099361083278939647765142709668992984406579322300954211316775046231646185547819032883452024786756298169753890527434780315130338232683766792654978401922824445970722804220024643555723729689545513134024883806557979200972230476658255462960558157855668157359074875363661267959954017133055566118752381974408728103715910359551255373541077776460545647794985550796573299373495993267573710
71143246649459437824232469765844003838473392481646471107818981309869638667419142630563278903737265902201019464013373698659569094125831012471309929866344562957740457703656994915684987000876443258007184763548424118779910821071109219072655154340554454838285468950786075560075977154226791363790389555574325528623728263608280272224602322588854333385734167361839156854797013037041690904766732928378082688086182905721929031692048791290406017098091927442435844122464957123755540878153727628770347560195160264511872317719710203723130351458074982951370520770978757603465703037765250279901676879064126394373743293827223265719912316584383881945533720630293340633300408081786980697814826509303191071124250023469943009622086786484143361886686740088127755044080288106092359955319015300994170599301268729816983302186830361650219880478687222449706088018448579538414942072217112141498752850752582713779216886612114196465091446384199300019849027540441890763305627773485574947320756897805071882028178121416648695080071359664839157759379447472970945008916286675262801260803339984040537407841264181385393966757507523152168780309331955893388330302547359580226232670306480667115922660701814530595004435640362838474113096444353708564127059560235522437202507962665680658032433887521460819811405734613565677038301186798079455776675740163878583723406099533599650866066471464938767135968213378352044361707761316779492320132274140966283042053540260638134697613032587195719939662254889585550935104276797919889717421727799791609224047367628806389755349627428941708414696561765200407613289160171897235984950766110117331964080406886283954550672861456590969502555395795244984938985428191208370127169733179633264878498497158163711470803478013238110604931292640747575440799293547449871087725327249012504318978968970923878116306271622956829386920003907993801864574088838171042978824480166324648509259813598635383297361592729180941809152992817955982927958928560920970631887034804606325360982801135403107864238170887727205050276881140384008
28083435419652958618670207960650777062461355274910559417030961671643529156548924836502550477472694370667915954413329338824630200776333664812570409093806671470985465153208017946314158900781802567307669191878751484541748174663621804204380702802179251961019509169300052486002788063614613203094151093588031301465379292063556105603936494598372721962197938596354257244836631392404104249987383535354722179904221179866116779749977897677368618081056318874360605107874352677173957972841002691713804847766731026566566458338088565056130613983832021989821964722306831167603123999437414390273779602339099084350503010746403627846052441086604944652144679200545838398031375048439017447852050336579754361882763104670283491766082028363014447117531598447417678699769199955291379423547782526702513838534616910733291869941851842197893695263196593800083998981939690714086344244116829570847891514360775644611936068668410891121888072989671551040770844686050331981671924327746963849622675057706122039771668798123652290001766715471057273382475530294964577957716813904563781129755046076195618184532849854944607763244738467753108136589490285069089300807303423783042724466020296447103622857051676984063780930235780424078279226093967571953577579085123301797349159025386510453481391811031020579578896736388673626563068243444220092728433031839438654271567721273844233939318850279293315407811042192934736150988154534195150957861317891400276601282474543791458850609927220168618781858328688361677583022572402852987289530903439428291591351640826911513931896234378855793926094184629649867461377749820862152288361311783428144898687311257224061894140973371841970934601082910985534446880770279296961865075179339207939540667034649398632260182465551928975935233834664323653910774960714073598940999871889404546159059913102803430112603077837945473322898835928522370752743776004767790785046192909362420427454535328279162043868628615367414430148444280505973468416702467121091075677716122735981853155305087654090041184203543897366928456793118364387
536804232658134896980836455221010943495434855043472538001537621563522139593046154357787767005431104545971669418574851780250907092283316306268963575222624300712467342094598896648057984691403613468974488944872998622837323868755355189267111780877757568894059389970538280324764242551100008248461368426518187213353352357694434213832166987557418266406227696226731269684792001568400021847396528021967067824143661722248433981399668779553795275336703143817493437527034996864062750575476033683476592990160272467162084962796568071976074871258307939299418014923559299385665473033725113974451523809060277107680679572873780993418718878242541739331014400659375536978260477488357552107556192113366347417991104642703118092203083908784935287088794768526725852335467431719632035696961197574244468572784804202348383106161583679110160287614379640906594882850609519086285042813749276356637781379038220846742528795286871479100756150807333745975474958321450363517740760238117025425974486078154207830247097362626165324810782122767883284045649030069274675687700549598368628071401598588410441252531499209707332674122415338248580890294783064345979541280969199051704799741386653088518816114562299165926309199651937446790392973333738006627698157890241933414100089173174831431184707510434403555662528452402504077142425536804953254061897213891569242003073903768794005932350898891391492985275354272208278492706372857379439317642872859681436000458822433289482266728226315775670196071226105231934830048135875895979603688395385169281157101689790447091271008526926070764802626463647543922527775678125834056317542431590664776972614685296274947164129345320953791799602460255113053411734201692035340147314494078580586501891867882863976375198757727260072182528363757963uwb);
+static_assert (g == 29798432244916674188267874565994511129465120803978788344316864814816788118116211967064993859202651457241150653527937300318139441398137480739498016605169958074338468812974247940643983119687412709712298707708639065721844633834030392754622467082167575384256636124970912416863467750122830431140200433047618208655095004510815432687783237399333966883566662284249473184266466494555781284661942394134007606974555197496847111970137021016999418540004497135190180698605565746245843253924478007019667216316038918311843149665996622104492911573546889531777540730482311189993304911007037408068009066215218430070848129328418292313395373962245129226209492855153844385373084110919586111468993602111024707027363861811899881912842982859068396068358713057563354536496215662738647102018858667173959375414318871177496133849959199312926882218088364664113311108693640122976260449711327351932322233788110835455072118555373145049291154743494115590060507317111856539735032533192218230483260244334704193889469794662184966199537993963999078475259216878312482974707442036256492782509772373455272769273833882315645298854863311795542608650304882161420040697211181665252342080035197173802534749769469724703268158024710819175669522770851475093176446672238329528502908755349515870984599273679350446826002857904638165429577187227047143578608071452633518342471271063866280346111762071869789364903979901717205061699969843702610929231630805060328615287467659369370840319159313397133502893318829441500860040361733782863054788093908897708674331583672603407220706479492676072388733266691156305962918416232324775256248110177992057207141301567069682256206366821456168676509466261858856409938472156321638861395757026663407211258205424076803550614986657484594005574319169124484702443999652049989150938063489591743426294245993866451260505250593013109374230205818553705481323814467997764434232825216265839479482674731987963761808633293308337512265283871364612091159988201854370660145184476139485901491033635913812044790756144158
91352555242964101284992972796344340371772737648735063936023191724590645165666404382544555257591073036745655020868851391001402836485763511562026066172589037866322016617058631529429367416124098670329956697047362056152890888425006378088379251473422468885756303663121937569760476154566380602913595544630832969872918763311872631295768232845816586532952861281577825997523647392184398199729887882580791166919885450823988652581227743571468400581987490648901203231871120052859076075057052604185288063735686593147079347976391392764320241039911175114209360407040742294806345722532250004458689085116248799189422517472175415686135997772539405166883040746900479327327434871206965322360735516238911085979906146353028012484528129128080714365068276946307367256397406940004711064322883382702746169353708043980946171994002749828181692527459253355860239511054945567754285849942766291217588007325515339221406424390804825018590906959020974418086890776063594439185022329831574982584191135706709324904011512499375853624303029322552507635305901312351332511169836718212691515068490759951981462389308854220378338559266226930953298429204040001021477949560135895641390286719964491853746422977861963323369494092250200438051760210921134756761359510595598321643088972595641071325920329824672960874895988005570362718655640186411163801847096601267073599508948151592643683031477567971916008749737004497544562626993115311308258949170609555870946965310591340199024344328100686576149747227416677093302360038016882826379627092283178919999170993571581769115233742139456027874761869078317127147359664104375497193330775883443234855380409480818184042486292204544913705040328734864590174050266571446499623893146440429515409589230259748336040039090534989436819558553940223253009145320434030695882206115194899030952791888464843650876814915331754482359112420697534960925602054684948865573687637225221205083054446693774767076431566929332962751851410261756338764951677199049118731043593128998544552669718212381553250166721776609541571167221480313074
50008177637468559413002984448043813735503239604170691568448244606035448796607385591868686188863438697391408936033067959634722449650441680570326584832693361503933861921379711983070762837539100470818132221241569570627909852901051214549089136619519815907600154029148029526177728132496486329731637404528437312962543627823041473550339726309098240737484490281738045477174708427142054648456844913565629087315091161423872548354864424241806412040696248984439406902048872661272115336099059921956505064249404341019918061329439212214836716349980754194430317161043199543361988693162006950488836096756026432047326620534019403402179470234725473715294077635039590158693164544688513683266558139452008550801285424835890476402251215152234600969212890825065723300853728021574295611490709364269485514019487451439339471537353935210708661757804047417921836291183103463033347791759950031886711387799855806617746088086361144293791064687151139913605001532512193394473529941355637974926553400817476558771024232722825220934900364392085084866413711572898888193197217410720210889888381855755263144206294040392731571377654104577261467247382383809121099584731280842314293958063335779827077541745116895198199924453901931981986586741269642528572097241091569605177906238940279037930271270636725014936100742113383209548145084707555764734350959891677757671890274490155184740821666199420924803626302956182823181201207844789998479876264840224814576673725638741261486268333659253806611680362808842094296923489405439379077006550451598263581876608288712446927993256587765452851226236309357247078624747021543103099049516940323271918148270225753299780089641022678567945549605322737966061638123286554474478439398790796365192954659986450507240458913715873722786457620526180940414414890719698332010756883912888708177410479570729759507383409060360138161251446982356019667799451768574328576333999092304729124559279086022033730167181986146049282644231722026825571635877173909605170050494368043724620767875140851141961685325797031707314004318792956260
43387670808835418558634677506586435199004217308917933633779528930967546154533842346719884096382881942924876345720600214180390787910645851233124478908442387878397330067576987181883304718450176054652199888389760748145336637283014805674257940619738290930262825301386786230510361804626489472048185472733925891779724267067453733096077343064447478426009793051940246437960227278263493197855944995062383797697382558922940929936070026427171402887628703859868935135079107329440442422076757924715656314319859155753548658149259529376550001720812563487422205664760779332226992828623190778104898486092380042779700488516917660657023822452904116096237918857457696381824722898116427936892835447067468468407009042078067084750876549952565020965668757590614127881877935221459340616752511248121731457595547980112335065757832005891665164905391633974616167324708883637006961506520792896036912627971590991704412331257521924823476631105169897878844060506322822415206964793339314218595856304487986963824904423128022971556161524749970501482913358319084506890437142764406147344587629536637264158168077923943554162127130293481141343347153505733367154880523871795677462334071936126263595187983894733622188425947744097102772800798384967456398866974517228076050160896508568699658252932716504698114217833892857487015573927286387074461795988559112405605411753418237862771339843397427624643310193743445651017055548894766307353802701817908959634882329583556939122162691594208513303270959445875971764738074360361727430013357247044648803607516281791963055729724246812412172071844637854708873681127474627331405181146695377571274674214437806915445217802775052815841975718045601990734416426330360993618773038694004120497336677710478038046243055705865865831174795450314028741068624141624553472680654469753078301535955043264283271909904456068340262636622279301829323206627968239913393816282945184963299662978583596244380198766900193280359211101193120639037252806190653613853649777477220096431415312315755563965267544743737395010806760019102516
76340397888215981965164093897013934476465944159022516233267140714844488096961363140972764711489209379076389988957525181984342604645470146116426607541245303072497775287186108319108432151729825062843455223202638642639772349614710492834728590409969845953102430894858939679499219546760236208073939657525288358028022082163810956780603512511854785887665811342522703679066298537437885232548298204500599950908273771201630907293234274944718864579786686330795669220225427409625722972045005621378400299461883301277406141152715597115455738909769167767814103586113249443720337970736781254957542708331270350588701597947013935596275539725279511368760276649578408946966834123283490529420107949897032008040674145195021057559393246015938173674252281910164936414485670668610359523307820205625567280021781804186505768897783995913787110090855002096705705730891773598878638878805269976957885453497885541507911159823146096605432122899573419002032507881200020107902782047477944249263132568909575387863308549765020485707987255455947348341991176874601578971099337596679421977654925385353017847655131110597405642354735546952955256090885432536936245554541009179837990262133102948388768267494821795662928475293090832982967473392027677074709418574668599659641242767999368194997469841934801476400293243455192998162406713624386390207855847104148927944580789527805230193285658589356577929289131720784340774440076777962051752092167032233805072889004574798345137336279791053592926939375327301236221788956167392596838945996583368540169008184139716257471292413255052073146472748496935237089669241482161282731710229938710594636509561579221597437656967160061676975148828976880907495594736336591874199162905921908901459746076667296155440471009727300354373333391042757714250656370967565059924523250590707700262163389411238807372052672185017531728268073466388045946594122961755888721404111229272301750617062107375143114202704183399422402934101282213756282628713923341851912067517540697666235051258119134645077017734962264550528388089819300666
09455943257938314206025472121046186494696066665723479147044328259806729798488248365971741745296140018585895475315596549772931931272700657020001585006982145397882686881473726759910924225335588586444395935882026394770505047342966500460796566406479015488227570434737316195223992301641212248254663619241372517150587588669334949170839233267767197321554099091449259382244632644946587021524660506332052813045052158853755121990785012074712576520240006653065204701317452190795865461053118301400961322936924680945119258862727813585639268292240955786836519842009833725836658035713774760271449136481849787309338789795672530430696600127378726870506097988401087908451293957078705729123743322793829837248486243225395867616500298831614952357245778595520416025793471800371886475130839526023377941211665974311673705277621543375497761063703761535626558819355050782784986502865524103296322746464937854549911811448546791863354973909464838180385426656847872523050395897682568201694552622740709356431056210781894998068639329724731873971668080474224372481692732605553012158241837155250671991240025111852161505033413061089236905082955216427871294566349068238860745392056508879961379003131568039556850973356471681983430504122745735952602506088028768654858880240720199173451238494340332835664789080882650357684782116832233764325124415887871907640121533159098725102183391266040047960789908010286691337773387103175642407761345118105244636063687711179429311248048836494285271600176925678986832402364092264307994342650843076968601497399668849563522287933423362851077354574990852151928334104763466463837010311269252082885410020964401323204820156559809437944342040722066200536725856592264663547933019014873643501465198676309716917709869950570802256609058375034565528382031326310417918971541413355416776391909649102156836634870606049808463056148946462403237893685003697580912912451134988413294277927343307348032829245117041310803184896681636178280336033959004514879222303129097969259483811029911468220546746964874260707533968324302216
18428295425257499796994420607860538645419531491757733336034021601535426583758567627555629938946859886544158530489820642241655856617038305077492032552858077568946392077123407218809443346686209401271493763555342851545003074944605265496573303231886857449155910337142767732100225936378105170371201084980241510325910452626303515593633156139236733088905862745695882083854190859810581160362070408267948533624449628244247119196173683262185913191513113413120834890358166551589156139594720535747080730112294593994121763531168883911031894535910487069095707175045214441413883884802410923699659922887455483992198926058974807403496764446301358667199255684428863794804385174349638463155672979632002172038865554637417660688132773049255710905333067787632676418064804368544180709337584779332643844014281294499087468987637559541868883972735107421654055148815317881757858498940165307145686763353409263467427598935862681561349845673024564278210709413696260619038530970875676404593624213991446834528296807904314996877361889127548706876660509342389067428760334202348176873902188764996715901220157107218395994779944228232025706880680537017422662829436163962024049183896623152251324568553859222919956280866164457373227617378984673998954294513406712936355130470484047172888327701828569122540112559029443105549133380108876317878934885708779481197872203584407950893040390662267527949738213177889303432525604671841689257389744714864585578792783077907001007366247010458342027047984410042602813807810527541755925124978790060094078974677759122005471591830027215095163800538956004584188386335594985153414639789590893161032065216627865562478243477492718987712336028170936986667727941311558639344162780955383819989272289059208271904167118532762576939936016139229330085472000664229184978349081804033101689310845610467686180433739112997151156369536100005937892508417519342890274027944999566791868441643171088511636287506843607494353281634216084927195204425318616160380027060137805983808724233258618373597362604084772620575548007554735347
77739163893536886778117313894342013730312738239549486053761767016003592449560183203770870297380042665493234154328227826161319624338366790529220103401809208784623641946318008190861477299607218218147563192565713580574673676975514988515973807824523611865529028197196523430554267456916147390163075644592164113465322191877381736648827025154755655344595185625850415706007484619961822112722078077548810820661458777980301048298094959998843590166600089213998299731105370450382081357398551768041295824322018844295183055318760025224411319359918187243549701663906499972681839637299712075635831789688700436883171957318571463635238877340193114826918720076045681520464598955198762339362130659407213017237414285469564774538513426626039422147924663803446495825001972787538860618287298688878326787711932942543926192880766270844665848971626380466013404098475876517276739201869017260805536219960790997770623519876201569420650156306118421999481353759659896503765074218455165533642812982167841294625080528086997863064280377154419267496614270471751485873791926267233042217876770199608217298016105835028169483760480489189141825386849964466688973717014616267259575819563380715102352815359754233338416870137627255298294200928071050374712807126163129305423334227835605731208526025998514462174648292952160103947225683263366838969287635526079615473971075846937152313476682209766148960335198150482998575661464669205090867104353474591089681094615310882301081356571543436412943800758607982572564082993857852220151299790361465651862970921084756522065551855029752420587732726536078638632074009310494003762566731083633506164621657198374843095372471608994805628646175091686722200385382358279555192566202420765395636294248217165311561694158352804158360245189371453145190548072436685476055692744274796682623876187541390379542086579059490549479070257929490923852151558951500582650593038607213038658921789471668682748899321548710327277954422702713411169766374862218805509688389472548372270585145992856686928459093038253277084393346836609303
65276956879459932300178133462123292222555663360953191645892095823676158306628605816487442713992970731789833062253842652318382499915569019524314683525133889914687086080927390674741894728221003626099764980260226325600351183176113029009013310386194061278049160639248311832974942565787172534242340359748249201252863819933397525933862337335384795947641692826971961193722879111115190200208278244614389930898600867273721259938928473972062875613038603974191988560489125266719327301896380750282893396873124286420088266675796929872719505030321356536567583045968978238946118696159406429109296706744397983358401644847983906259391637605203816935576977961374968790283467253447305171783238417932976428047224219210209736608813311524755211719258289690589090859549206673544846364981364954630816049222933034807620983286163102579722926522718060903625959940224605937789673299676302174440175257416380452298700293469880149580845727713989789843720961385319894999056569185547868264897553309397961986945315977045110881563790623411232932955188025256876921868825443006690830218692648154606419324034307106254669658056961015731841620376493314305098235575367303815597413127555612070229422883929975450623742847435353479950556736450376352671016317185607979417669399793458422571056598187590243887451634359434305481528227722462580778833707732529290158998441040061311901342456274999813021921178175032654389012218177413891158333575819357457799272045215525901877708753907577655482384958529401811055384968559040680666886680266901077291500210754897524568189104609076208316378091361117209103669198382879917634829287917482025898557154671171146378394672619421417457357079114255008564986545019706110719517824639300017083805261131383722741753393356625092169766451641547030714380744903957639467863527902223488934912037138320499678926937927232814388441173553164660986370709931055436623495144110362422120016447548592514466872552437640192636184830031451052260684540392233911849812271143764611032891142827340697351298784438700146355035326488666265666
67914901166162930587104576749414261153794064753802158961802642927355719918310663028215143569818970905632433269645320004900303957055632175311138456746844602835628904740418607000085429943093034316700698981140571877816887916474759854753997859271105386693600207306132365473887105340142508006162852906118538596732969987126166211990351704986503780155363871413699663954792800610809029455955600309822991543330042253679891766932624144939060428160579453101461433044143996234873674506037735662596771123861963729201895854603129610305096448743155140934507704061445592546699556299418746596517566849045890451544180847737277330001790442633829205995645608625832773369516360989588132005808556078368148568861893086235628443747784399629728471253481086345451020854279429082627369196342148203023724090997870940983926087765285728354735131880373997886328690240207916061276214402047198215843063673535384956668795832090551190886193140702678521430258815311225104047901738300533548529803349897158550493609982225769194034849807698159537877895555193536203558830022247566834435050885516469224684770384749924654266966662000902430627265289641143382738252234277626796684149644206035576614604674203041322937356663227140931005992313613604607015407976103540881585077144015739356504208743192058109416473577195313011234604304724364984149920580678685505306262473132159795312126331269718770461048329028893386454886617883785246418127743847205019651073400232203851162419350487342072044133458470845299106673918135646890264041635815461909682536651955578794886488715663089155422196473858390320269399281721069474509476566112352026843925757498701587450846186392850664266843050309911138045544674927859244458425621557512679785762866254871670uwb);
+
+__attribute__((noipa)) unsigned _BitInt(65472)
+foo (unsigned _BitInt(65472) a, unsigned _BitInt(65472) b)
+{
+ return a + b;
+}
+
+__attribute__((noipa)) unsigned _BitInt(65472)
+bar (unsigned _BitInt(65472) a, unsigned _BitInt(65472) b)
+{
+ return a / b;
+}
+
+__attribute__((noipa)) unsigned _BitInt(65472)
+baz (unsigned _BitInt(65472) a, unsigned _BitInt(65472) b)
+{
+ return a >> b;
+}
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 65472
+ if (foo (a, b) != c || bar (d, 71uwb) != e || baz (f, 131uwb) != g)
+ __builtin_abort ();
+#endif
+}
diff --git a/gcc/testsuite/gcc.dg/debug/dwarf2/inline4.c b/gcc/testsuite/gcc.dg/debug/dwarf2/inline4.c
index 2faef6e..22eb35f 100644
--- a/gcc/testsuite/gcc.dg/debug/dwarf2/inline4.c
+++ b/gcc/testsuite/gcc.dg/debug/dwarf2/inline4.c
@@ -2,7 +2,7 @@
the DW_TAG_inlined_subroutine and the DW_TAG_variable for the local. */
/* { dg-options "-O -gdwarf -dA" } */
/* { dg-do compile } */
-/* { dg-final { scan-assembler "DW_TAG_inlined_subroutine\[^\\(\]*\\(\[^\\)\]*\\)\[^\\(\]*\\(DIE \\(0x\[0-9a-f\]*\\) DW_TAG_formal_parameter\[^\\(\]*\\(DIE \\(0x\[0-9a-f\]*\\) DW_TAG_variable" } } */
+/* { dg-final { scan-assembler "DW_TAG_inlined_subroutine\[^\\(\]*\(\|\\(\[^\\)\]*\\)\)\[^\\(\]*\\(DIE \\(0x\[0-9a-f\]*\\) DW_TAG_formal_parameter\[^\\(\]*\\(DIE \\(0x\[0-9a-f\]*\\) DW_TAG_variable" } } */
/* { dg-final { scan-assembler-times "DW_TAG_inlined_subroutine" 2 } } */
static int foo (int i)
diff --git a/gcc/testsuite/gcc.dg/ipa/ipa-sra-32.c b/gcc/testsuite/gcc.dg/ipa/ipa-sra-32.c
new file mode 100644
index 0000000..f844428
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ipa/ipa-sra-32.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-ipa-sra" } */
+
+/* Test that parameters can be removed even when they are returned but the
+ return is unused. */
+
+extern int use(int use);
+
+
+static int __attribute__((noinline))
+foo(int a, int b, int c)
+{
+ use (c);
+ return a + b + c;
+}
+
+static int __attribute__((noinline))
+bar (int a, int b, int c, int d)
+{
+ return foo (a, b, c + d);
+}
+
+int
+baz (int a, int b, int c, int d)
+{
+ bar (a, b, c, d);
+ return a + d;
+}
+
+/* { dg-final { scan-ipa-dump-times "Will remove parameter" 4 "sra" } } */
diff --git a/gcc/testsuite/gcc.dg/ipa/ipa-sra-4.c b/gcc/testsuite/gcc.dg/ipa/ipa-sra-4.c
index c86ae83..5b42fbd 100644
--- a/gcc/testsuite/gcc.dg/ipa/ipa-sra-4.c
+++ b/gcc/testsuite/gcc.dg/ipa/ipa-sra-4.c
@@ -54,10 +54,10 @@ void caller (void)
int b = 10;
int c;
- ox (&a);
+ c = ox (&a);
ox_ctrl_1 (&a);
ox_ctrl_2 (&a);
- *holder = ox_improved (1, &b);
+ *holder = ox_improved (1, &b) + c;
return;
}
diff --git a/gcc/testsuite/gcc.dg/ipa/pr110378-4.c b/gcc/testsuite/gcc.dg/ipa/pr110378-4.c
new file mode 100644
index 0000000..32432a8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ipa/pr110378-4.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-ipa-sra -fdump-tree-optimized-slim" } */
+
+/* This emulates what C++ trstcase pr110378-1.C looks like on 32-bit arm (or
+ any architecture where the destructor returns this pointer. It verifies
+ that when it later becomes known that the return value will be removed, we
+ can split a parameter even in this case. */
+
+struct S
+{
+ short move_offset_of_a;
+ int *a;
+};
+
+extern int *allocate_stuff (unsigned);
+extern void deallocate_stuff (void *);
+
+static void
+something_like_a_constructor (struct S *p, int len)
+{
+ p->a = allocate_stuff (len * sizeof (int));
+ *p->a = 4;
+}
+
+static int
+operation (struct S *p)
+{
+ return *p->a + 1;
+}
+
+static struct S * __attribute__((noinline))
+something_like_an_arm32_destructor (struct S *p)
+{
+ deallocate_stuff (p->a);
+ return p;
+}
+
+volatile int v2 = 20;
+
+int test (void)
+{
+ struct S shouldnotexist;
+ something_like_a_constructor (&shouldnotexist, v2);
+ v2 = operation (&shouldnotexist);
+ something_like_an_arm32_destructor (&shouldnotexist);
+ return 0;
+}
+
+/* { dg-final { scan-ipa-dump "Will split parameter 0" "sra" } } */
+/* { dg-final { scan-tree-dump-not "shouldnotexist" "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/long_branch.c b/gcc/testsuite/gcc.dg/long_branch.c
index c1ac24f..ba80ab3 100644
--- a/gcc/testsuite/gcc.dg/long_branch.c
+++ b/gcc/testsuite/gcc.dg/long_branch.c
@@ -1,7 +1,7 @@
/* { dg-do run } */
/* { dg-options "-O2 -fno-reorder-blocks" } */
/* { dg-skip-if "limited code space" { pdp11-*-* } } */
-/* { dg-timeout-factor 2.0 { target hppa*-*-* } } */
+/* { dg-timeout-factor 4.0 { target hppa*-*-* } } */
void abort ();
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_group_plugin.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_group_plugin.c
index 3396b38..0a4f25e 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_group_plugin.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_group_plugin.c
@@ -181,27 +181,31 @@ test_diagnostic_start_span_fn (diagnostic_context *context,
pp_newline (context->printer);
}
-/* Custom diagnostic callback: loudly announce a new diagnostic group. */
+/* Custom output format subclass. */
-static void
-test_begin_group_cb (diagnostic_context * context)
+class test_output_format : public diagnostic_text_output_format
{
- pp_string (context->printer,
- "================================= BEGIN GROUP ==============================");
- pp_newline (context->printer);
-}
-
-/* Custom diagnostic callback: loudly announce the end of a
- diagnostic group. */
+ public:
+ test_output_format (diagnostic_context &context)
+ : diagnostic_text_output_format (context)
+ {}
-static void
-test_end_group_cb (diagnostic_context * context)
-{
- pp_set_prefix (context->printer, NULL);
- pp_string (context->printer,
- "---------------------------------- END GROUP -------------------------------");
- pp_newline_and_flush (context->printer);
-}
+ void on_begin_group () final override
+ {
+ /* Loudly announce a new diagnostic group. */
+ pp_string (m_context.printer,
+ "================================= BEGIN GROUP ==============================");
+ pp_newline (m_context.printer);
+ }
+ void on_end_group () final override
+ {
+ /* Loudly announce the end of a diagnostic group. */
+ pp_set_prefix (m_context.printer, NULL);
+ pp_string (m_context.printer,
+ "---------------------------------- END GROUP -------------------------------");
+ pp_newline_and_flush (m_context.printer);
+ }
+};
/* Entrypoint for the plugin.
Install custom callbacks into the global_dc.
@@ -220,9 +224,8 @@ plugin_init (struct plugin_name_args *plugin_info,
return 1;
diagnostic_starter (global_dc) = test_diagnostic_starter;
- global_dc->start_span = test_diagnostic_start_span_fn;
- global_dc->begin_group_cb = test_begin_group_cb;
- global_dc->end_group_cb = test_end_group_cb;
+ global_dc->m_text_callbacks.start_span = test_diagnostic_start_span_fn;
+ global_dc->m_output_format = new test_output_format (*global_dc);
pass_info.pass = new pass_test_groups (g);
pass_info.reference_pass_name = "*warn_function_noreturn";
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_show_trees.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_show_trees.c
index d81fa57..f5c6fc5 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_show_trees.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_show_trees.c
@@ -115,7 +115,7 @@ plugin_init (struct plugin_name_args *plugin_info,
if (!plugin_default_version_check (version, &gcc_version))
return 1;
- global_dc->caret_max_width = 80;
+ global_dc->m_source_printing.max_width = 80;
register_callback (plugin_name,
PLUGIN_PRE_GENERICIZE,
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_inlining.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_inlining.c
index 3627f7a..7edce1f 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_inlining.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_inlining.c
@@ -169,7 +169,7 @@ plugin_init (struct plugin_name_args *plugin_info,
if (!plugin_default_version_check (version, &gcc_version))
return 1;
- global_dc->caret_max_width = 80;
+ global_dc->m_source_printing.max_width = 80;
pass_info.pass = new pass_test_inlining (g);
pass_info.reference_pass_name = "*warn_function_noreturn";
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_paths.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_paths.c
index 62558be..bf66500 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_paths.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_paths.c
@@ -534,7 +534,7 @@ plugin_init (struct plugin_name_args *plugin_info,
if (!plugin_default_version_check (version, &gcc_version))
return 1;
- global_dc->caret_max_width = 80;
+ global_dc->m_source_printing.max_width = 80;
pass_info.pass = make_pass_test_show_path (g);
pass_info.reference_pass_name = "whole-program";
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c
index baa6b62..2b8cbc9 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c
@@ -175,7 +175,7 @@ test_show_locus (function *fun)
/* Hardcode the "terminal width", to verify the behavior of
very wide lines. */
- global_dc->caret_max_width = 71;
+ global_dc->m_source_printing.max_width = 71;
if (0 == strcmp (fnname, "test_simple"))
{
@@ -246,7 +246,7 @@ test_show_locus (function *fun)
if (0 == strcmp (fnname, "test_very_wide_line"))
{
const int line = fnstart_line + 2;
- global_dc->show_ruler_p = true;
+ global_dc->m_source_printing.show_ruler_p = true;
text_range_label label0 ("label 0");
text_range_label label1 ("label 1");
rich_location richloc (line_table,
@@ -258,7 +258,7 @@ test_show_locus (function *fun)
&label1);
richloc.add_fixit_replace ("bar * foo");
warning_at (&richloc, 0, "test");
- global_dc->show_ruler_p = false;
+ global_dc->m_source_printing.show_ruler_p = false;
}
/* Likewise, but with a secondary location that's immediately before
@@ -266,7 +266,7 @@ test_show_locus (function *fun)
if (0 == strcmp (fnname, "test_very_wide_line_2"))
{
const int line = fnstart_line + 2;
- global_dc->show_ruler_p = true;
+ global_dc->m_source_printing.show_ruler_p = true;
text_range_label label0 ("label 0");
text_range_label label1 ("label 1");
rich_location richloc (line_table,
@@ -278,7 +278,7 @@ test_show_locus (function *fun)
richloc.add_range (get_loc (line, 34), SHOW_RANGE_WITHOUT_CARET,
&label1);
warning_at (&richloc, 0, "test");
- global_dc->show_ruler_p = false;
+ global_dc->m_source_printing.show_ruler_p = false;
}
/* Example of multiple carets. */
@@ -289,11 +289,11 @@ test_show_locus (function *fun)
location_t caret_b = get_loc (line, 11);
rich_location richloc (line_table, caret_a);
add_range (&richloc, caret_b, caret_b, SHOW_RANGE_WITH_CARET);
- global_dc->caret_chars[0] = 'A';
- global_dc->caret_chars[1] = 'B';
+ global_dc->m_source_printing.caret_chars[0] = 'A';
+ global_dc->m_source_printing.caret_chars[1] = 'B';
warning_at (&richloc, 0, "test");
- global_dc->caret_chars[0] = '^';
- global_dc->caret_chars[1] = '^';
+ global_dc->m_source_printing.caret_chars[0] = '^';
+ global_dc->m_source_printing.caret_chars[1] = '^';
}
/* Tests of rendering fixit hints. */
@@ -407,11 +407,11 @@ test_show_locus (function *fun)
location_t caret_b = get_loc (line - 1, 19);
rich_location richloc (line_table, caret_a);
richloc.add_range (caret_b, SHOW_RANGE_WITH_CARET);
- global_dc->caret_chars[0] = '1';
- global_dc->caret_chars[1] = '2';
+ global_dc->m_source_printing.caret_chars[0] = '1';
+ global_dc->m_source_printing.caret_chars[1] = '2';
warning_at (&richloc, 0, "test");
- global_dc->caret_chars[0] = '^';
- global_dc->caret_chars[1] = '^';
+ global_dc->m_source_printing.caret_chars[0] = '^';
+ global_dc->m_source_printing.caret_chars[1] = '^';
}
/* Example of using the "%q+D" format code, which as well as printing
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_string_literals.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_string_literals.c
index 0269f72..1b5fad2 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_string_literals.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_string_literals.c
@@ -208,7 +208,7 @@ plugin_init (struct plugin_name_args *plugin_info,
if (!plugin_default_version_check (version, &gcc_version))
return 1;
- global_dc->caret_max_width = 80;
+ global_dc->m_source_printing.max_width = 80;
pass_info.pass = new pass_test_string_literals (g);
pass_info.reference_pass_name = "ssa";
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_tree_expression_range.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_tree_expression_range.c
index f546863..fbdb2f8 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_tree_expression_range.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_tree_expression_range.c
@@ -89,7 +89,7 @@ plugin_init (struct plugin_name_args *plugin_info,
if (!plugin_default_version_check (version, &gcc_version))
return 1;
- global_dc->caret_max_width = 130;
+ global_dc->m_source_printing.max_width = 130;
register_callback (plugin_name,
PLUGIN_PRE_GENERICIZE,
diff --git a/gcc/testsuite/gcc.dg/plugin/poly-int-tests.h b/gcc/testsuite/gcc.dg/plugin/poly-int-tests.h
index 7af9859..022ccd6 100644
--- a/gcc/testsuite/gcc.dg/plugin/poly-int-tests.h
+++ b/gcc/testsuite/gcc.dg/plugin/poly-int-tests.h
@@ -4839,11 +4839,11 @@ test_num_coeffs_extra ()
{
/* Test the most common POD types. */
test_unsigned<N, unsigned short, HOST_WIDE_INT,
- poly_int_pod<N, unsigned short> > ();
+ poly_int<N, unsigned short> > ();
test_signed<N, HOST_WIDE_INT, HOST_WIDE_INT,
- poly_int_pod<N, HOST_WIDE_INT> > ();
+ poly_int<N, HOST_WIDE_INT> > ();
test_unsigned<N, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
- poly_int_pod<N, unsigned HOST_WIDE_INT> > ();
+ poly_int<N, unsigned HOST_WIDE_INT> > ();
/* Test some coefficient types that weren't covered in the core tests. */
test_signed<N, int, HOST_WIDE_INT,
diff --git a/gcc/testsuite/gcc.dg/pr100512.c b/gcc/testsuite/gcc.dg/pr100512.c
index 70b90e0..0c1fc63 100644
--- a/gcc/testsuite/gcc.dg/pr100512.c
+++ b/gcc/testsuite/gcc.dg/pr100512.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -w" } */
+/* { dg-options "-O2 -Wno-pointer-to-int-cast" } */
#include <stdint.h>
int a;
@@ -15,7 +15,7 @@ void b() {
;
g:
for (; a;) {
- int16_t i = &d;
+ int16_t i = (int16_t) &d;
*c = i && *f;
}
}
diff --git a/gcc/testsuite/gcc.dg/pr103003.c b/gcc/testsuite/gcc.dg/pr103003.c
index d3d65f8..3cd4628 100644
--- a/gcc/testsuite/gcc.dg/pr103003.c
+++ b/gcc/testsuite/gcc.dg/pr103003.c
@@ -1,11 +1,11 @@
/* { dg-do compile } */
-/* { dg-options "-O2" } */
+/* { dg-options "-O2 -Wno-pointer-to-int-cast" } */
typedef char int8_t;
int8_t c_4, uli_5;
unsigned short us_6;
void func_1() {
int uli_9;
- short ptr_16ptr_11 = &uli_9; /* { dg-warning "initialization of*" } */
+ short ptr_16ptr_11 = (short) &uli_9;
for (; us_6 <= 6;)
if ((us_6 *= uli_9) < (uli_5 || 0) ?: ((c_4 = us_6) >= us_6) - uli_5)
uli_9 = 9;
diff --git a/gcc/testsuite/gcc.dg/pr103451.c b/gcc/testsuite/gcc.dg/pr103451.c
index c701934..db724aa 100644
--- a/gcc/testsuite/gcc.dg/pr103451.c
+++ b/gcc/testsuite/gcc.dg/pr103451.c
@@ -1,5 +1,5 @@
// { dg-do compile }
-// { dg-options "-O2 -w -fnon-call-exceptions -fno-delete-dead-exceptions -fdump-tree-optimized" }
+// { dg-options "-O2 -Wno-div-by-zero -fnon-call-exceptions -fno-delete-dead-exceptions -fdump-tree-optimized" }
int func_10_ptr_12;
@@ -11,9 +11,9 @@ void func_10(long li_8)
func_10_ptr_12 &= 4 ? *ptr_9 : 4;
}
-void func_9_s_8()
-{
- func_10(func_9_s_8);
+void func_9_s_8(void)
+{
+ func_10((long) func_9_s_8);
}
// { dg-final { scan-tree-dump " / 0" "optimized" } }
diff --git a/gcc/testsuite/gcc.dg/pr108095.c b/gcc/testsuite/gcc.dg/pr108095.c
index fb76caa..0a487cf 100644
--- a/gcc/testsuite/gcc.dg/pr108095.c
+++ b/gcc/testsuite/gcc.dg/pr108095.c
@@ -1,5 +1,5 @@
/* PR tree-optimization/108095 */
-/* { dg-do compile } */
+/* { dg-do compile { target lra } } */
/* { dg-options "-Os -g" } */
int v;
diff --git a/gcc/testsuite/gcc.dg/pr111694.c b/gcc/testsuite/gcc.dg/pr111694.c
new file mode 100644
index 0000000..a70b030
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr111694.c
@@ -0,0 +1,19 @@
+/* PR tree-optimization/111009 */
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+#define signbit(x) __builtin_signbit(x)
+
+static void test(double l, double r)
+{
+ if (l == r && (signbit(l) || signbit(r)))
+ ;
+ else
+ __builtin_abort();
+}
+
+int main()
+{
+ test(0.0, -0.0);
+}
+
diff --git a/gcc/testsuite/gcc.dg/pr111708-1.c b/gcc/testsuite/gcc.dg/pr111708-1.c
new file mode 100644
index 0000000..4af7f53
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr111708-1.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+extern int a(void); // external linkage (6.2.2p4)
+static int a(void); /* { dg-error "static declaration of 'a' follows non-static declaration" } */
+
+static int b(void); // internal linkage (6.2.2p3)
+extern int b(void); // internal linkage (6.2.2p4)
+
+static int h0(void);
+
+void s(void)
+{
+ extern int h0(void); // internal linkage (6.2.2p4),
+ extern int h0(void); // internal linkage (6.2.2p4), redeclaration, ok
+ extern int h2(void); // external linkage (6.2.2p4)
+ extern int h2(void); // external linkage (6.2.2p4), redeclaration, ok.
+}
+
+
+extern int i(void); // external linkage (6.2.2p4)
+static int j(void); // internal linkage (6.2.2p3)
+
+void bar(void)
+{
+ extern int i(void); // external linkage (6.2.2p4), ok
+}
+
+void foo(void)
+{
+ extern int j(void); // internal linkage (6.2.2p4), ok, internal
+}
+
+void x(void)
+{
+ int i(void); // no linkage (6.2.2p6)
+ int j; // no linkage (6.2.2p6)
+ {
+ extern int j(void); /* { dg-error "function previously declared 'static' redeclared 'extern'" } */
+ }
+}
+
diff --git a/gcc/testsuite/gcc.dg/pr111708-2.c b/gcc/testsuite/gcc.dg/pr111708-2.c
new file mode 100644
index 0000000..065c052
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr111708-2.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+/* { dg-require-effective-target trampolines } */
+
+static void pp(void)
+{
+ int pp;
+ {
+ auto void pp(void);
+ void pp(void) { }
+ }
+}
+
+static void q2(void);
+
+static void qq(void)
+{
+ auto void q2(void);
+ void q2(void) { }
+}
+
diff --git a/gcc/testsuite/gcc.dg/pr111845.c b/gcc/testsuite/gcc.dg/pr111845.c
new file mode 100644
index 0000000..1bcb4f8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr111845.c
@@ -0,0 +1,16 @@
+/* PR tree-optimization/111845 */
+/* { dg-do compile } */
+/* { dg-options "-O2 --param tree-reassoc-width=2" } */
+
+int a, b;
+unsigned int c, d, e;
+
+void
+foo (int x)
+{
+ b += d;
+ c += b < d;
+ b += e = a < x;
+ c += b;
+ c += b < e;
+}
diff --git a/gcc/testsuite/gcc.dg/pr68435.c b/gcc/testsuite/gcc.dg/pr68435.c
index 2c7c8b8..089bbe0 100644
--- a/gcc/testsuite/gcc.dg/pr68435.c
+++ b/gcc/testsuite/gcc.dg/pr68435.c
@@ -1,5 +1,5 @@
/* { dg-do compile { target aarch64*-*-* i?86-*-* x86_64-*-* } } */
-/* { dg-options "-fdump-rtl-ce1 -O2 -w --param max-rtl-if-conversion-unpredictable-cost=100" } */
+/* { dg-options "-fdump-rtl-ce1 -O2 --param max-rtl-if-conversion-unpredictable-cost=100" } */
/* { dg-additional-options "-march=i686" { target { { i?86-*-* x86_64-*-* } && ia32 } } } */
typedef struct cpp_reader cpp_reader;
@@ -20,7 +20,7 @@ enum cpp_ttype
CPP_HEADER_NAME, CPP_COMMENT, CPP_MACRO_ARG, CPP_PADDING, CPP_EOF,
};
-static struct op lex (cpp_reader *, int);
+struct op lex (cpp_reader *, int);
struct op
{
@@ -29,7 +29,7 @@ struct op
};
int
-_cpp_parse_expr (pfile)
+_cpp_parse_expr (cpp_reader *pfile)
{
struct op init_stack[20];
struct op *stack = init_stack;
diff --git a/gcc/testsuite/gcc.dg/pr90263.c b/gcc/testsuite/gcc.dg/pr90263.c
index 3222a53..831e098 100644
--- a/gcc/testsuite/gcc.dg/pr90263.c
+++ b/gcc/testsuite/gcc.dg/pr90263.c
@@ -2,6 +2,7 @@
/* { dg-do compile } */
/* { dg-options "-O2" } */
/* { dg-require-effective-target glibc } */
+/* { dg-skip-if "riscv_v uses an inline memcpy routine" { riscv_v } }*/
int *f (int *p, int *q, long n)
{
diff --git a/gcc/testsuite/gcc.dg/pr93917.c b/gcc/testsuite/gcc.dg/pr93917.c
index f09e1c4..f636b77 100644
--- a/gcc/testsuite/gcc.dg/pr93917.c
+++ b/gcc/testsuite/gcc.dg/pr93917.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-vrp1 -fdump-tree-vrp2" } */
+/* { dg-options "-O2 -fdump-tree-vrp1 -fdump-tree-vrp2 -fdump-tree-optimized-alias" } */
void f3(int n);
@@ -19,5 +19,5 @@ void f2(int*n)
/* { dg-final { scan-tree-dump-times "Global Export.*0, \\+INF" 1 "vrp1" } } */
/* { dg-final { scan-tree-dump-times "__builtin_unreachable" 1 "vrp1" } } */
-/* { dg-final { scan-tree-dump-times "Global Export.*0, \\+INF" 1 "vrp2" } } */
/* { dg-final { scan-tree-dump-times "__builtin_unreachable" 0 "vrp2" } } */
+/* { dg-final { scan-tree-dump-times "0, \\+INF" 2 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/rtl/arm/stl-cond.c b/gcc/testsuite/gcc.dg/rtl/arm/stl-cond.c
deleted file mode 100644
index e47ca6b..0000000
--- a/gcc/testsuite/gcc.dg/rtl/arm/stl-cond.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* { dg-do compile { target arm*-*-* } } */
-/* { dg-require-effective-target arm_arm_ok } */
-/* { dg-require-effective-target arm_arch_v8a_ok } */
-/* { dg-options "-O2 -marm" } */
-/* { dg-add-options arm_arch_v8a } */
-
-/* We want to test that the STL instruction gets the conditional
- suffix when under a COND_EXEC. However, COND_EXEC is very hard to
- generate from C code because the atomic_store expansion adds a compiler
- barrier before the insn, preventing if-conversion. So test the output
- here with a hand-crafted COND_EXEC wrapped around an STL. */
-
-void __RTL (startwith ("final")) foo (int *a, int b)
-{
-(function "foo"
- (param "a"
- (DECL_RTL (reg/v:SI r0))
- (DECL_RTL_INCOMING (reg:SI r0))
- )
- (param "b"
- (DECL_RTL (reg/v:SI r1))
- (DECL_RTL_INCOMING (reg:SI r1))
- )
- (insn-chain
- (block 2
- (edge-from entry (flags "FALLTHRU"))
- (cnote 5 [bb 2] NOTE_INSN_BASIC_BLOCK)
-
- (insn:TI 7 (parallel [
- (set (reg:CC cc)
- (compare:CC (reg:SI r1)
- (const_int 0)))
- (set (reg/v:SI r1)
- (reg:SI r1 ))
- ]) ;; {*movsi_compare0}
- (nil))
-
- ;; A conditional atomic store-release: STLNE for Armv8-A.
- (insn 10 (cond_exec (ne (reg:CC cc)
- (const_int 0))
- (set (mem/v:SI (reg/v/f:SI r0) [-1 S4 A32])
- (unspec_volatile:SI [
- (reg/v:SI r1)
- (const_int 3)
- ] VUNSPEC_STL))) ;; {*p atomic_storesi}
- (expr_list:REG_DEAD (reg:CC cc)
- (expr_list:REG_DEAD (reg/v:SI r1)
- (expr_list:REG_DEAD (reg/v/f:SI r0)
- (nil)))))
- (edge-to exit (flags "FALLTHRU"))
- ) ;; block 2
- ) ;; insn-chain
- (crtl
- (return_rtx
- (reg/i:SI r0)
- ) ;; return_rtx
- ) ;; crtl
-) ;; function
-}
-
-/* { dg-final { scan-assembler "stlne" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr111519.c b/gcc/testsuite/gcc.dg/torture/pr111519.c
new file mode 100644
index 0000000..ef34c26
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr111519.c
@@ -0,0 +1,48 @@
+/* PR tree-optimization/111519 */
+/* { dg-do run } */
+
+int a, o;
+char b, f, i;
+long c;
+static signed char d;
+static char g;
+unsigned *h;
+signed char *e = &f;
+static signed char **j = &e;
+static long k[2];
+unsigned **l = &h;
+short m;
+volatile int z;
+
+__attribute__((noipa)) void
+foo (char *p)
+{
+ (void) p;
+}
+
+int
+main ()
+{
+ int p = z;
+ signed char *n = &d;
+ *n = 0;
+ while (c)
+ for (; i; i--)
+ ;
+ for (g = 0; g <= 1; g++)
+ {
+ *n = **j;
+ k[g] = 0 != &m;
+ *e = l && k[0];
+ }
+ if (p)
+ foo (&b);
+ for (; o < 4; o++)
+ {
+ a = d;
+ if (p)
+ foo (&b);
+ }
+ if (a != 1)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr111807.c b/gcc/testsuite/gcc.dg/torture/pr111807.c
new file mode 100644
index 0000000..09fbdcf
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr111807.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+
+static struct A {
+ int x : 4;
+} a;
+static int b;
+int main()
+{
+ struct A t[2];
+ t[0] = b ? t[1] : a;
+ return (b ? t[1].x : 0) && 1;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr111818.c b/gcc/testsuite/gcc.dg/torture/pr111818.c
new file mode 100644
index 0000000..a7a9111
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr111818.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+
+static void foo(const volatile unsigned int x, void *p)
+{
+ __builtin_memcpy(p, (void *)&x, sizeof x);
+}
+
+void bar(void *number)
+{
+ foo(0, number);
+}
diff --git a/gcc/testsuite/gcc.dg/tree-prof/pr111559.c b/gcc/testsuite/gcc.dg/tree-prof/pr111559.c
new file mode 100644
index 0000000..43202c6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-prof/pr111559.c
@@ -0,0 +1,16 @@
+/* { dg-options "-O2" } */
+
+__attribute__((noipa)) static void edge(void) {}
+
+int p = 0;
+
+__attribute__((noinline))
+static void rule1(void) { if (p) edge(); }
+
+__attribute__((noinline))
+static void rule1_same(void) { if (p) edge(); }
+
+__attribute__((noipa)) int main(void) {
+ rule1();
+ rule1_same();
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/and-1.c b/gcc/testsuite/gcc.dg/tree-ssa/and-1.c
index 276c2b9..27d3890 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/and-1.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/and-1.c
@@ -2,10 +2,10 @@
/* { dg-options "-O -fdump-tree-optimized-raw" } */
int f(int in) {
- in = in | 3;
- in = in ^ 1;
+ in = in | 7;
+ in = in ^ 3;
in = (in & ~(unsigned long)1);
return in;
}
-/* { dg-final { scan-tree-dump-not "bit_and_expr" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "bit_and_expr, " "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/bitops-5.c b/gcc/testsuite/gcc.dg/tree-ssa/bitops-5.c
new file mode 100644
index 0000000..990610e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/bitops-5.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized-raw" } */
+/* PR tree-optimization/111679 */
+
+int f1(int a, int b)
+{
+ return (~a) | (a ^ b); // ~(a & b) or (~a) | (~b)
+}
+
+_Bool fb(_Bool c, _Bool d)
+{
+ return (!c) | (c ^ d); // ~(c & d) or (~c) | (~d)
+}
+
+_Bool fb1(int x, int y)
+{
+ _Bool a = x == 10, b = y > 100;
+ return (!a) | (a ^ b); // ~(a & b) or (~a) | (~b)
+ // or (x != 10) | (y <= 100)
+}
+
+/* { dg-final { scan-tree-dump-not "bit_xor_expr, " "optimized" } } */
+/* { dg-final { scan-tree-dump-times "bit_not_expr, " 2 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "bit_and_expr, " 2 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "bit_ior_expr, " 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "ne_expr, _\[0-9\]+, x_\[0-9\]+" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "le_expr, _\[0-9\]+, y_\[0-9\]+" 1 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/bitops-6.c b/gcc/testsuite/gcc.dg/tree-ssa/bitops-6.c
new file mode 100644
index 0000000..e6ab2fd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/bitops-6.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized-raw" } */
+/* PR tree-optimization/111282 */
+
+
+int f(int a, int b)
+{
+ return a & (b ^ ~a); // a & b
+}
+
+_Bool fb(_Bool x, _Bool y)
+{
+ return x & (y ^ !x); // x & y
+}
+
+int fa(int w, int z)
+{
+ return (~w) & (w ^ z); // ~w & z
+}
+
+int fcmp(int x, int y)
+{
+ _Bool a = x == 2;
+ _Bool b = y == 1;
+ return a & (b ^ !a); // (x == 2) & (y == 1)
+}
+
+/* { dg-final { scan-tree-dump-not "bit_xor_expr, " "optimized" } } */
+/* { dg-final { scan-tree-dump-times "bit_and_expr, " 4 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "bit_not_expr, " 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-not "ne_expr, " "optimized" } } */
+/* { dg-final { scan-tree-dump-times "eq_expr, " 2 "optimized" } } */
+
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/bitops-7.c b/gcc/testsuite/gcc.dg/tree-ssa/bitops-7.c
new file mode 100644
index 0000000..7fb18db
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/bitops-7.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fdump-tree-optimized-raw" } */
+/* PR tree-optimization/111432 */
+
+int
+foo3(int c, int bb)
+{
+ if ((bb & ~3)!=0) __builtin_unreachable();
+ return (bb & (c|3));
+}
+
+int
+foo_bool(int c, _Bool bb)
+{
+ return (bb & (c|7));
+}
+
+/* Both of these functions should be able to remove the `IOR` and `AND`
+ as the only bits that are non-zero for bb is set on the other side
+ of the `AND`.
+ */
+
+/* { dg-final { scan-tree-dump-not "bit_ior_expr, " "optimized" } } */
+/* { dg-final { scan-tree-dump-not "bit_and_expr, " "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi-opt-36.c b/gcc/testsuite/gcc.dg/tree-ssa/phi-opt-36.c
new file mode 100644
index 0000000..4baf9f8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/phi-opt-36.c
@@ -0,0 +1,51 @@
+/* { dg-options "-O2 -fdump-tree-phiopt" } */
+
+unsigned f0(int A)
+{
+ unsigned t = A;
+// A == 0? A : -A same as -A
+ if (A == 0) return t;
+ return -t;
+}
+
+unsigned f1(int A)
+{
+ unsigned t = A;
+// A != 0? A : -A same as A
+ if (A != 0) return t;
+ return -t;
+}
+unsigned f2(int A)
+{
+ unsigned t = A;
+// A >= 0? A : -A same as abs (A)
+ if (A >= 0) return t;
+ return -t;
+}
+unsigned f3(int A)
+{
+ unsigned t = A;
+// A > 0? A : -A same as abs (A)
+ if (A > 0) return t;
+ return -t;
+}
+unsigned f4(int A)
+{
+ unsigned t = A;
+// A <= 0? A : -A same as -abs (A)
+ if (A <= 0) return t;
+ return -t;
+}
+unsigned f5(int A)
+{
+ unsigned t = A;
+// A < 0? A : -A same as -abs (A)
+ if (A < 0) return t;
+ return -t;
+}
+
+/* f4 and f5 are not allowed to be optimized in early phi-opt. */
+/* { dg-final { scan-tree-dump-times "if " 2 "phiopt1" } } */
+/* { dg-final { scan-tree-dump-not "if " "phiopt2" } } */
+
+
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi-opt-37.c b/gcc/testsuite/gcc.dg/tree-ssa/phi-opt-37.c
new file mode 100644
index 0000000..f1ff472
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/phi-opt-37.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fdump-tree-phiopt1" } */
+
+unsigned abs_with_convert0 (int x)
+{
+ unsigned int y = x;
+
+ if (x < 0)
+ y = -y;
+
+ return y;
+}
+unsigned abs_with_convert1 (unsigned x)
+{
+ int y = x;
+
+ if (y < 0)
+ x = -x;
+
+ return x;
+}
+
+/* { dg-final { scan-tree-dump-times "ABSU_EXPR <" 2 "phiopt1" } } */
+/* { dg-final { scan-tree-dump-not "if " "phiopt1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr111583-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr111583-1.c
new file mode 100644
index 0000000..1dd8dbc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr111583-1.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+/* { dg-options "-Os" } */
+
+short a, f, i;
+static const int *e;
+short *g;
+long h;
+int main()
+{
+ {
+ unsigned j = i;
+ a = 1;
+ for (; a; a++) {
+ {
+ long b = j, d = h;
+ int c = 0;
+ while (d--)
+ *(char *)b++ = c;
+ }
+ if (e)
+ break;
+ }
+ j && (*g)--;
+ const int **k = &e;
+ *k = 0;
+ }
+ if (f != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr111583-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr111583-2.c
new file mode 100644
index 0000000..0ee2185
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr111583-2.c
@@ -0,0 +1,36 @@
+/* { dg-do run } */
+/* { dg-options "-Os" } */
+
+int b, c, d;
+char e;
+short f;
+const unsigned short **g;
+char h(char k) {
+ if (k)
+ return '0';
+ return 0;
+}
+int l() {
+ b = 0;
+ return 1;
+}
+static short m(unsigned k) {
+ const unsigned short *n[65];
+ g = &n[4];
+ k || l();
+ long a = k;
+ char i = 0;
+ unsigned long j = k;
+ while (j--)
+ *(char *)a++ = i;
+ c = h(d);
+ f = k;
+ return 0;
+}
+int main() {
+ long o = (e < 0) << 5;
+ m(o);
+ if (f != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr31531-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr31531-1.c
new file mode 100644
index 0000000..c272991
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr31531-1.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* PR tree-optimization/31531 */
+
+int f(int a)
+{
+ int b = ~a;
+ return b<0;
+}
+
+
+int f1(unsigned a)
+{
+ int b = ~a;
+ return b<0;
+}
+/* We should convert the above two functions from b < 0 to ((int)a) >= 0. */
+/* { dg-final { scan-tree-dump-times ">= 0" 2 "optimized"} } */
+/* { dg-final { scan-tree-dump-times "~" 0 "optimized"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr31531-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr31531-2.c
new file mode 100644
index 0000000..865ea29
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr31531-2.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* PR tree-optimization/31531 */
+
+int f0(unsigned x, unsigned t)
+{
+ x = ~x;
+ t = ~t;
+ int xx = x;
+ int tt = t;
+ return tt < xx;
+}
+
+int f1(unsigned x, int t)
+{
+ x = ~x;
+ t = ~t;
+ int xx = x;
+ int tt = t;
+ return tt < xx;
+}
+
+int f2(int x, unsigned t)
+{
+ x = ~x;
+ t = ~t;
+ int xx = x;
+ int tt = t;
+ return tt < xx;
+}
+
+
+/* We should be able to remove all ~ from the above functions. */
+/* { dg-final { scan-tree-dump-times "~" 0 "optimized"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/predcom-2.c b/gcc/testsuite/gcc.dg/tree-ssa/predcom-2.c
index f19edd4..681ff7c 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/predcom-2.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/predcom-2.c
@@ -1,6 +1,6 @@
/* { dg-do run } */
/* { dg-options "-O2 -funroll-loops --param max-unroll-times=8 -fpredictive-commoning -fdump-tree-pcom-details-blocks -fno-tree-pre" } */
-/* { dg-additional-options "-fno-tree-vectorize" { target amdgcn-*-* } } */
+/* { dg-additional-options "-fno-tree-vectorize" { target amdgcn-*-* riscv*-*-* } } */
void abort (void);
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
index a879d30..5c89e3f 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
@@ -27,4 +27,4 @@ foo ()
but the loop reads only one element at a time, and DOM cannot resolve these.
The same happens on powerpc depending on the SIMD support available. */
-/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* nvptx*-*-* mmix-knuth-mmixware } || { { { lp64 && { powerpc*-*-* sparc*-*-* riscv*-*-* } } || aarch64_sve } || { arm*-*-* && { ! arm_neon } } } } } } } */
+/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* nvptx*-*-* mmix-knuth-mmixware } || { { { lp64 && { powerpc*-*-* sparc*-*-* } } || aarch64_sve } || { arm*-*-* && { ! arm_neon } } } } } } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-26.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-26.c
index e3c33f49..43152de 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-26.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-26.c
@@ -31,5 +31,5 @@ constraint_equal (struct constraint a, struct constraint b)
&& constraint_expr_equal (a.rhs, b.rhs);
}
-/* { dg-final { scan-tree-dump-times "Deleted dead store: x = " 1 "dse1" } } */
-/* { dg-final { scan-tree-dump-times "Deleted dead store: y = " 1 "dse1" } } */
+/* { dg-final { scan-tree-dump-times "Deleted dead store: x = " 2 "dse1" } } */
+/* { dg-final { scan-tree-dump-times "Deleted dead store: y = " 2 "dse1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-102.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-102.c
new file mode 100644
index 0000000..afd4805
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-102.c
@@ -0,0 +1,32 @@
+/* PR/111715 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-fre1" } */
+
+struct B {
+ struct { int len; } l;
+ long n;
+};
+struct A {
+ struct B elts[8];
+};
+
+static void
+set_len (struct B *b, int len)
+{
+ b->l.len = len;
+}
+
+static int
+get_len (struct B *b)
+{
+ return b->l.len;
+}
+
+int foo (struct A *a, int i, long *q)
+{
+ set_len (&a->elts[i], 1);
+ *q = 2;
+ return get_len (&a->elts[i]);
+}
+
+/* { dg-final { scan-tree-dump "return 1;" "fre1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp-unreachable.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp-unreachable.c
index 5835dfc..4aad7f1 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp-unreachable.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp-unreachable.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-vrp1-alias -fdump-tree-vrp2-alias" } */
+/* { dg-options "-O2 -fdump-tree-vrp1 -fdump-tree-vrp2 -fdump-tree-optimized-alias" } */
void dead (unsigned n);
void alive (unsigned n);
@@ -39,4 +39,4 @@ void func (unsigned n, unsigned m)
/* { dg-final { scan-tree-dump-not "dead" "vrp1" } } */
/* { dg-final { scan-tree-dump-times "builtin_unreachable" 1 "vrp1" } } */
/* { dg-final { scan-tree-dump-not "builtin_unreachable" "vrp2" } } */
-/* { dg-final { scan-tree-dump-times "fff8 VALUE 0x0" 4 "vrp2" } } */
+/* { dg-final { scan-tree-dump-times "fff8 VALUE 0x0" 2 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-68.c b/gcc/testsuite/gcc.dg/vect/bb-slp-68.c
index e7573a1..2dd3d8e 100644
--- a/gcc/testsuite/gcc.dg/vect/bb-slp-68.c
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-68.c
@@ -20,4 +20,4 @@ void foo ()
/* We want to have the store group split into 4, 2, 4 when using 32byte vectors.
Unfortunately it does not work when 64-byte vectors are available. */
-/* { dg-final { scan-tree-dump-not "from scalars" "slp2" { xfail amdgcn-*-* } } } */
+/* { dg-final { scan-tree-dump-not "from scalars" "slp2" { xfail vect512 } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c b/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c
index 8df3532..9ef1330 100644
--- a/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-pr65935.c
@@ -67,7 +67,8 @@ int main()
/* We should also be able to use 2-lane SLP to initialize the real and
imaginary components in the first loop of main. */
-/* { dg-final { scan-tree-dump-times "optimized: basic block" 10 "slp1" } } */
+/* { dg-final { scan-tree-dump-times "optimized: basic block" 10 "slp1" { target {! { vect1024 } } } } } */
+/* { dg-final { scan-tree-dump-times "optimized: basic block" 11 "slp1" { target { { vect1024 } } } } } */
/* We should see the s->phase[dir] operand splatted and no other operand built
from scalars. See PR97334. */
/* { dg-final { scan-tree-dump "Using a splat" "slp1" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c b/gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c
index b348526..f63b42a 100644
--- a/gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c
@@ -22,5 +22,5 @@ void foo(unsigned *p1, unsigned short *p2)
/* Disable for SVE because for long or variable-length vectors we don't
get an unrolled epilogue loop. Also disable for AArch64 Advanced SIMD,
because there we can vectorize the epilogue using mixed vector sizes.
- Likewise for AMD GCN. */
-/* { dg-final { scan-tree-dump "BB vectorization with gaps at the end of a load is not supported" "slp1" { target { { ! aarch64*-*-* } && { ! amdgcn*-*-* } } } } } */
+ Likewise for AMD GCN and RVV. */
+/* { dg-final { scan-tree-dump "BB vectorization with gaps at the end of a load is not supported" "slp1" { target { { ! aarch64*-*-* } && { { ! amdgcn*-*-* } && { ! riscv_v } } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-1.c b/gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-1.c
new file mode 100644
index 0000000..ab5f330
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-1.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_int } */
+/* { dg-additional-options "-O3" } */
+
+/* This test case is partially extracted from case
+ gcc.dg/vect/vect-avg-16.c, it's to verify we don't
+ cost a store with vec_to_scalar when we shouldn't. */
+
+void
+test (signed char *restrict a, signed char *restrict b, signed char *restrict c,
+ int n)
+{
+ for (int j = 0; j < n; ++j)
+ {
+ for (int i = 0; i < 16; ++i)
+ a[i] = (b[i] + c[i]) >> 1;
+ a += 20;
+ b += 20;
+ c += 20;
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "vec_to_scalar" 0 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-2.c b/gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-2.c
new file mode 100644
index 0000000..72b67cf
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-store-2.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_int } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-additional-options "-mvsx" } */
+
+/* Verify we do cost the required vec_perm. */
+
+int
+foo (int *a, int *b, int len)
+{
+ int i;
+ int *a1 = a;
+ int *a0 = a1 - 4;
+ for (i = 0; i < len; i++)
+ {
+ *b = *a0 + *a1;
+ b--;
+ a0++;
+ a1++;
+ }
+ return 0;
+}
+
+/* The reason why it doesn't check the exact count is that
+ we can get more than 1 vec_perm when it's compiled with
+ partial vector capability like Power10 (retrying for
+   the epilogue) or it's compiled without unaligned vector
+ memory access support (realign). */
+/* { dg-final { scan-tree-dump {\mvec_perm\M} "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c
new file mode 100644
index 0000000..3dfc6f1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -fdump-tree-vect-details" } */
+
+int
+bar (int *x, int a, int b, int n)
+{
+ x = __builtin_assume_aligned (x, __BIGGEST_ALIGNMENT__);
+ int sum1 = 0;
+ int sum2 = 0;
+ for (int i = 0; i < n; ++i)
+ {
+ sum1 += x[2*i] - a;
+ sum1 += x[2*i+1] * b;
+ sum2 += x[2*i] - b;
+ sum2 += x[2*i+1] * a;
+ }
+ return sum1 + sum2;
+}
+
+/* { dg-final { scan-assembler {e32,m2} } } */
+/* { dg-final { scan-assembler-times {csrr} 1 } } */
+/* { dg-final { scan-tree-dump-times "Maximum lmul = 8" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "Maximum lmul = 4" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "Maximum lmul = 2" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-not "Maximum lmul = 1" "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/no-dynamic-lmul-1.c b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/no-dynamic-lmul-1.c
new file mode 100644
index 0000000..7ede148
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/no-dynamic-lmul-1.c
@@ -0,0 +1,64 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -fdump-tree-vect-details" } */
+
+#include <stdint-gcc.h>
+
+void
+foo (int8_t *restrict a)
+{
+ for (int i = 0; i < 4096; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo2 (int16_t *restrict a)
+{
+ for (int i = 0; i < 2048; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo3 (int32_t *restrict a)
+{
+ for (int i = 0; i < 1024; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo4 (int64_t *restrict a)
+{
+ for (int i = 0; i < 512; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo5 (int8_t *restrict a)
+{
+ for (int i = 0; i < 16; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo6 (int16_t *restrict a)
+{
+ for (int i = 0; i < 16; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo7 (int32_t *restrict a)
+{
+ for (int i = 0; i < 16; ++i)
+ a[i] = a[i]-16;
+}
+
+void
+foo8 (int64_t *restrict a)
+{
+ for (int i = 0; i < 16; ++i)
+ a[i] = a[i]-16;
+}
+
+/* { dg-final { scan-tree-dump-not "Maximum lmul" "vect" } } */
+/* { dg-final { scan-assembler-times {vsetvli} 4 } } */
+/* { dg-final { scan-assembler-times {vsetivli} 4 } } */
diff --git a/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c b/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c
index 7c7acd5..96751fa 100644
--- a/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c
+++ b/gcc/testsuite/gcc.dg/vect/fast-math-slp-38.c
@@ -18,4 +18,4 @@ foo (void)
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_strided6 } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/no-scevccp-outer-7.c b/gcc/testsuite/gcc.dg/vect/no-scevccp-outer-7.c
index 543ee98..058d1d2 100644
--- a/gcc/testsuite/gcc.dg/vect/no-scevccp-outer-7.c
+++ b/gcc/testsuite/gcc.dg/vect/no-scevccp-outer-7.c
@@ -77,4 +77,4 @@ int main (void)
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED." 1 "vect" { target vect_widen_mult_hi_to_si } } } */
-/* { dg-final { scan-tree-dump-times "vect_recog_widen_mult_pattern: detected" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_widen_mult_pattern: detected(?:(?!failed)(?!Re-trying).)*succeeded" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/no-scevccp-vect-iv-3.c b/gcc/testsuite/gcc.dg/vect/no-scevccp-vect-iv-3.c
index 7049e49..6f2b221 100644
--- a/gcc/testsuite/gcc.dg/vect/no-scevccp-vect-iv-3.c
+++ b/gcc/testsuite/gcc.dg/vect/no-scevccp-vect-iv-3.c
@@ -30,4 +30,4 @@ unsigned int main1 ()
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_widen_sum_hi_to_si } } } */
-/* { dg-final { scan-tree-dump-times "vect_recog_widen_sum_pattern: detected" 1 "vect" { target vect_widen_sum_hi_to_si } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_widen_sum_pattern: detected(?:(?!failed)(?!Re-trying).)*succeeded" 1 "vect" { target vect_widen_sum_hi_to_si } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr111764.c b/gcc/testsuite/gcc.dg/vect/pr111764.c
new file mode 100644
index 0000000..f4e110f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr111764.c
@@ -0,0 +1,16 @@
+#include "tree-vect.h"
+
+short b = 2;
+
+int main()
+{
+ check_vect ();
+
+ for (int a = 1; a <= 9; a++)
+ b = b * b;
+ if (b != 0)
+ __builtin_abort ();
+
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vect/pr111846.c b/gcc/testsuite/gcc.dg/vect/pr111846.c
new file mode 100644
index 0000000..d283882
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr111846.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -ffast-math" } */
+/* { dg-additional-options "-mavx2" { target { x86_64-*-* i?86-*-* } } } */
+
+extern __attribute__((__simd__)) float powf(float, float);
+float gv[0][10];
+float eq_set_bands_real_adj[0];
+void eq_set_bands_real() {
+ for (int c = 0; c < 10; c++)
+ for (int i = 0; i < 10; i++)
+ gv[c][i] = powf(0, eq_set_bands_real_adj[i]) - 1;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/pr45752.c b/gcc/testsuite/gcc.dg/vect/pr45752.c
index e8b364f..3c87d9b 100644
--- a/gcc/testsuite/gcc.dg/vect/pr45752.c
+++ b/gcc/testsuite/gcc.dg/vect/pr45752.c
@@ -159,4 +159,4 @@ int main (int argc, const char* argv[])
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { scan-tree-dump-times "gaps requires scalar epilogue loop" 0 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" {target { ! { vect_load_lanes && vect_strided5 } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-8.c b/gcc/testsuite/gcc.dg/vect/pr65947-8.c
index d042679..9ced4db 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-8.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-8.c
@@ -41,6 +41,6 @@ main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-not "LOOP VECTORIZED" "vect" { target { ! { amdgcn*-*-* || aarch64_sve } } } } } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { amdgcn*-*-* || aarch64_sve } } } } */
-/* { dg-final { scan-tree-dump "multiple types in double reduction or condition reduction" "vect" { target { ! { amdgcn*-*-* || aarch64_sve } } } } } */
+/* { dg-final { scan-tree-dump-not "LOOP VECTORIZED" "vect" { target { ! { vect_fold_extract_last } } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_fold_extract_last } } } } */
+/* { dg-final { scan-tree-dump "multiple types in double reduction or condition reduction" "vect" { target { ! { vect_fold_extract_last } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr97832-2.c b/gcc/testsuite/gcc.dg/vect/pr97832-2.c
index 4f05781..7d8d269 100644
--- a/gcc/testsuite/gcc.dg/vect/pr97832-2.c
+++ b/gcc/testsuite/gcc.dg/vect/pr97832-2.c
@@ -25,5 +25,5 @@ void foo1x1(double* restrict y, const double* restrict x, int clen)
}
}
-/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" } } */
-/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" "vect" } } */
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" { target { ! { vect_load_lanes && vect_strided8 } } } } } */
+/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" "vect" { target { ! { vect_load_lanes && vect_strided8 } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr97832-3.c b/gcc/testsuite/gcc.dg/vect/pr97832-3.c
index ad1225d..c0603e1 100644
--- a/gcc/testsuite/gcc.dg/vect/pr97832-3.c
+++ b/gcc/testsuite/gcc.dg/vect/pr97832-3.c
@@ -46,5 +46,5 @@ void foo(double* restrict y, const double* restrict x0, const double* restrict x
}
}
-/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" } } */
-/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" "vect" } } */
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" { target { ! { vect_load_lanes && vect_strided8 } } } } } */
+/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" "vect" { target { ! { vect_load_lanes && vect_strided8 } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr97832-4.c b/gcc/testsuite/gcc.dg/vect/pr97832-4.c
index 74ae27f..c034428 100644
--- a/gcc/testsuite/gcc.dg/vect/pr97832-4.c
+++ b/gcc/testsuite/gcc.dg/vect/pr97832-4.c
@@ -24,5 +24,5 @@ void foo1x1(double* restrict y, const double* restrict x, int clen)
}
}
-/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" } } */
-/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" "vect" } } */
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" { target { ! { vect_load_lanes && vect_strided8 } } } } } */
+/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" "vect" { target { ! { vect_load_lanes && vect_strided8 } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-12a.c b/gcc/testsuite/gcc.dg/vect/slp-12a.c
index f0dda55..973de6a 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-12a.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-12a.c
@@ -76,5 +76,5 @@ int main (void)
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { vect_strided8 && vect_int_mult } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" { target { ! { vect_strided8 && vect_int_mult } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { vect_strided8 && vect_int_mult } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { { vect_strided8 && {! vect_load_lanes } } && vect_int_mult } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect" { target { ! { vect_strided8 && vect_int_mult } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-23.c b/gcc/testsuite/gcc.dg/vect/slp-23.c
index d32ee5b..8836acf 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-23.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-23.c
@@ -114,5 +114,5 @@ int main (void)
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_perm } } } } */
/* SLP fails for the second loop with variable-length SVE because
the load size is greater than the minimum vector size. */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_perm xfail { aarch64_sve && vect_variable_length } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_perm xfail { { aarch64_sve || riscv_v } && vect_variable_length } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-perm-10.c b/gcc/testsuite/gcc.dg/vect/slp-perm-10.c
index 2cce30c..03de4c6 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-perm-10.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-perm-10.c
@@ -53,4 +53,4 @@ int main ()
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_perm } } } */
/* SLP fails for variable-length SVE because the load size is greater
than the minimum vector size. */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target vect_perm xfail { aarch64_sve && vect_variable_length } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target vect_perm xfail { { aarch64_sve || riscv_v } && vect_variable_length } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-perm-4.c b/gcc/testsuite/gcc.dg/vect/slp-perm-4.c
index 107968f..f4bda39 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-perm-4.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-perm-4.c
@@ -115,4 +115,4 @@ int main (int argc, const char* argv[])
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { scan-tree-dump-times "gaps requires scalar epilogue loop" 0 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! { vect_load_lanes && vect_strided5 } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-reduc-4.c b/gcc/testsuite/gcc.dg/vect/slp-reduc-4.c
index 15f5c25..e2fe01b 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-reduc-4.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-reduc-4.c
@@ -60,6 +60,6 @@ int main (void)
/* For variable-length SVE, the number of scalar statements in the
reduction exceeds the number of elements in a 128-bit granule. */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_multiple_sizes } xfail { vect_no_int_min_max || { aarch64_sve && vect_variable_length } } } } } */
-/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" { target { vect_multiple_sizes } } } } */
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" { target { vect_multiple_sizes && { ! { vect_load_lanes && vect_strided8 } } } } } } */
/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 0 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-simd-clone-1.c b/gcc/testsuite/gcc.dg/vect/slp-simd-clone-1.c
new file mode 100644
index 0000000..6ccbb39
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/slp-simd-clone-1.c
@@ -0,0 +1,46 @@
+/* { dg-require-effective-target vect_simd_clones } */
+/* { dg-additional-options "-fopenmp-simd" } */
+
+#include "tree-vect.h"
+
+int x[1024];
+
+#pragma omp declare simd simdlen(4) notinbranch
+__attribute__((noinline)) int
+foo (int a, int b)
+{
+ return a + b;
+}
+
+void __attribute__((noipa))
+bar (void)
+{
+#pragma omp simd
+ for (int i = 0; i < 512; i++)
+ {
+ x[2*i+0] = foo (x[2*i+0], x[2*i+0]);
+ x[2*i+1] = foo (x[2*i+1], x[2*i+1]);
+ }
+}
+
+int
+main ()
+{
+ int i;
+ check_vect ();
+
+#pragma GCC novector
+ for (i = 0; i < 1024; i++)
+ x[i] = i;
+
+ bar ();
+
+#pragma GCC novector
+ for (i = 0; i < 1024; i++)
+ if (x[i] != i + i)
+ abort ();
+
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-simd-clone-2.c b/gcc/testsuite/gcc.dg/vect/slp-simd-clone-2.c
new file mode 100644
index 0000000..98387c9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/slp-simd-clone-2.c
@@ -0,0 +1,57 @@
+/* { dg-require-effective-target vect_simd_clones } */
+/* { dg-additional-options "-fopenmp-simd" } */
+/* { dg-additional-options "-mavx2" { target avx2_runtime } } */
+
+#include "tree-vect.h"
+
+int x[1024];
+
+#pragma omp declare simd simdlen(4) inbranch
+__attribute__((noinline)) int
+foo (int a, int b)
+{
+ return a + b;
+}
+
+void __attribute__((noipa))
+bar (void)
+{
+#pragma omp simd
+ for (int i = 0; i < 512; i++)
+ {
+ if (x[2*i+0] < 10)
+ x[2*i+0] = foo (x[2*i+0], x[2*i+0]);
+ if (x[2*i+1] < 20)
+ x[2*i+1] = foo (x[2*i+1], x[2*i+1]);
+ }
+}
+
+int
+main ()
+{
+ int i;
+ check_vect ();
+
+#pragma GCC novector
+ for (i = 0; i < 1024; i++)
+ x[i] = i;
+
+ bar ();
+
+#pragma GCC novector
+ for (i = 0; i < 1024; i++)
+ {
+ if (((i & 1) && i < 20)
+ || (!(i & 1) && i < 10))
+ {
+ if (x[i] != i + i)
+ abort ();
+ }
+ else if (x[i] != i)
+ abort ();
+ }
+
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" { target avx2_runtime } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1115.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1115.c
index 18cb5ec..3eda994 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1115.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1115.c
@@ -38,4 +38,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s114.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s114.c
index 59e5041..6972565 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s114.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s114.c
@@ -39,4 +39,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1161.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1161.c
index 28ce526..9098bea 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1161.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1161.c
@@ -45,4 +45,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1232.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1232.c
index 347ed47..66242bc 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1232.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1232.c
@@ -38,4 +38,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s124.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s124.c
index 0748110..70c3790 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s124.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s124.c
@@ -44,4 +44,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1279.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1279.c
index ce48bf7..ead646d 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1279.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s1279.c
@@ -40,4 +40,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s161.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s161.c
index 123ce6c..596d0e8 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s161.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s161.c
@@ -45,4 +45,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s253.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s253.c
index 4af52aa..805d9a4 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s253.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s253.c
@@ -41,4 +41,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s257.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s257.c
index 8a01633..51e3504 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s257.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s257.c
@@ -39,4 +39,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s271.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s271.c
index 03f806e..90f6461 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s271.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s271.c
@@ -39,4 +39,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2711.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2711.c
index 4510fae..ddc32e2 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2711.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2711.c
@@ -39,4 +39,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2712.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2712.c
index 92e9742..5750f9d 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2712.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s2712.c
@@ -39,4 +39,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s272.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s272.c
index ae6b23e..44ea7ec 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s272.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s272.c
@@ -41,4 +41,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s273.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s273.c
index dbdb5f6..8496722 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s273.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s273.c
@@ -39,4 +39,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s274.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s274.c
index 89b30e8..a03971e 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s274.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s274.c
@@ -41,4 +41,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s276.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s276.c
index 7ca7166..d383cd0 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s276.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s276.c
@@ -42,4 +42,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s278.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s278.c
index 7459017..cd9d28b 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s278.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s278.c
@@ -44,4 +44,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s279.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s279.c
index 3a024e9..bb49437 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s279.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s279.c
@@ -48,4 +48,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s3111.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s3111.c
index c7b2d61..4163dd8 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s3111.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s3111.c
@@ -41,4 +41,4 @@ int main (int argc, char **argv)
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c
index 5889858..98ba752 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c
@@ -44,4 +44,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! riscv_v } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s441.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s441.c
index e73f782..480e597 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s441.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s441.c
@@ -42,4 +42,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s443.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s443.c
index a07800b..709413f 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s443.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s443.c
@@ -47,4 +47,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-vif.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-vif.c
index 48e1c14..6eba464 100644
--- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-vif.c
+++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-vif.c
@@ -38,4 +38,4 @@ int main (int argc, char **argv)
return 0;
}
-/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! aarch64_sve } } } } */
+/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { { ! aarch64_sve } && { ! riscv_v } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-2.c b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-2.c
index 38994ea..7e16597 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-2.c
@@ -41,5 +41,5 @@ neg_xi (double *x)
return res_3;
}
-/* { dg-final { scan-tree-dump { = \.COND_ADD} "vect" { target { vect_double_cond_arith && vect_fully_masked } } } } */
-/* { dg-final { scan-tree-dump { = \.COND_SUB} "optimized" { target { vect_double_cond_arith && vect_fully_masked } } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?ADD} "vect" { target { vect_double_cond_arith && vect_fully_masked } } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?SUB} "optimized" { target { vect_double_cond_arith && vect_fully_masked } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-4.c b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-4.c
index 1af0fe6..7d26dbe 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-4.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-4.c
@@ -52,8 +52,8 @@ main (void)
return 0;
}
-/* { dg-final { scan-tree-dump { = \.COND_ADD} "optimized" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump { = \.COND_SUB} "optimized" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump { = \.COND_MUL} "optimized" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump { = \.COND_RDIV} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?ADD} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?SUB} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?MUL} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?RDIV} "optimized" { target vect_double_cond_arith } } } */
/* { dg-final { scan-tree-dump-not {VEC_COND_EXPR} "optimized" { target vect_double_cond_arith } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-5.c b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-5.c
index ec3d9db..f7daa13 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-5.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-5.c
@@ -54,8 +54,8 @@ main (void)
return 0;
}
-/* { dg-final { scan-tree-dump { = \.COND_ADD} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
-/* { dg-final { scan-tree-dump { = \.COND_SUB} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
-/* { dg-final { scan-tree-dump { = \.COND_MUL} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
-/* { dg-final { scan-tree-dump { = \.COND_RDIV} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?ADD} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?SUB} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?MUL} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?RDIV} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
/* { dg-final { scan-tree-dump-not {VEC_COND_EXPR} "optimized" { target { vect_double_cond_arith && vect_masked_store } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-6.c b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-6.c
index 2aeebd4..a80c30a 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-cond-arith-6.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-cond-arith-6.c
@@ -56,8 +56,8 @@ main (void)
}
/* { dg-final { scan-tree-dump-times {vectorizing stmts using SLP} 4 "vect" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump-times { = \.COND_ADD} 1 "optimized" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump-times { = \.COND_SUB} 1 "optimized" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump-times { = \.COND_MUL} 1 "optimized" { target vect_double_cond_arith } } } */
-/* { dg-final { scan-tree-dump-times { = \.COND_RDIV} 1 "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?ADD} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?SUB} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?MUL} "optimized" { target vect_double_cond_arith } } } */
+/* { dg-final { scan-tree-dump { = \.COND_(LEN_)?RDIV} "optimized" { target vect_double_cond_arith } } } */
/* { dg-final { scan-tree-dump-not {VEC_COND_EXPR} "optimized" { target vect_double_cond_arith } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-cond-reduc-4.c b/gcc/testsuite/gcc.dg/vect/vect-cond-reduc-4.c
index 8820075..8ea8c53 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-cond-reduc-4.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-cond-reduc-4.c
@@ -42,6 +42,7 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 2 "vect" { target vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 2 "vect" { target { vect_fold_extract_last && vect_pack_trunc } } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target { { vect_fold_extract_last } && { ! vect_pack_trunc } } } } } */
/* { dg-final { scan-tree-dump-times "condition expression based on integer induction." 2 "vect" { target { ! vect_fold_extract_last } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-live-2.c b/gcc/testsuite/gcc.dg/vect/vect-live-2.c
index dae36e9..0a49c96 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-live-2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-live-2.c
@@ -58,4 +58,4 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant(?:(?!failed)(?!Re-trying).)*succeeded" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-multitypes-16.c b/gcc/testsuite/gcc.dg/vect/vect-multitypes-16.c
index a61f1a9..fd17ad7 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-multitypes-16.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-multitypes-16.c
@@ -35,6 +35,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_unpack } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 0 "vect" { target { ! vect_unpack } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { { vect_unpack } || { vect_variable_length && vect_ext_char_longlong } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 0 "vect" { target { { ! vect_unpack } && {! { vect_variable_length && vect_ext_char_longlong } } } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-pr111779.c b/gcc/testsuite/gcc.dg/vect/vect-pr111779.c
new file mode 100644
index 0000000..79b72ae
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-pr111779.c
@@ -0,0 +1,56 @@
+#include <stdbool.h>
+#include "tree-vect.h"
+
+struct C
+{
+ int c;
+ int d;
+ bool f :1;
+ float e;
+};
+
+struct A
+{
+ unsigned int a;
+ unsigned char c1, c2;
+ bool b1 : 1;
+ bool b2 : 1;
+ bool b3 : 1;
+ struct C b4;
+};
+
+void __attribute__((noipa))
+foo (const struct A * __restrict x, int y)
+{
+ int s = 0, i = 0;
+ for (i = 0; i < y; ++i)
+ {
+ const struct A a = x[i];
+ s += a.b4.f ? 1 : 0;
+ }
+ if (s != 0)
+ __builtin_abort ();
+}
+
+int
+main ()
+{
+ struct A x[100];
+ int i;
+
+ check_vect ();
+
+ __builtin_memset (x, -1, sizeof (x));
+#pragma GCC novect
+ for (i = 0; i < 100; i++)
+ {
+ x[i].b1 = false;
+ x[i].b2 = false;
+ x[i].b3 = false;
+ x[i].b4.f = false;
+ }
+ foo (x, 100);
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target vect_int } } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/armv9_warning.c b/gcc/testsuite/gcc.target/aarch64/armv9_warning.c
new file mode 100644
index 0000000..35690d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/armv9_warning.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv9-a -Wpedantic -Werror" } */
+
+#include <arm_neon.h>
+
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_aligned.c b/gcc/testsuite/gcc.target/aarch64/ldp_aligned.c
index f44f961..75495d7 100644
--- a/gcc/testsuite/gcc.target/aarch64/ldp_aligned.c
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_aligned.c
@@ -14,16 +14,6 @@ TYPE ldp_aligned_##TYPE(char* ptr){ \
return a_0 + a_1; \
}
-#define LDP_TEST_UNALIGNED(TYPE) \
-TYPE ldp_unaligned_##TYPE(char* ptr){ \
- TYPE a_0, a_1; \
- TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
- TYPE *a = arr+1; \
- a_0 = a[0]; \
- a_1 = a[1]; \
- return a_0 + a_1; \
-}
-
#define LDP_TEST_ADJUST_ALIGNED(TYPE) \
TYPE ldp_aligned_adjust_##TYPE(char* ptr){ \
TYPE a_0, a_1, a_2, a_3, a_4; \
@@ -36,29 +26,11 @@ TYPE ldp_aligned_adjust_##TYPE(char* ptr){ \
return a_0 + a_1 + a_2 + a_3 + a_4; \
}
-#define LDP_TEST_ADJUST_UNALIGNED(TYPE) \
-TYPE ldp_unaligned_adjust_##TYPE(char* ptr){ \
- TYPE a_0, a_1, a_2, a_3, a_4; \
- TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
- TYPE *a = arr+1; \
- a_0 = a[100]; \
- a_1 = a[101]; \
- a_2 = a[102]; \
- a_3 = a[103]; \
- a_4 = a[110]; \
- return a_0 + a_1 + a_2 + a_3 + a_4; \
-}
-
LDP_TEST_ALIGNED(int32_t);
LDP_TEST_ALIGNED(int64_t);
LDP_TEST_ALIGNED(v4si);
-LDP_TEST_UNALIGNED(int32_t);
-LDP_TEST_UNALIGNED(int64_t);
-LDP_TEST_UNALIGNED(v4si);
LDP_TEST_ADJUST_ALIGNED(int32_t);
LDP_TEST_ADJUST_ALIGNED(int64_t);
-LDP_TEST_ADJUST_UNALIGNED(int32_t);
-LDP_TEST_ADJUST_UNALIGNED(int64_t);
/* { dg-final { scan-assembler-times "ldp\tw\[0-9\]+, w\[0-9\]" 3 } } */
/* { dg-final { scan-assembler-times "ldp\tx\[0-9\]+, x\[0-9\]" 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_unaligned.c b/gcc/testsuite/gcc.target/aarch64/ldp_unaligned.c
new file mode 100644
index 0000000..d9dcfe4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_unaligned.c
@@ -0,0 +1,40 @@
+/* { dg-options "-O2 --param=aarch64-ldp-policy=aligned -mcpu=generic" } */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+typedef int v4si __attribute__ ((vector_size (16)));
+
+#define LDP_TEST_UNALIGNED(TYPE) \
+TYPE ldp_unaligned_##TYPE(char* ptr){ \
+ TYPE a_0, a_1; \
+ TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
+ TYPE *a = arr+1; \
+ a_0 = a[0]; \
+ a_1 = a[1]; \
+ return a_0 + a_1; \
+}
+
+#define LDP_TEST_ADJUST_UNALIGNED(TYPE) \
+TYPE ldp_unaligned_adjust_##TYPE(char* ptr){ \
+ TYPE a_0, a_1, a_2, a_3, a_4; \
+ TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
+ TYPE *a = arr+1; \
+ a_0 = a[100]; \
+ a_1 = a[101]; \
+ a_2 = a[102]; \
+ a_3 = a[103]; \
+ a_4 = a[110]; \
+ return a_0 + a_1 + a_2 + a_3 + a_4; \
+}
+
+LDP_TEST_UNALIGNED(int32_t);
+LDP_TEST_UNALIGNED(int64_t);
+LDP_TEST_UNALIGNED(v4si);
+LDP_TEST_ADJUST_UNALIGNED(int32_t);
+LDP_TEST_ADJUST_UNALIGNED(int64_t);
+
+/* { dg-final { scan-assembler-times "ldp\tw\[0-9\]+, w\[0-9\]" 0 } } */
+/* { dg-final { scan-assembler-times "ldp\tx\[0-9\]+, x\[0-9\]" 0 } } */
+/* { dg-final { scan-assembler-times "ldp\tq\[0-9\]+, q\[0-9\]" 0 } } */
+
diff --git a/gcc/testsuite/gcc.target/aarch64/mops_4.c b/gcc/testsuite/gcc.target/aarch64/mops_4.c
index 1b87759..dd79611 100644
--- a/gcc/testsuite/gcc.target/aarch64/mops_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/mops_4.c
@@ -51,6 +51,54 @@ copy3 (int *x, int *y, long z, long *res)
}
/*
+** move1:
+** mov (x[0-9]+), x0
+** cpyp \[\1\]!, \[x1\]!, x2!
+** cpym \[\1\]!, \[x1\]!, x2!
+** cpye \[\1\]!, \[x1\]!, x2!
+** str x0, \[x3\]
+** ret
+*/
+void
+move1 (int *x, int *y, long z, int **res)
+{
+ __builtin_memmove (x, y, z);
+ *res = x;
+}
+
+/*
+** move2:
+** mov (x[0-9]+), x1
+** cpyp \[x0\]!, \[\1\]!, x2!
+** cpym \[x0\]!, \[\1\]!, x2!
+** cpye \[x0\]!, \[\1\]!, x2!
+** str x1, \[x3\]
+** ret
+*/
+void
+move2 (int *x, int *y, long z, int **res)
+{
+ __builtin_memmove (x, y, z);
+ *res = y;
+}
+
+/*
+** move3:
+** mov (x[0-9]+), x2
+** cpyp \[x0\]!, \[x1\]!, \1!
+** cpym \[x0\]!, \[x1\]!, \1!
+** cpye \[x0\]!, \[x1\]!, \1!
+** str x2, \[x3\]
+** ret
+*/
+void
+move3 (int *x, int *y, long z, long *res)
+{
+ __builtin_memmove (x, y, z);
+ *res = z;
+}
+
+/*
** set1:
** mov (x[0-9]+), x0
** setp \[\1\]!, x2!, x1
diff --git a/gcc/testsuite/gcc.target/aarch64/stp_aligned.c b/gcc/testsuite/gcc.target/aarch64/stp_aligned.c
index ab9c2f4..20bd953 100644
--- a/gcc/testsuite/gcc.target/aarch64/stp_aligned.c
+++ b/gcc/testsuite/gcc.target/aarch64/stp_aligned.c
@@ -13,15 +13,6 @@ TYPE *stp_aligned_##TYPE(char* ptr, TYPE x){ \
return arr; \
}
-#define STP_TEST_UNALIGNED(TYPE) \
-TYPE *stp_unaligned_##TYPE(char* ptr, TYPE x){ \
- TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
- TYPE *a = arr+1; \
- a[0] = x; \
- a[1] = x; \
- return a; \
-}
-
#define STP_TEST_ADJUST_ALIGNED(TYPE) \
TYPE *stp_aligned_adjust_##TYPE(char* ptr, TYPE x){ \
TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
@@ -32,27 +23,11 @@ TYPE *stp_aligned_adjust_##TYPE(char* ptr, TYPE x){ \
return arr; \
}
-#define STP_TEST_ADJUST_UNALIGNED(TYPE) \
-TYPE *stp_unaligned_adjust_##TYPE(char* ptr, TYPE x){ \
- TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
- TYPE *a = arr+1; \
- a[100] = x; \
- a[101] = x; \
- a[102] = x; \
- a[103] = x; \
- return a; \
-}
-
STP_TEST_ALIGNED(int32_t);
STP_TEST_ALIGNED(int64_t);
STP_TEST_ALIGNED(v4si);
-STP_TEST_UNALIGNED(int32_t);
-STP_TEST_UNALIGNED(int64_t);
-STP_TEST_UNALIGNED(v4si);
STP_TEST_ADJUST_ALIGNED(int32_t);
STP_TEST_ADJUST_ALIGNED(int64_t);
-STP_TEST_ADJUST_UNALIGNED(int32_t);
-STP_TEST_ADJUST_UNALIGNED(int64_t);
/* { dg-final { scan-assembler-times "stp\tw\[0-9\]+, w\[0-9\]" 3 } } */
/* { dg-final { scan-assembler-times "stp\tx\[0-9\]+, x\[0-9\]" 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/stp_unaligned.c b/gcc/testsuite/gcc.target/aarch64/stp_unaligned.c
new file mode 100644
index 0000000..9a5690a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/stp_unaligned.c
@@ -0,0 +1,37 @@
+/* { dg-options "-O2 --param=aarch64-stp-policy=aligned -mcpu=generic" } */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+typedef int v4si __attribute__ ((vector_size (16)));
+
+#define STP_TEST_UNALIGNED(TYPE) \
+TYPE *stp_unaligned_##TYPE(char* ptr, TYPE x){ \
+ TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
+ TYPE *a = arr+1; \
+ a[0] = x; \
+ a[1] = x; \
+ return a; \
+}
+
+#define STP_TEST_ADJUST_UNALIGNED(TYPE) \
+TYPE *stp_unaligned_adjust_##TYPE(char* ptr, TYPE x){ \
+ TYPE *arr = (TYPE*) ((uintptr_t)ptr & ~(2 * 8 * _Alignof(TYPE) - 1)); \
+ TYPE *a = arr+1; \
+ a[100] = x; \
+ a[101] = x; \
+ a[102] = x; \
+ a[103] = x; \
+ return a; \
+}
+
+STP_TEST_UNALIGNED(int32_t);
+STP_TEST_UNALIGNED(int64_t);
+STP_TEST_UNALIGNED(v4si);
+STP_TEST_ADJUST_UNALIGNED(int32_t);
+STP_TEST_ADJUST_UNALIGNED(int64_t);
+
+/* { dg-final { scan-assembler-times "stp\tw\[0-9\]+, w\[0-9\]" 0 } } */
+/* { dg-final { scan-assembler-times "stp\tx\[0-9\]+, x\[0-9\]" 0 } } */
+/* { dg-final { scan-assembler-times "stp\tq\[0-9\]+, q\[0-9\]" 0 } } */
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2.c
index 4622a1e..bbb45d2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2.c
@@ -215,9 +215,9 @@ test_7 (void)
** add sp, sp, #?16
** ldr p4, \[sp\]
** addvl sp, sp, #1
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -283,9 +283,9 @@ test_9 (int n)
** addvl sp, x29, #-1
** ldr p4, \[sp\]
** addvl sp, sp, #1
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -319,9 +319,9 @@ test_10 (int n)
** addvl sp, x29, #-1
** ldr p4, \[sp\]
** addvl sp, sp, #1
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** add sp, sp, #?3008
** add sp, sp, #?126976
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_1024.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_1024.c
index e31200f..9437c7a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_1024.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_1024.c
@@ -176,9 +176,9 @@ test_7 (void)
** add sp, sp, #?16
** ldr z16, \[sp\]
** add sp, sp, #?128
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -234,9 +234,9 @@ test_9 (int n)
** sub sp, x29, #128
** ldr z16, \[sp\]
** add sp, sp, #?128
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -268,9 +268,9 @@ test_10 (int n)
** sub sp, x29, #128
** ldr z16, \[sp\]
** add sp, sp, #?128
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** add sp, sp, #?3008
** add sp, sp, #?126976
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_128.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_128.c
index 41193b4..b4e1627 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_128.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_128.c
@@ -176,9 +176,9 @@ test_7 (void)
** add sp, sp, #?16
** ldr p4, \[sp\]
** add sp, sp, #?16
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -234,9 +234,9 @@ test_9 (int n)
** sub sp, x29, #16
** ldr p4, \[sp\]
** add sp, sp, #?16
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -267,9 +267,9 @@ test_10 (int n)
** sub sp, x29, #16
** ldr p4, \[sp\]
** add sp, sp, #?16
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** add sp, sp, #?3008
** add sp, sp, #?126976
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_2048.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_2048.c
index f637516..9212093 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_2048.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_2048.c
@@ -176,9 +176,9 @@ test_7 (void)
** add sp, sp, #?16
** ldr z16, \[sp\]
** add sp, sp, #?256
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -234,9 +234,9 @@ test_9 (int n)
** sub sp, x29, #256
** ldr z16, \[sp\]
** add sp, sp, #?256
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -268,9 +268,9 @@ test_10 (int n)
** sub sp, x29, #256
** ldr z16, \[sp\]
** add sp, sp, #?256
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** add sp, sp, #?3008
** add sp, sp, #?126976
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_256.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_256.c
index 6bcbb57..bd8bef0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_256.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_256.c
@@ -176,9 +176,9 @@ test_7 (void)
** add sp, sp, #?16
** ldr z16, \[sp\]
** add sp, sp, #?32
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -234,9 +234,9 @@ test_9 (int n)
** sub sp, x29, #32
** ldr z16, \[sp\]
** add sp, sp, #?32
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -267,9 +267,9 @@ test_10 (int n)
** sub sp, x29, #32
** ldr z16, \[sp\]
** add sp, sp, #?32
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** add sp, sp, #?3008
** add sp, sp, #?126976
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_512.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_512.c
index dc7df8e..2c76cce 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_512.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_2_512.c
@@ -176,9 +176,9 @@ test_7 (void)
** add sp, sp, #?16
** ldr z16, \[sp\]
** add sp, sp, #?64
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -234,9 +234,9 @@ test_9 (int n)
** sub sp, x29, #64
** ldr z16, \[sp\]
** add sp, sp, #?64
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** mov x12, #?4144
** add sp, sp, x12
** ret
@@ -268,9 +268,9 @@ test_10 (int n)
** sub sp, x29, #64
** ldr z16, \[sp\]
** add sp, sp, #?64
+** ldp x29, x30, \[sp\]
** ldp x24, x25, \[sp, 16\]
** ldr x26, \[sp, 32\]
-** ldp x29, x30, \[sp\]
** add sp, sp, #?3008
** add sp, sp, #?126976
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pre_cond_share_1.c b/gcc/testsuite/gcc.target/aarch64/sve/pre_cond_share_1.c
new file mode 100644
index 0000000..b51d0f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pre_cond_share_1.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-Ofast -fdump-tree-optimized" } */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <math.h>
+#include <float.h>
+
+typedef struct __attribute__((__packed__)) _Atom {
+ float x, y, z;
+ int32_t type;
+} Atom;
+
+typedef struct __attribute__((__packed__)) _FFParams {
+ int32_t hbtype;
+ float radius;
+ float hphb;
+ float elsc;
+} FFParams;
+
+#ifndef PPWI
+#define PPWI (64)
+#endif
+
+#ifndef ITERS
+#define ITERS 8
+#endif
+
+#define DIFF_TOLERANCE_PCT 0.025f
+
+#define POSES_SIZE 393216
+#define PROTEIN_SIZE 938
+#define LIGAND_SIZE 26
+#define FORCEFIELD_SIZE 34
+
+#define ZERO 0.0f
+#define QUARTER 0.25f
+#define HALF 0.5f
+#define ONE 1.0f
+#define TWO 2.0f
+#define FOUR 4.0f
+#define CNSTNT 45.0f
+
+// Energy evaluation parameters
+#define HBTYPE_F 70
+#define HBTYPE_E 69
+#define HARDNESS 38.0f
+#define NPNPDIST 5.5f
+#define NPPDIST 1.0f
+
+void
+fasten_main(size_t group, size_t ntypes, size_t nposes, size_t natlig, size_t natpro, //
+ const Atom *protein, const Atom *ligand, //
+ const float *transforms_0, const float *transforms_1, const float *transforms_2, //
+ const float *transforms_3, const float *transforms_4, const float *transforms_5, //
+ const FFParams *forcefield, float *energies //
+) {
+
+ float etot[PPWI];
+ float lpos_x[PPWI];
+
+ for (int l = 0; l < PPWI; l++) {
+ etot[l] = 0.f;
+ lpos_x[l] = 0.f;
+ }
+
+ // Loop over ligand atoms
+ for (int il = 0; il < natlig; il++) {
+ // Load ligand atom data
+ const Atom l_atom = ligand[il];
+ const FFParams l_params = forcefield[l_atom.type];
+ const int lhphb_ltz = l_params.hphb < 0.f;
+ const int lhphb_gtz = l_params.hphb > 0.f;
+
+ // Transform ligand atom
+
+ // Loop over protein atoms
+ for (int ip = 0; ip < natpro; ip++) {
+ // Load protein atom data
+ const Atom p_atom = protein[ip];
+ const FFParams p_params = forcefield[p_atom.type];
+
+ const float radij = p_params.radius + l_params.radius;
+ const float r_radij = ONE / radij;
+
+ const float elcdst = (p_params.hbtype == HBTYPE_F && l_params.hbtype == HBTYPE_F) ? FOUR
+ : TWO;
+ const float elcdst1 = (p_params.hbtype == HBTYPE_F && l_params.hbtype == HBTYPE_F)
+ ? QUARTER : HALF;
+ const int type_E = ((p_params.hbtype == HBTYPE_E || l_params.hbtype == HBTYPE_E));
+
+ const int phphb_ltz = p_params.hphb < 0.f;
+ const int phphb_gtz = p_params.hphb > 0.f;
+ const int phphb_nz = p_params.hphb != 0.f;
+ const float p_hphb = p_params.hphb * (phphb_ltz && lhphb_gtz ? -ONE : ONE);
+ const float l_hphb = l_params.hphb * (phphb_gtz && lhphb_ltz ? -ONE : ONE);
+ const float distdslv = (phphb_ltz ? (lhphb_ltz ? NPNPDIST : NPPDIST) : (lhphb_ltz
+ ? NPPDIST
+ : -FLT_MAX));
+ const float r_distdslv = ONE / distdslv;
+
+ const float chrg_init = l_params.elsc * p_params.elsc;
+ const float dslv_init = p_hphb + l_hphb;
+
+ for (int l = 0; l < PPWI; l++) {
+ // Calculate distance between atoms
+ const float x = lpos_x[l] - p_atom.x;
+ const float distij = (x * x);
+
+ // Calculate the sum of the sphere radii
+ const float distbb = distij - radij;
+
+ const int zone1 = (distbb < ZERO);
+
+ // Calculate formal and dipole charge interactions
+ float chrg_e = chrg_init * ((zone1 ? ONE : (ONE - distbb * elcdst1)) *
+ (distbb < elcdst ? ONE : ZERO));
+ float neg_chrg_e = -fabsf(chrg_e);
+ chrg_e = type_E ? neg_chrg_e : chrg_e;
+ etot[l] += chrg_e * CNSTNT;
+ }
+ }
+ }
+
+ // Write result
+ for (int l = 0; l < PPWI; l++) {
+ energies[group * PPWI + l] = etot[l] * HALF;
+ }
+}
+
+/* { dg-final { scan-tree-dump-times {\.COND_MUL} 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times {\.VCOND} 1 "optimized" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/test_frame_10.c b/gcc/testsuite/gcc.target/aarch64/test_frame_10.c
index c195050..c54ab2d 100644
--- a/gcc/testsuite/gcc.target/aarch64/test_frame_10.c
+++ b/gcc/testsuite/gcc.target/aarch64/test_frame_10.c
@@ -14,6 +14,6 @@
t_frame_pattern_outgoing (test10, 480, "x19", 24, a[8], a[9], a[10])
t_frame_run (test10)
-/* { dg-final { scan-assembler-times "stp\tx19, x30, \\\[sp, \[0-9\]+\\\]" 1 } } */
-/* { dg-final { scan-assembler "ldp\tx19, x30, \\\[sp, \[0-9\]+\\\]" } } */
+/* { dg-final { scan-assembler-times "stp\tx30, x19, \\\[sp, \[0-9\]+\\\]" 1 } } */
+/* { dg-final { scan-assembler "ldp\tx30, x19, \\\[sp, \[0-9\]+\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/test_frame_2.c b/gcc/testsuite/gcc.target/aarch64/test_frame_2.c
index 7e5df84..0d71531 100644
--- a/gcc/testsuite/gcc.target/aarch64/test_frame_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/test_frame_2.c
@@ -14,6 +14,6 @@ t_frame_pattern (test2, 200, "x19")
t_frame_run (test2)
-/* { dg-final { scan-assembler-times "stp\tx19, x30, \\\[sp, -\[0-9\]+\\\]!" 1 } } */
-/* { dg-final { scan-assembler "ldp\tx19, x30, \\\[sp\\\], \[0-9\]+" } } */
+/* { dg-final { scan-assembler-times "stp\tx30, x19, \\\[sp, -\[0-9\]+\\\]!" 1 } } */
+/* { dg-final { scan-assembler "ldp\tx30, x19, \\\[sp\\\], \[0-9\]+" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/test_frame_4.c b/gcc/testsuite/gcc.target/aarch64/test_frame_4.c
index ed13487..b41229c 100644
--- a/gcc/testsuite/gcc.target/aarch64/test_frame_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/test_frame_4.c
@@ -13,6 +13,6 @@
t_frame_pattern (test4, 400, "x19")
t_frame_run (test4)
-/* { dg-final { scan-assembler-times "stp\tx19, x30, \\\[sp, -\[0-9\]+\\\]!" 1 } } */
-/* { dg-final { scan-assembler "ldp\tx19, x30, \\\[sp\\\], \[0-9\]+" } } */
+/* { dg-final { scan-assembler-times "stp\tx30, x19, \\\[sp, -\[0-9\]+\\\]!" 1 } } */
+/* { dg-final { scan-assembler "ldp\tx30, x19, \\\[sp\\\], \[0-9\]+" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/test_frame_7.c b/gcc/testsuite/gcc.target/aarch64/test_frame_7.c
index 96452794..5702656 100644
--- a/gcc/testsuite/gcc.target/aarch64/test_frame_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/test_frame_7.c
@@ -13,6 +13,6 @@
t_frame_pattern (test7, 700, "x19")
t_frame_run (test7)
-/* { dg-final { scan-assembler-times "stp\tx19, x30, \\\[sp]" 1 } } */
-/* { dg-final { scan-assembler "ldp\tx19, x30, \\\[sp\\\]" } } */
+/* { dg-final { scan-assembler-times "stp\tx30, x19, \\\[sp]" 1 } } */
+/* { dg-final { scan-assembler "ldp\tx30, x19, \\\[sp\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/xorsign.c b/gcc/testsuite/gcc.target/aarch64/xorsign.c
index 22c5829..dfb7ba7 100644
--- a/gcc/testsuite/gcc.target/aarch64/xorsign.c
+++ b/gcc/testsuite/gcc.target/aarch64/xorsign.c
@@ -79,8 +79,9 @@ check_l_neg_rev (long double x, long double y)
return __builtin_copysignl (-1.0, y) * x;
}
-/* { dg-final { scan-assembler "\[ \t\]?eor\[ \t\]?" } } */
-/* { dg-final { scan-assembler "\[ \t\]?and\[ \t\]?" } } */
+/* { dg-final { scan-assembler-times {eor\tv[0-9]+\.16b, v[0-9]+\.16b, v[0-9]+\.16b} 8 } } */
+/* { dg-final { scan-assembler-times {and\tv[0-9]+\.16b, v[0-9]+\.16b, v[0-9]+\.16b} 8 } } */
/* { dg-final { scan-assembler-not "copysign" } } */
+/* { dg-final { scan-assembler-not "fmov" } } */
/* { dg-final { scan-assembler-not "\[ \t\]?orr\[ \t\]?" } } */
/* { dg-final { scan-assembler-not "\[ \t\]?fmul\[ \t\]?" } } */
diff --git a/gcc/testsuite/gcc.target/arc/add_f-combine.c b/gcc/testsuite/gcc.target/arc/add_f-combine.c
new file mode 100644
index 0000000..cfa3676
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/add_f-combine.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O1" } */
+
+/* Check if combiner is matching add.f patterns. */
+
+int a1 (int a, int b)
+{
+ if (a + b)
+ {
+ return 1;
+ }
+ return a + 2;
+}
+
+/* { dg-final { scan-assembler "add.f\\s+0,r\\d+,r\\d+" } } */
diff --git a/gcc/testsuite/gcc.target/arc/add_n-combine.c b/gcc/testsuite/gcc.target/arc/add_n-combine.c
index 84e261e..fd311b3 100644
--- a/gcc/testsuite/gcc.target/arc/add_n-combine.c
+++ b/gcc/testsuite/gcc.target/arc/add_n-combine.c
@@ -46,5 +46,5 @@ void f() {
}
/* { dg-final { scan-assembler "@at1\\+1" } } */
-/* { dg-final { scan-assembler "@at2\\+2" } } */
+/* { dg-final { scan-assembler "add2" } } */
/* { dg-final { scan-assembler "add3" } } */
diff --git a/gcc/testsuite/gcc.target/arc/ashrsi-1.c b/gcc/testsuite/gcc.target/arc/ashrsi-1.c
new file mode 100644
index 0000000..3100aa3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/ashrsi-1.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=hs" } */
+
+int ashr1(int x) { return x >> 1; }
+int ashr2(int x) { return x >> 2; }
+int ashr3(int x) { return x >> 3; }
+int ashr4(int x) { return x >> 4; }
+int ashr5(int x) { return x >> 5; }
+int ashr6(int x) { return x >> 6; }
+int ashr7(int x) { return x >> 7; }
+int ashr8(int x) { return x >> 8; }
+int ashr9(int x) { return x >> 9; }
+int ashr10(int x) { return x >> 10; }
+int ashr11(int x) { return x >> 11; }
+int ashr12(int x) { return x >> 12; }
+int ashr13(int x) { return x >> 13; }
+int ashr14(int x) { return x >> 14; }
+int ashr15(int x) { return x >> 15; }
+int ashr16(int x) { return x >> 16; }
+int ashr17(int x) { return x >> 17; }
+int ashr18(int x) { return x >> 18; }
+int ashr19(int x) { return x >> 19; }
+int ashr20(int x) { return x >> 20; }
+int ashr21(int x) { return x >> 21; }
+int ashr22(int x) { return x >> 22; }
+int ashr23(int x) { return x >> 23; }
+int ashr24(int x) { return x >> 24; }
+int ashr25(int x) { return x >> 25; }
+int ashr26(int x) { return x >> 26; }
+int ashr27(int x) { return x >> 27; }
+int ashr28(int x) { return x >> 28; }
+int ashr29(int x) { return x >> 29; }
+int ashr30(int x) { return x >> 30; }
+int ashr31(int x) { return x >> 31; }
+
+/* { dg-final { scan-assembler-times "asr_s\\s+r0,r0" 31 } } */
diff --git a/gcc/testsuite/gcc.target/arc/ashrsi-2.c b/gcc/testsuite/gcc.target/arc/ashrsi-2.c
new file mode 100644
index 0000000..b551ee5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/ashrsi-2.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+int foo(int x) { return x >> 1; }
+
+/* { dg-final { scan-assembler-times "asr_s\\s+r0,r0" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/ashrsi-3.c b/gcc/testsuite/gcc.target/arc/ashrsi-3.c
new file mode 100644
index 0000000..c030682
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/ashrsi-3.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+int foo(int x, int y) { return y >> 1; }
+
+/* { dg-final { scan-assembler-times "asr_s\\s+r0,r1" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/ashrsi-4.c b/gcc/testsuite/gcc.target/arc/ashrsi-4.c
new file mode 100644
index 0000000..98e58bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/ashrsi-4.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+int foo(int x) { return x >> 2; }
+
+/* { dg-final { scan-assembler-times "asr_s\\s+r0,r0" 2 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/ashrsi-5.c b/gcc/testsuite/gcc.target/arc/ashrsi-5.c
new file mode 100644
index 0000000..f40af2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/ashrsi-5.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+int foo(int x, int y) { return y >> 2; }
+
+/* { dg-final { scan-assembler-times "asr_s\\s+r0,r0" 1 } } */
+/* { dg-final { scan-assembler-times "asr_s\\s+r0,r1" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/enter-dw2-1.c b/gcc/testsuite/gcc.target/arc/enter-dw2-1.c
index 25d0356..653ea72 100644
--- a/gcc/testsuite/gcc.target/arc/enter-dw2-1.c
+++ b/gcc/testsuite/gcc.target/arc/enter-dw2-1.c
@@ -16,13 +16,13 @@ void foo (void)
}
-/* { dg-final { scan-assembler-times "enter_s" 1 } } */
+/* { dg-final { scan-assembler-times "enter_s" 1 {xfail *-linux-* } } } */
/* { dg-final { scan-assembler-times "\.cfi_def_cfa_offset 32" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 31, -32" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 13, -28" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 14, -24" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 15, -20" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 16, -16" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 17, -12" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 18, -8" 1 } } */
-/* { dg-final { scan-assembler-times "\.cfi_offset 19, -4" 1 } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 31, -32" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 13, -28" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 14, -24" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 15, -20" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 16, -16" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 17, -12" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 18, -8" 1 {xfail *-linux-* } } } */
+/* { dg-final { scan-assembler-times "\.cfi_offset 19, -4" 1 {xfail *-linux-* } } } */
diff --git a/gcc/testsuite/gcc.target/arc/firq-4.c b/gcc/testsuite/gcc.target/arc/firq-4.c
index 969ee79..cd939bf 100644
--- a/gcc/testsuite/gcc.target/arc/firq-4.c
+++ b/gcc/testsuite/gcc.target/arc/firq-4.c
@@ -28,4 +28,3 @@ handler1 (void)
/* { dg-final { scan-assembler-not "fp,\\\[sp" } } */
/* { dg-final { scan-assembler-not "push.*fp" } } */
-/* { dg-final { scan-assembler "mov_s.*fp,sp" } } */
diff --git a/gcc/testsuite/gcc.target/arc/firq-6.c b/gcc/testsuite/gcc.target/arc/firq-6.c
index 9421200..df04e46 100644
--- a/gcc/testsuite/gcc.target/arc/firq-6.c
+++ b/gcc/testsuite/gcc.target/arc/firq-6.c
@@ -18,4 +18,3 @@ handler1 (void)
"r25", "fp");
}
/* { dg-final { scan-assembler-not "(s|l)(t|d)d.*r\[0-9\]+,\\\[sp,\[0-9\]+\\\]" } } */
-/* { dg-final { scan-assembler "mov_s.*fp,sp" } } */
diff --git a/gcc/testsuite/gcc.target/arc/loop-3.c b/gcc/testsuite/gcc.target/arc/loop-3.c
index 7f55e2f..ae0d611 100644
--- a/gcc/testsuite/gcc.target/arc/loop-3.c
+++ b/gcc/testsuite/gcc.target/arc/loop-3.c
@@ -23,5 +23,5 @@ void fn1(void)
}
}
-/* { dg-final { scan-assembler "bne.*@.L2" } } */
+/* { dg-final { scan-assembler "bne.*\\.L2" } } */
/* { dg-final { scan-assembler-not "add.eq" } } */
diff --git a/gcc/testsuite/gcc.target/arc/lshrsi-1.c b/gcc/testsuite/gcc.target/arc/lshrsi-1.c
new file mode 100644
index 0000000..9bec79d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/lshrsi-1.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=hs" } */
+
+unsigned int lshr1(unsigned int x) { return x >> 1; }
+unsigned int lshr2(unsigned int x) { return x >> 2; }
+unsigned int lshr3(unsigned int x) { return x >> 3; }
+unsigned int lshr4(unsigned int x) { return x >> 4; }
+unsigned int lshr5(unsigned int x) { return x >> 5; }
+unsigned int lshr6(unsigned int x) { return x >> 6; }
+unsigned int lshr7(unsigned int x) { return x >> 7; }
+unsigned int lshr8(unsigned int x) { return x >> 8; }
+unsigned int lshr9(unsigned int x) { return x >> 9; }
+unsigned int lshr10(unsigned int x) { return x >> 10; }
+unsigned int lshr11(unsigned int x) { return x >> 11; }
+unsigned int lshr12(unsigned int x) { return x >> 12; }
+unsigned int lshr13(unsigned int x) { return x >> 13; }
+unsigned int lshr14(unsigned int x) { return x >> 14; }
+unsigned int lshr15(unsigned int x) { return x >> 15; }
+unsigned int lshr16(unsigned int x) { return x >> 16; }
+unsigned int lshr17(unsigned int x) { return x >> 17; }
+unsigned int lshr18(unsigned int x) { return x >> 18; }
+unsigned int lshr19(unsigned int x) { return x >> 19; }
+unsigned int lshr20(unsigned int x) { return x >> 20; }
+unsigned int lshr21(unsigned int x) { return x >> 21; }
+unsigned int lshr22(unsigned int x) { return x >> 22; }
+unsigned int lshr23(unsigned int x) { return x >> 23; }
+unsigned int lshr24(unsigned int x) { return x >> 24; }
+unsigned int lshr25(unsigned int x) { return x >> 25; }
+unsigned int lshr26(unsigned int x) { return x >> 26; }
+unsigned int lshr27(unsigned int x) { return x >> 27; }
+unsigned int lshr28(unsigned int x) { return x >> 28; }
+unsigned int lshr29(unsigned int x) { return x >> 29; }
+unsigned int lshr30(unsigned int x) { return x >> 30; }
+unsigned int lshr31(unsigned int x) { return x >> 31; }
+
+/* { dg-final { scan-assembler-times "lsr_s\\s+r0,r0" 31 } } */
diff --git a/gcc/testsuite/gcc.target/arc/lshrsi-2.c b/gcc/testsuite/gcc.target/arc/lshrsi-2.c
new file mode 100644
index 0000000..d857740
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/lshrsi-2.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x) { return x >> 1; }
+
+/* { dg-final { scan-assembler-times "lsr_s\\s+r0,r0" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/lshrsi-3.c b/gcc/testsuite/gcc.target/arc/lshrsi-3.c
new file mode 100644
index 0000000..58bfac0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/lshrsi-3.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x, unsigned int y){ return y >> 1; }
+
+/* { dg-final { scan-assembler-times "lsr_s\\s+r0,r1" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/lshrsi-4.c b/gcc/testsuite/gcc.target/arc/lshrsi-4.c
new file mode 100644
index 0000000..3094de2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/lshrsi-4.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x) { return x >> 2; }
+
+/* { dg-final { scan-assembler-times "lsr_s\\s+r0,r0" 2 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/lshrsi-5.c b/gcc/testsuite/gcc.target/arc/lshrsi-5.c
new file mode 100644
index 0000000..dce3f00
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/lshrsi-5.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x, unsigned int y){ return y >> 2; }
+
+/* { dg-final { scan-assembler-times "lsr_s\\s+r0,r0" 1 } } */
+/* { dg-final { scan-assembler-times "lsr_s\\s+r0,r1" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
diff --git a/gcc/testsuite/gcc.target/arc/mtune-ARC600.c b/gcc/testsuite/gcc.target/arc/mtune-ARC600.c
deleted file mode 100644
index a483d14..0000000
--- a/gcc/testsuite/gcc.target/arc/mtune-ARC600.c
+++ /dev/null
@@ -1,4 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mtune=ARC600" } */
-
-/* { dg-final { scan-assembler ".cpu ARC700" } } */
diff --git a/gcc/testsuite/gcc.target/arc/mtune-ARC601.c b/gcc/testsuite/gcc.target/arc/mtune-ARC601.c
deleted file mode 100644
index ed57bd7..0000000
--- a/gcc/testsuite/gcc.target/arc/mtune-ARC601.c
+++ /dev/null
@@ -1,4 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mtune=ARC601" } */
-
-/* { dg-final { scan-assembler ".cpu ARC700" } } */
diff --git a/gcc/testsuite/gcc.target/arc/mtune-ARC700-xmac b/gcc/testsuite/gcc.target/arc/mtune-ARC700-xmac
deleted file mode 100644
index 2f1e137..0000000
--- a/gcc/testsuite/gcc.target/arc/mtune-ARC700-xmac
+++ /dev/null
@@ -1,4 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mtune=ARC700-xmac" } */
-
-/* { dg-final { scan-assembler ".cpu ARC700" } } */
diff --git a/gcc/testsuite/gcc.target/arc/mtune-ARC700.c b/gcc/testsuite/gcc.target/arc/mtune-ARC700.c
deleted file mode 100644
index 851ea73..0000000
--- a/gcc/testsuite/gcc.target/arc/mtune-ARC700.c
+++ /dev/null
@@ -1,4 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mtune=ARC700" } */
-
-/* { dg-final { scan-assembler ".cpu ARC700" } } */
diff --git a/gcc/testsuite/gcc.target/arc/mtune-ARC725D.c b/gcc/testsuite/gcc.target/arc/mtune-ARC725D.c
deleted file mode 100644
index e2aa484..0000000
--- a/gcc/testsuite/gcc.target/arc/mtune-ARC725D.c
+++ /dev/null
@@ -1,4 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mtune=ARC725D" } */
-
-/* { dg-final { scan-assembler ".cpu ARC700" } } */
diff --git a/gcc/testsuite/gcc.target/arc/mtune-ARC750D.c b/gcc/testsuite/gcc.target/arc/mtune-ARC750D.c
deleted file mode 100644
index 2092330..0000000
--- a/gcc/testsuite/gcc.target/arc/mtune-ARC750D.c
+++ /dev/null
@@ -1,4 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-mtune=ARC750D" } */
-
-/* { dg-final { scan-assembler ".cpu ARC700" } } */
diff --git a/gcc/testsuite/gcc.target/arc/scc-ltu.c b/gcc/testsuite/gcc.target/arc/scc-ltu.c
new file mode 100644
index 0000000..653c55d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/scc-ltu.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x, unsigned int y)
+{
+ return (x+y) < x;
+}
+
+/* { dg-final { scan-assembler "rlc\\s+r0,0" } } */
+/* { dg-final { scan-assembler "add.f\\s+0,r0,r1" } } */
+/* { dg-final { scan-assembler-not "mov_s\\s+r0,1" } } */
+/* { dg-final { scan-assembler-not "mov\.hs\\s+r0,0" } } */
diff --git a/gcc/testsuite/gcc.target/arc/shlsi-1.c b/gcc/testsuite/gcc.target/arc/shlsi-1.c
new file mode 100644
index 0000000..eea7c56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/shlsi-1.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=hs" } */
+
+unsigned int shl1(unsigned int x) { return x << 1; }
+unsigned int shl2(unsigned int x) { return x << 2; }
+unsigned int shl3(unsigned int x) { return x << 3; }
+unsigned int shl4(unsigned int x) { return x << 4; }
+unsigned int shl5(unsigned int x) { return x << 5; }
+unsigned int shl6(unsigned int x) { return x << 6; }
+unsigned int shl7(unsigned int x) { return x << 7; }
+unsigned int shl8(unsigned int x) { return x << 8; }
+unsigned int shl9(unsigned int x) { return x << 9; }
+unsigned int shl10(unsigned int x) { return x << 10; }
+unsigned int shl11(unsigned int x) { return x << 11; }
+unsigned int shl12(unsigned int x) { return x << 12; }
+unsigned int shl13(unsigned int x) { return x << 13; }
+unsigned int shl14(unsigned int x) { return x << 14; }
+unsigned int shl15(unsigned int x) { return x << 15; }
+unsigned int shl16(unsigned int x) { return x << 16; }
+unsigned int shl17(unsigned int x) { return x << 17; }
+unsigned int shl18(unsigned int x) { return x << 18; }
+unsigned int shl19(unsigned int x) { return x << 19; }
+unsigned int shl20(unsigned int x) { return x << 20; }
+unsigned int shl21(unsigned int x) { return x << 21; }
+unsigned int shl22(unsigned int x) { return x << 22; }
+unsigned int shl23(unsigned int x) { return x << 23; }
+unsigned int shl24(unsigned int x) { return x << 24; }
+unsigned int shl25(unsigned int x) { return x << 25; }
+unsigned int shl26(unsigned int x) { return x << 26; }
+unsigned int shl27(unsigned int x) { return x << 27; }
+unsigned int shl28(unsigned int x) { return x << 28; }
+unsigned int shl29(unsigned int x) { return x << 29; }
+unsigned int shl30(unsigned int x) { return x << 30; }
+unsigned int shl31(unsigned int x) { return x << 31; }
+
+/* { dg-final { scan-assembler-times "asl_s\\s+r0,r0,\[1-9\]" 31 } } */
diff --git a/gcc/testsuite/gcc.target/arc/shlsi-2.c b/gcc/testsuite/gcc.target/arc/shlsi-2.c
new file mode 100644
index 0000000..ab8d2f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/shlsi-2.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x) { return x << 1; }
+
+/* { dg-final { scan-assembler-times "asl_s\\s+r0,r0" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
+
diff --git a/gcc/testsuite/gcc.target/arc/shlsi-3.c b/gcc/testsuite/gcc.target/arc/shlsi-3.c
new file mode 100644
index 0000000..244a786
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/shlsi-3.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x, unsigned int y) { return y << 1; }
+
+/* { dg-final { scan-assembler-times "asl_s\\s+r0,r1" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
+
diff --git a/gcc/testsuite/gcc.target/arc/shlsi-4.c b/gcc/testsuite/gcc.target/arc/shlsi-4.c
new file mode 100644
index 0000000..8fdc25e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/shlsi-4.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x) { return x << 2; }
+
+/* { dg-final { scan-assembler-times "asl_s\\s+r0,r0" 2 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
+
diff --git a/gcc/testsuite/gcc.target/arc/shlsi-5.c b/gcc/testsuite/gcc.target/arc/shlsi-5.c
new file mode 100644
index 0000000..a91103e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arc/shlsi-5.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=em" } */
+
+unsigned int foo(unsigned int x, unsigned int y) { return y << 2; }
+
+/* { dg-final { scan-assembler-times "asl_s\\s+r0,r0" 1 } } */
+/* { dg-final { scan-assembler-times "asl_s\\s+r0,r1" 1 } } */
+/* { dg-final { scan-assembler "j_s\.d" } } */
+
diff --git a/gcc/testsuite/gcc.target/arc/tls-ld.c b/gcc/testsuite/gcc.target/arc/tls-ld.c
index 68ab9bf..47c71f5 100644
--- a/gcc/testsuite/gcc.target/arc/tls-ld.c
+++ b/gcc/testsuite/gcc.target/arc/tls-ld.c
@@ -13,6 +13,5 @@ int *ae2 (void)
return &e2;
}
-/* { dg-final { scan-assembler "add\\s+r0,pcl,@.tbss@tlsgd" } } */
+/* { dg-final { scan-assembler "add\\s+r0,pcl,@e2@tlsgd" } } */
/* { dg-final { scan-assembler "bl\\s+@__tls_get_addr@plt" } } */
-/* { dg-final { scan-assembler "add_s\\s+r0,r0,@e2@dtpoff" } } */
diff --git a/gcc/testsuite/gcc.target/arc/tls-le.c b/gcc/testsuite/gcc.target/arc/tls-le.c
index ae3089b..6deca1a 100644
--- a/gcc/testsuite/gcc.target/arc/tls-le.c
+++ b/gcc/testsuite/gcc.target/arc/tls-le.c
@@ -13,4 +13,4 @@ int *ae2 (void)
return &e2;
}
-/* { dg-final { scan-assembler "add r0,r25,@e2@tpoff" } } */
+/* { dg-final { scan-assembler "add\\sr0,r25,@e2@tpoff" } } */
diff --git a/gcc/testsuite/gcc.target/arc/uncached-7.c b/gcc/testsuite/gcc.target/arc/uncached-7.c
index 4001b8b..23e5fe2 100644
--- a/gcc/testsuite/gcc.target/arc/uncached-7.c
+++ b/gcc/testsuite/gcc.target/arc/uncached-7.c
@@ -8,4 +8,4 @@ void s_acc(void)
s[10] = 15;
}
-/* { dg-final { scan-assembler-times "st\.di" 1 } } */
+/* { dg-final { scan-assembler-times "st\.di" 1 { xfail *-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic_loaddi_7.c b/gcc/testsuite/gcc.target/arm/atomic_loaddi_7.c
index 6743663..79e36ed 100644
--- a/gcc/testsuite/gcc.target/arm/atomic_loaddi_7.c
+++ b/gcc/testsuite/gcc.target/arm/atomic_loaddi_7.c
@@ -6,4 +6,4 @@
#include "atomic_loaddi_seq_cst.x"
/* { dg-final { scan-assembler-times "ldrexd\tr\[0-9\]+, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 1 } } */
-/* { dg-final { scan-assembler-times "dmb\tish" 1 } } */
+/* { dg-final { scan-assembler-times "dmb\tish" 2 } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic_loaddi_8.c b/gcc/testsuite/gcc.target/arm/atomic_loaddi_8.c
index f7bd3e5..7241d36 100644
--- a/gcc/testsuite/gcc.target/arm/atomic_loaddi_8.c
+++ b/gcc/testsuite/gcc.target/arm/atomic_loaddi_8.c
@@ -6,4 +6,4 @@
#include "atomic_loaddi_seq_cst.x"
/* { dg-final { scan-assembler-times "ldrd\tr\[0-9\]+, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 1 } } */
-/* { dg-final { scan-assembler-times "dmb\tish" 1 } } */
+/* { dg-final { scan-assembler-times "dmb\tish" 2 } } */
diff --git a/gcc/testsuite/gcc.target/arm/pr111235.c b/gcc/testsuite/gcc.target/arm/pr111235.c
new file mode 100644
index 0000000..b06a5bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pr111235.c
@@ -0,0 +1,39 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-require-effective-target arm_arch_v7a_ok } */
+/* { dg-add-options arm_arch_v7a } */
+
+#include <stdatomic.h>
+
+int t0 (int *p, int x)
+{
+ if (x > 100)
+ x = atomic_load_explicit (p, memory_order_relaxed);
+ return x + 1;
+}
+
+long long t1 (long long *p, int x)
+{
+ if (x > 100)
+ x = atomic_load_explicit (p, memory_order_relaxed);
+ return x + 1;
+}
+
+void t2 (int *p, int x)
+{
+ if (x > 100)
+ atomic_store_explicit (p, x, memory_order_relaxed);
+}
+
+void t3 (long long *p, int x)
+{
+ if (x > 100)
+ atomic_store_explicit (p, x, memory_order_relaxed);
+}
+
+/* { dg-final { scan-assembler-times "ldrexd\tr\[0-9\]+, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 2 } } */
+/* { dg-final { scan-assembler-not "ldrgt" } } */
+/* { dg-final { scan-assembler-not "ldrdgt" } } */
+/* { dg-final { scan-assembler-not "ldrexdgt" } } */
+/* { dg-final { scan-assembler-not "strgt" } } */
+/* { dg-final { scan-assembler-not "strdgt" } } */
diff --git a/gcc/testsuite/gcc.target/i386/apx-1.c b/gcc/testsuite/gcc.target/i386/apx-1.c
new file mode 100644
index 0000000..4e580ec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-1.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mapxf" } */
+/* { dg-error "'-mapxf' is not supported for 32-bit code" "" { target ia32 } 0 } */
+
+void
+apx_hanlder ()
+{
+}
diff --git a/gcc/testsuite/gcc.target/i386/apx-egprs-names.c b/gcc/testsuite/gcc.target/i386/apx-egprs-names.c
new file mode 100644
index 0000000..f0517e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-egprs-names.c
@@ -0,0 +1,17 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-mapxf -m64" } */
+/* { dg-final { scan-assembler "r31" } } */
+/* { dg-final { scan-assembler "r30" } } */
+/* { dg-final { scan-assembler "r29" } } */
+/* { dg-final { scan-assembler "r28" } } */
+void foo ()
+{
+ register long a __asm ("r31");
+ register int b __asm ("r30");
+ register short c __asm ("r29");
+ register char d __asm ("r28");
+ __asm__ __volatile__ ("mov %0, %%rax" : : "r" (a) : "rax");
+ __asm__ __volatile__ ("mov %0, %%eax" : : "r" (b) : "eax");
+ __asm__ __volatile__ ("mov %0, %%eax" : : "r" (c) : "eax");
+ __asm__ __volatile__ ("mov %0, %%eax" : : "r" (d) : "eax");
+}
diff --git a/gcc/testsuite/gcc.target/i386/apx-inline-gpr-norex2.c b/gcc/testsuite/gcc.target/i386/apx-inline-gpr-norex2.c
new file mode 100644
index 0000000..208d53d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-inline-gpr-norex2.c
@@ -0,0 +1,25 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mapxf -m64" } */
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+void constraint_test ()
+{
+ register u64 *r16 __asm__("%r16");
+ register u64 r17 __asm__("%r17");
+ u64 *addr = r16;
+
+ __asm__ __volatile__ ("test_mapping_g_m %0, %%rax" : : "g" (r16) : "rax");
+ __asm__ __volatile__ ("test_mapping_g_r %0, %%rax" : : "g" (r17) : "rax");
+ __asm__ __volatile__ ("test_mapping_m %0, %%rax" : : "m" (addr) : "rax");
+ __asm__ __volatile__ ("test_mapping_r %0, %%rax" : : "r" (r17) : "rax");
+ __asm__ __volatile__ ("test_mapping_rm %0, %%rax" : "=r,m" (r16) : : "rax");
+}
+
+/* { dg-final { scan-assembler-not "test_mapping_g_m %r16, %rax" } } */
+/* { dg-final { scan-assembler-not "test_mapping_g_r %r17, %rax" } } */
+/* { dg-final { scan-assembler-not "test_mapping_m %r16, %rax" } } */
+/* { dg-final { scan-assembler-not "test_mapping_r %r17, %rax" } } */
+/* { dg-final { scan-assembler-not "test_mapping_rm %r16, %rax" } } */
+
diff --git a/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c b/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c
new file mode 100644
index 0000000..dc1fc3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c
@@ -0,0 +1,102 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-mapxf -m64 -O2 -mgeneral-regs-only -mno-cld -mno-push-args -maccumulate-outgoing-args" } */
+
+extern void foo (void *) __attribute__ ((interrupt));
+extern int bar (int);
+
+void foo (void *frame)
+{
+ int a,b,c,d,e,f,i;
+ a = bar (5);
+ b = bar (a);
+ c = bar (b);
+ d = bar (c);
+ e = bar (d);
+ f = bar (e);
+ for (i = 1; i < 10; i++)
+ {
+ a += bar (a + i) + bar (b + i) +
+ bar (c + i) + bar (d + i) +
+ bar (e + i) + bar (f + i);
+ }
+}
+/* { dg-final { scan-assembler-times "push(?:l|q)\[\\t \]*%(?:e|r)ax" 1 } } */
+/* { dg-final { scan-assembler-times "push(?:l|q)\[\\t \]*%(?:e|r)bx" 1 } } */
+/* { dg-final { scan-assembler-times "push(?:l|q)\[\\t \]*%(?:e|r)cx" 1 } } */
+/* { dg-final { scan-assembler-times "push(?:l|q)\[\\t \]*%(?:e|r)dx" 1 } } */
+/* { dg-final { scan-assembler-times "push(?:l|q)\[\\t \]*%(?:e|r)si" 1 } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%rdi" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r8" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r9" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r10" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r11" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r12" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r13" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r14" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r15" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r16" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r17" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r18" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r19" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r20" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r21" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r22" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r23" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r24" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r25" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r26" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r27" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r28" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r29" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r30" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "pushq\[\\t \]*%r31" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 145, -16} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 144, -24} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 143, -32} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 142, -40} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 141, -48} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 140, -56} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 139, -64} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 138, -72} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 137, -80} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 136, -88} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 135, -96} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 134, -104} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 133, -112} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 132, -120} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 131, -128} 1 } } */
+/* { dg-final { scan-assembler-times {\t\.cfi_offset 130, -136} 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore" 15} } */
+/* { dg-final { scan-assembler-times "pop(?:l|q)\[\\t \]*%(?:e|r)ax" 1 } } */
+/* { dg-final { scan-assembler-times "pop(?:l|q)\[\\t \]*%(?:e|r)bx" 1 } } */
+/* { dg-final { scan-assembler-times "pop(?:l|q)\[\\t \]*%(?:e|r)cx" 1 } } */
+/* { dg-final { scan-assembler-times "pop(?:l|q)\[\\t \]*%(?:e|r)dx" 1 } } */
+/* { dg-final { scan-assembler-times "pop(?:l|q)\[\\t \]*%(?:e|r)si" 1 } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%rdi" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r8" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r9" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r10" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r11" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r12" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r13" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r14" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r15" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r16" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r17" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r18" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r19" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r20" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r21" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r22" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r23" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r24" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r25" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r26" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r27" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r28" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r29" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r30" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "popq\[\\t \]*%r31" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "iret" 1 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "iretq" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "\tcld" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2-asm.c b/gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2-asm.c
new file mode 100644
index 0000000..fb0f62e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2-asm.c
@@ -0,0 +1,5 @@
+/* { dg-do assemble { target { apxf && { ! ia32 } } } } */
+/* { dg-options "-O1 -mapxf -m64 -DDTYPE32" } */
+
+#include "apx-legacy-insn-check-norex2.c"
+
diff --git a/gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2.c b/gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2.c
new file mode 100644
index 0000000..641feaf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-legacy-insn-check-norex2.c
@@ -0,0 +1,181 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O3 -mapxf -m64 -DDTYPE32" } */
+
+#include <immintrin.h>
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+#ifndef DTYPE32
+#define DTYPE32
+#endif
+
+#ifdef DTYPE32
+typedef u32 DTYPE;
+#endif
+
+__attribute__((target("xsave,fxsr")))
+void legacy_test ()
+{
+ register DTYPE* val __asm__("r16");
+ _xsave64 (val, 1);
+ _xrstor64 (val, 1);
+ _fxsave64 (val);
+ _fxrstor64 (val);
+}
+
+/* { dg-final { scan-assembler-not "xsave64\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "xrstor64\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "fxsave64\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "fxrstor64\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+
+#ifdef DTYPE
+#undef DTYPE
+#define DTYPE u64
+#endif
+
+typedef union
+{
+ __m128i xi[8];
+ __m128 xf[8];
+ __m128d xd[8];
+ __m256i yi[4];
+ __m256 yf[4];
+ __m256d yd[4];
+ DTYPE a[16];
+} tmp_u;
+
+__attribute__((target("sse4.2,aes")))
+void sse_test ()
+{
+ register tmp_u *tdst __asm__("%r16");
+ register tmp_u *src1 __asm__("%r17");
+ register tmp_u *src2 __asm__("%r18");
+
+ src1->xi[0] = _mm_minpos_epu16 (src1->xi[1]);
+ src1->a[2] = _mm_testc_si128 (src1->xi[3], src2->xi[4]);
+ src1->xf[3] = _mm_round_ss (src1->xf[5], src2->xf[6],
+ _MM_FROUND_CUR_DIRECTION);
+ src1->xf[4] = _mm_round_ps (src1->xf[7], _MM_FROUND_CUR_DIRECTION);
+ src1->xd[0] = _mm_round_sd (src1->xd[2], src2->xd[3],
+ _MM_FROUND_CUR_DIRECTION);
+ src1->xd[1] = _mm_round_pd (src1->xd[4], _MM_FROUND_CUR_DIRECTION);
+
+ src1->xi[0] = _mm_hadd_epi16 (tdst->xi[2], src2->xi[3]);
+ src1->xi[1] = _mm_hadd_epi32 (tdst->xi[0], src2->xi[1]);
+ tdst->xi[2] = _mm_hadds_epi16 (src1->xi[4], src2->xi[5]);
+ tdst->xi[3] = _mm_hsub_epi16 (src1->xi[6], src2->xi[7]);
+ tdst->xi[4] = _mm_hsub_epi32 (src1->xi[0], src2->xi[1]);
+ tdst->xi[5] = _mm_hsubs_epi16 (src1->xi[2], src2->xi[3]);
+
+ src1->xi[6] = _mm_cmpeq_epi64 (tdst->xi[4], src2->xi[5]);
+ src1->xi[7] = _mm_cmpgt_epi64 (tdst->xi[6], src2->xi[7]);
+
+ tdst->xf[0] = _mm_dp_ps (src1->xf[0], src2->xf[1], 0xbf);
+ tdst->xd[1] = _mm_dp_pd (src1->xd[2], src2->xd[3], 0xae);
+
+ tdst->xi[2] = _mm_mpsadbw_epu8 (src1->xi[4], src2->xi[5], 0xc1);
+
+ tdst->xi[3] = _mm_blend_epi16 (src1->xi[6], src2->xi[7], 0xc);
+ tdst->xi[4] = _mm_blendv_epi8 (src1->xi[0], src2->xi[1], tdst->xi[2]);
+ tdst->xf[5] = _mm_blend_ps (src1->xf[3], src2->xf[4], 0x4);
+ tdst->xf[6] = _mm_blendv_ps (src1->xf[5], src2->xf[6], tdst->xf[7]);
+ tdst->xd[7] = _mm_blend_pd (tdst->xd[0], src1->xd[1], 0x1);
+ tdst->xd[0] = _mm_blendv_pd (src1->xd[2], src2->xd[3], tdst->xd[4]);
+
+ tdst->xi[1] = _mm_sign_epi8 (src1->xi[5], src2->xi[6]);
+ tdst->xi[2] = _mm_sign_epi16 (src1->xi[7], src2->xi[0]);
+ tdst->xi[3] = _mm_sign_epi32 (src1->xi[1], src2->xi[2]);
+
+ tdst->a[2] = _mm_cmpestri (src1->xi[3], 16, src2->xi[4], 16, 0x0c);
+ tdst->xi[4] = _mm_cmpestrm (src1->xi[3], 16, src2->xi[4], 16, 0x20);
+ tdst->a[5] = _mm_cmpistri (src1->xi[5], src2->xi[6], 0x30);
+ tdst->xi[6] = _mm_cmpistrm (src1->xi[5], src2->xi[6], 0x40);
+
+ tdst->xi[7] = _mm_aesimc_si128 (src1->xi[7]);
+ tdst->xi[0] = _mm_aeskeygenassist_si128 (src1->xi[1], 0x1b);
+}
+
+__attribute__((target("avx2,aes")))
+void vex_test ()
+{
+
+ register tmp_u *tdst __asm__("%r16");
+ register tmp_u *src1 __asm__("%r17");
+ register tmp_u *src2 __asm__("%r18");
+
+ src1->xi[0] = _mm_minpos_epu16 (src1->xi[1]);
+ src1->a[2] = _mm256_testc_si256 (src1->yi[2], src2->yi[3]);
+ src1->xf[3] = _mm_round_ss (src1->xf[5], src2->xf[6],
+ _MM_FROUND_CUR_DIRECTION);
+ src1->yf[4] = _mm256_round_ps (src1->yf[2], _MM_FROUND_CUR_DIRECTION);
+ src1->xd[0] = _mm_round_sd (src1->xd[2], src2->xd[3],
+ _MM_FROUND_CUR_DIRECTION);
+ src1->yd[1] = _mm256_round_pd (src1->yd[3], _MM_FROUND_CUR_DIRECTION);
+
+ src1->yi[1] = _mm256_hadd_epi16 (tdst->yi[2], src2->yi[3]);
+ src1->yi[2] = _mm256_hadd_epi32 (tdst->yi[0], src2->yi[1]);
+ tdst->yi[3] = _mm256_hadds_epi16 (src1->yi[1], src2->yi[2]);
+ tdst->yi[0] = _mm256_hsub_epi16 (src1->yi[3], src2->yi[0]);
+ tdst->yi[1] = _mm256_hsub_epi32 (src1->yi[0], src2->yi[1]);
+ tdst->yi[2] = _mm256_hsubs_epi16 (src1->yi[2], src2->yi[3]);
+
+ src1->yi[2] = _mm256_cmpeq_epi64 (tdst->yi[1], src2->yi[2]);
+ src1->yi[1] = _mm256_cmpgt_epi64 (tdst->yi[3], src2->yi[0]);
+
+ tdst->yf[2] = _mm256_dp_ps (src1->yf[0], src2->yf[1], 0xbf);
+
+ tdst->yi[3] = _mm256_mpsadbw_epu8 (src1->yi[1], src2->yi[1], 0xc1);
+
+ tdst->yi[0] = _mm256_blend_epi16 (src1->yi[1], src2->yi[2], 0xc);
+ tdst->yi[1] = _mm256_blendv_epi8 (src1->yi[1], src2->yi[2], tdst->yi[0]);
+ tdst->yf[2] = _mm256_blend_ps (src1->yf[0], src2->yf[1], 0x4);
+ tdst->yf[3] = _mm256_blendv_ps (src1->yf[2], src2->yf[3], tdst->yf[1]);
+ tdst->yd[3] = _mm256_blend_pd (tdst->yd[1], src1->yd[0], 0x1);
+ tdst->yd[1] = _mm256_blendv_pd (src1->yd[2], src2->yd[3], tdst->yd[2]);
+
+ tdst->yi[2] = _mm256_sign_epi8 (src1->yi[0], src2->yi[1]);
+ tdst->yi[3] = _mm256_sign_epi16 (src1->yi[2], src2->yi[3]);
+ tdst->yi[0] = _mm256_sign_epi32 (src1->yi[0], src2->yi[1]);
+
+ tdst->a[2] = _mm_cmpestri (src1->xi[3], 16, src2->xi[4], 16, 0x0c);
+ tdst->xi[4] = _mm_cmpestrm (src1->xi[3], 16, src2->xi[4], 16, 0x20);
+ tdst->a[5] = _mm_cmpistri (src1->xi[5], src2->xi[6], 0x30);
+ tdst->xi[6] = _mm_cmpistrm (src1->xi[5], src2->xi[6], 0x40);
+
+ tdst->xi[7] = _mm_aesimc_si128 (src1->xi[7]);
+ tdst->xi[0] = _mm_aeskeygenassist_si128 (src1->xi[1], 0x1b);
+}
+
+/* { dg-final { scan-assembler-not "v?pcmpeqq\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pcmpgtq\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phaddw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phaddd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phaddsw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phsubw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phsubd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phsubsw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?dpps\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?dppd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?psadbw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pblendw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pblendvb\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?blendps\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?blendvps\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?blendpd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?blendvpd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?psignb\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?psignw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?psignd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?phminposuw\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?ptest\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?roundss\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?roundsd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?roundps\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?roundpd\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pcmpestri\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pcmpistri\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pcmpestrm\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?pcmpistrm\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?aesimc\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
+/* { dg-final { scan-assembler-not "v?aeskeygenassist\[ \\t]+\\\.\\\*r\(1\[6-9\]\|2\[0-9\]|30\|31\)" } } */
diff --git a/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c b/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c
new file mode 100644
index 0000000..c7968d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c
@@ -0,0 +1,45 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mapxf" } */
+
+extern int bar (int);
+
+void foo ()
+{
+ int a,b,c,d,e,f,i;
+ a = bar (5);
+ b = bar (a);
+ c = bar (b);
+ d = bar (c);
+ e = bar (d);
+ f = bar (e);
+ for (i = 1; i < 10; i++)
+ {
+ a += bar (a + i) + bar (b + i) +
+ bar (c + i) + bar (d + i) +
+ bar (e + i) + bar (f + i);
+ }
+}
+
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 16" 2 } } */
+/* { dg-final { scan-assembler-times "pushq\[^\n\r]*%r15(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 15, -16(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "push2\[\\t \]*\[^\n\r]*%r14\[^\n\r]*%r13\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 32" 2 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 14, -24(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 13, -32(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "push2\[\\t \]*\[^\n\r]*%r12\[^\n\r]*%rbp\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 48" 2 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 12, -40(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 6, -48(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pushq\[^\n\r]*%rbx(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 56" 2 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 3, -56(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "popq\[^\n\r]*rbx(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pop2\[\\t \]*\[^\n\r]*%rbp\[^\n\r]*%r12\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 12(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 6(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pop2\[\\t \]*\[^\n\r]*%r13\[^\n\r]*%r14\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 14(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 13(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "popq\[^\n\r]*%r15(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 8(?:\n|\[ \\t\]+#)" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c b/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c
new file mode 100644
index 0000000..3878799
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c
@@ -0,0 +1,29 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mapxf -mforce-drap" } */
+
+#include "apx-push2pop2-1.c"
+
+
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 16" 2 } } */
+/* { dg-final { scan-assembler-times "pushq\[^\n\r]*%r15(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 15, -16(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "push2\[\\t \]*\[^\n\r]*%r14\[^\n\r]*%r13\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 32" 2 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 14, -24(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 13, -32(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "push2\[\\t \]*\[^\n\r]*%r12\[^\n\r]*%rbp\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 48" 2 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 12, -40(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 6, -48(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pushq\[^\n\r]*%rbx(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 56" 2 } } */
+/* { dg-final { scan-assembler-times ".cfi_offset 3, -56(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "popq\[^\n\r]*rbx(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pop2\[\\t \]*\[^\n\r]*%rbp\[^\n\r]*%r12\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 12(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 6(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pop2\[\\t \]*\[^\n\r]*%r13\[^\n\r]*%r14\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 14(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_restore 13(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "popq\[^\n\r]*%r15(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times ".cfi_def_cfa_offset 8(?:\n|\[ \\t\]+#)" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/apx-push2pop2_interrupt-1.c b/gcc/testsuite/gcc.target/i386/apx-push2pop2_interrupt-1.c
new file mode 100644
index 0000000..747f7aa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-push2pop2_interrupt-1.c
@@ -0,0 +1,28 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mapxf -mgeneral-regs-only -mno-cld -mno-push-args -maccumulate-outgoing-args" } */
+
+extern void foo (void *) __attribute__ ((interrupt));
+
+extern int bar (int);
+
+void foo (void *frame)
+{
+ int a,b,c,d,e,f,i;
+ a = bar (5);
+ b = bar (a);
+ c = bar (b);
+ d = bar (c);
+ e = bar (d);
+ f = bar (e);
+ for (i = 1; i < 10; i++)
+ {
+ a += bar (a + i) + bar (b + i) +
+ bar (c + i) + bar (d + i) +
+ bar (e + i) + bar (f + i);
+ }
+}
+
+/* { dg-final { scan-assembler-times "pushq" 31 } } */
+/* { dg-final { scan-assembler-times "popq" 31 } } */
+/* { dg-final { scan-assembler-not "push2\[\\t \]+" } } */
+/* { dg-final { scan-assembler-not "pop2\[\\t \]+" } } */
diff --git a/gcc/testsuite/gcc.target/i386/apx-spill_to_egprs-1.c b/gcc/testsuite/gcc.target/i386/apx-spill_to_egprs-1.c
new file mode 100644
index 0000000..290863d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-spill_to_egprs-1.c
@@ -0,0 +1,25 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -march=skylake-avx512 -mapxf -DDTYPE32" } */
+
+#include "spill_to_mask-1.c"
+
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r16d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r17d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r18d" } } */
+/* { dg-final { scan-assembler "movq\[ \t]+\[^\\n\\r\]*, %r19" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r20d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r21d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r22d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r23d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r24d" } } */
+/* { dg-final { scan-assembler "addl\[ \t]+\[^\\n\\r\]*, %r25d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r26d" } } */
+/* { dg-final { scan-assembler "movl\[ \t]+\[^\\n\\r\]*, %r27d" } } */
+/* { dg-final { scan-assembler "movbel\[ \t]+\[^\\n\\r\]*, %r28d" } } */
+/* { dg-final { scan-assembler "movbel\[ \t]+\[^\\n\\r\]*, %r29d" } } */
+/* { dg-final { scan-assembler "movbel\[ \t]+\[^\\n\\r\]*, %r30d" } } */
+/* { dg-final { scan-assembler "movbel\[ \t]+\[^\\n\\r\]*, %r31d" } } */
+/* { dg-final { scan-assembler-not "knot" } } */
+/* { dg-final { scan-assembler-not "kxor" } } */
+/* { dg-final { scan-assembler-not "kor" } } */
+/* { dg-final { scan-assembler-not "kandn" } } */
diff --git a/gcc/testsuite/gcc.target/i386/ashldi3-2.c b/gcc/testsuite/gcc.target/i386/ashldi3-2.c
new file mode 100644
index 0000000..053389d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ashldi3-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile { target ia32 } } */
+/* { dg-options "-O2 -mno-stv" } */
+
+long long foo(long long x)
+{
+ return x << 1;
+}
+
+/* { dg-final { scan-assembler "adcl" } } */
+/* { dg-final { scan-assembler-not "shldl" } } */
diff --git a/gcc/testsuite/gcc.target/i386/ashlti3-3.c b/gcc/testsuite/gcc.target/i386/ashlti3-3.c
new file mode 100644
index 0000000..4f14ca0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ashlti3-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2" } */
+
+__int128 foo(__int128 x)
+{
+ return x << 1;
+}
+
+/* { dg-final { scan-assembler "adcq" } } */
+/* { dg-final { scan-assembler-not "shldq" } } */
diff --git a/gcc/testsuite/gcc.target/i386/avx512fp16-64-32-vecop-1.c b/gcc/testsuite/gcc.target/i386/avx512fp16-64-32-vecop-1.c
index 754e909..de88354 100644
--- a/gcc/testsuite/gcc.target/i386/avx512fp16-64-32-vecop-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx512fp16-64-32-vecop-1.c
@@ -1,10 +1,10 @@
/* { dg-do compile } */
/* { dg-options "-O2 -mavx512fp16 -mavx512vl" } */
-/* { dg-final { scan-assembler-times "vaddph" 2 } } */
-/* { dg-final { scan-assembler-times "vsubph" 2 } } */
-/* { dg-final { scan-assembler-times "vmulph" 2 } } */
-/* { dg-final { scan-assembler-times "vdivph" 2 } } */
+/* { dg-final { scan-assembler-times "vaddph" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vsubph" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vmulph" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vdivph" 2 { target { ! ia32 } } } } */
#define DO_PRAGMA(X) _Pragma(#X)
diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
index ca558b3..c6307fb 100644
--- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
@@ -92,6 +92,7 @@ extern void test_avxvnniint16 (void) __attribute__((__target__("avxvnniint16"))
extern void test_sm3 (void) __attribute__((__target__("sm3")));
extern void test_sha512 (void) __attribute__((__target__("sha512")));
extern void test_sm4 (void) __attribute__((__target__("sm4")));
+extern void test_user_msr (void) __attribute__((__target__("usermsr")));
extern void test_no_sgx (void) __attribute__((__target__("no-sgx")));
extern void test_no_avx5124fmaps(void) __attribute__((__target__("no-avx5124fmaps")));
@@ -185,6 +186,7 @@ extern void test_no_avxvnniint16 (void) __attribute__((__target__("no-avxvnniin
extern void test_no_sm3 (void) __attribute__((__target__("no-sm3")));
extern void test_no_sha512 (void) __attribute__((__target__("no-sha512")));
extern void test_no_sm4 (void) __attribute__((__target__("no-sm4")));
+extern void test_no_user_msr (void) __attribute__((__target__("no-usermsr")));
extern void test_arch_nocona (void) __attribute__((__target__("arch=nocona")));
extern void test_arch_core2 (void) __attribute__((__target__("arch=core2")));
@@ -214,6 +216,8 @@ extern void test_arch_graniterapids (void) __attribute__((__target__("arch=grani
extern void test_arch_graniterapids_d (void) __attribute__((__target__("arch=graniterapids-d")));
extern void test_arch_arrowlake (void) __attribute__((__target__("arch=arrowlake")));
extern void test_arch_arrowlake_s (void) __attribute__((__target__("arch=arrowlake-s")));
+extern void test_arch_clearwaterforest (void) __attribute__((__target__("arch=clearwaterforest")));
+extern void test_arch_pantherlake (void) __attribute__((__target__("arch=pantherlake")));
extern void test_arch_lujiazui (void) __attribute__((__target__("arch=lujiazui")));
extern void test_arch_k8 (void) __attribute__((__target__("arch=k8")));
extern void test_arch_k8_sse3 (void) __attribute__((__target__("arch=k8-sse3")));
diff --git a/gcc/testsuite/gcc.target/i386/large-data.c b/gcc/testsuite/gcc.target/i386/large-data.c
new file mode 100644
index 0000000..bdd4acd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/large-data.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-options "-O2 -mcmodel=large -mlarge-data-threshold=4" } */
+/* { dg-final { scan-assembler {\.lbss} } } */
+/* { dg-final { scan-assembler {\.bss} } } */
+/* { dg-final { scan-assembler {\.ldata} } } */
+/* { dg-final { scan-assembler {\.data} } } */
+/* { dg-final { scan-assembler {\.lrodata} } } */
+/* { dg-final { scan-assembler {\.rodata} } } */
+
+const char rodata_a[] = "abc", rodata_b[] = "abcd";
+char data_a[4] = {1}, data_b[5] = {1};
+char bss_a[4], bss_b[5];
diff --git a/gcc/testsuite/gcc.target/i386/lea-2.c b/gcc/testsuite/gcc.target/i386/lea-2.c
new file mode 100644
index 0000000..e9f12d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/lea-2.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-Oz" } */
+int foo(int x) { return x<<2; }
+int bar(int x) { return x<<3; }
+long long fool(long long x) { return x<<2; }
+long long barl(long long x) { return x<<3; }
+/* { dg-final { scan-assembler-not "lea\[lq\]" } } */
diff --git a/gcc/testsuite/gcc.target/i386/noevex512-1.c b/gcc/testsuite/gcc.target/i386/noevex512-1.c
new file mode 100644
index 0000000..7fd45f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/noevex512-1.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O0 -march=x86-64 -mavx512f -mno-evex512 -Wno-psabi" } */
+/* { dg-final { scan-assembler-not ".%zmm" } } */
+
+typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
+
+__m512d
+foo ()
+{
+ __m512d a, b;
+ a = a + b;
+ return a;
+}
diff --git a/gcc/testsuite/gcc.target/i386/noevex512-2.c b/gcc/testsuite/gcc.target/i386/noevex512-2.c
new file mode 100644
index 0000000..1c206e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/noevex512-2.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=x86-64 -mavx512bw -mno-evex512" } */
+
+#include <immintrin.h>
+
+long long
+foo (long long c)
+{
+ register long long a __asm ("k7") = c;
+ long long b = foo (a);
+ asm volatile ("" : "+k" (b)); /* { dg-error "inconsistent operand constraints in an 'asm'" } */
+ return b;
+}
diff --git a/gcc/testsuite/gcc.target/i386/noevex512-3.c b/gcc/testsuite/gcc.target/i386/noevex512-3.c
new file mode 100644
index 0000000..10e00c2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/noevex512-3.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64 -Wno-psabi -mavx512f" } */
+/* { dg-final { scan-assembler-not ".%zmm" } } */
+
+typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
+
+__attribute__ ((target ("no-evex512"))) __m512d
+foo ()
+{
+ __m512d a, b;
+ a = a + b;
+ return a;
+}
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-absneghf.c b/gcc/testsuite/gcc.target/i386/part-vect-absneghf.c
new file mode 100644
index 0000000..48aed14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-absneghf.c
@@ -0,0 +1,91 @@
+/* { dg-do run { target avx512fp16 } } */
+/* { dg-options "-O1 -mavx512fp16 -mavx512vl -ftree-vectorize -fdump-tree-slp-details -fdump-tree-optimized" } */
+
+extern void abort ();
+
+static void do_test (void);
+
+#define DO_TEST do_test
+#define AVX512FP16
+#include "avx512-check.h"
+
+#define N 16
+_Float16 a[N] = {-0.1f, -3.2f, -6.3f, -9.4f,
+ -12.5f, -15.6f, -18.7f, -21.8f,
+ 24.9f, 27.1f, 30.2f, 33.3f,
+ 36.4f, 39.5f, 42.6f, 45.7f};
+_Float16 b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
+ -9.0f, 1.0f, -2.0f, 3.0f,
+ -4.0f, -5.0f, 6.0f, 7.0f,
+ -8.0f, -9.0f, 10.0f, 11.0f};
+_Float16 r[N];
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+abs_32 (void)
+{
+ r[0] = __builtin_fabsf16 (b[0]);
+ r[1] = __builtin_fabsf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+abs_64 (void)
+{
+ r[0] = __builtin_fabsf16 (b[0]);
+ r[1] = __builtin_fabsf16 (b[1]);
+ r[2] = __builtin_fabsf16 (b[2]);
+ r[3] = __builtin_fabsf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+neg_32 (void)
+{
+ r[0] = -b[0];
+ r[1] = -b[1];
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+neg_64 (void)
+{
+ r[0] = -b[0];
+ r[1] = -b[1];
+ r[2] = -b[2];
+ r[3] = -b[3];
+}
+
+static void
+__attribute__ ((noinline, noclone))
+do_test (void)
+{
+ abs_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_fabsf16 (b[i]))
+ abort ();
+
+ abs_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_fabsf16 (b[i]))
+ abort ();
+
+ neg_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != -b[i])
+ abort ();
+
+ neg_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != -b[i])
+ abort ();
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized using 8 byte vectors" 2 "slp2" } } */
+/* { dg-final { scan-tree-dump-times "vectorized using 4 byte vectors" 2 "slp2" } } */
+/* { dg-final { scan-tree-dump-times {(?n)ABS_EXPR <vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n)= -vect} 2 "optimized" { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-copysignhf.c b/gcc/testsuite/gcc.target/i386/part-vect-copysignhf.c
new file mode 100644
index 0000000..811617bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-copysignhf.c
@@ -0,0 +1,60 @@
+/* { dg-do run { target avx512fp16 } } */
+/* { dg-options "-O1 -mavx512fp16 -mavx512vl -ftree-vectorize -fdump-tree-slp-details -fdump-tree-optimized" } */
+
+extern void abort ();
+
+static void do_test (void);
+
+#define DO_TEST do_test
+#define AVX512FP16
+#include "avx512-check.h"
+
+#define N 16
+_Float16 a[N] = {-0.1f, -3.2f, -6.3f, -9.4f,
+ -12.5f, -15.6f, -18.7f, -21.8f,
+ 24.9f, 27.1f, 30.2f, 33.3f,
+ 36.4f, 39.5f, 42.6f, 45.7f};
+_Float16 b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
+ -9.0f, 1.0f, -2.0f, 3.0f,
+ -4.0f, -5.0f, 6.0f, 7.0f,
+ -8.0f, -9.0f, 10.0f, 11.0f};
+_Float16 r[N];
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+copysign_32 (void)
+{
+ r[0] = __builtin_copysignf16 (1.0f, b[0]);
+ r[1] = __builtin_copysignf16 (1.0f, b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+copysign_64 (void)
+{
+ r[0] = __builtin_copysignf16 (1.0f, b[0]);
+ r[1] = __builtin_copysignf16 (1.0f, b[1]);
+ r[2] = __builtin_copysignf16 (1.0f, b[2]);
+ r[3] = __builtin_copysignf16 (1.0f, b[3]);
+}
+
+static void
+__attribute__ ((noinline, noclone))
+do_test (void)
+{
+ copysign_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_copysignf16 (1.0f, b[i]))
+ abort ();
+
+ copysign_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_copysignf16 (1.0f, b[i]))
+ abort ();
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized using 8 byte vectors" 1 "slp2" } } */
+/* { dg-final { scan-tree-dump-times "vectorized using 4 byte vectors" 1 "slp2" } } */
+/* { dg-final { scan-tree-dump-times ".COPYSIGN" 2 "optimized" { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-fmaddsubhf-1.c b/gcc/testsuite/gcc.target/i386/part-vect-fmaddsubhf-1.c
new file mode 100644
index 0000000..051f992
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-fmaddsubhf-1.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512fp16 -mavx512vl -O2" } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ph\[ \t\]+\[^\n\]*%xmm\[0-9\]" 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vfmsubadd...ph\[ \t\]+\[^\n\]*%xmm\[0-9\]" 1 { target { ! ia32 } } } } */
+
+void vec_fmaddsub_fp16(int n, _Float16 da_r, _Float16 *x, _Float16* y, _Float16* __restrict z)
+{
+ for (int i = 0; i < 4; i += 2)
+ {
+ z[i] = da_r * x[i] - y[i];
+ z[i+1] = da_r * x[i+1] + y[i+1];
+ }
+}
+
+void vec_fmasubadd_fp16(int n, _Float16 da_r, _Float16 *x, _Float16* y, _Float16* __restrict z)
+{
+ for (int i = 0; i < 4; i += 2)
+ {
+ z[i] = da_r * x[i] + y[i];
+ z[i+1] = da_r * x[i+1] - y[i+1];
+ }
+}
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-fmahf-1.c b/gcc/testsuite/gcc.target/i386/part-vect-fmahf-1.c
new file mode 100644
index 0000000..46e3cd3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-fmahf-1.c
@@ -0,0 +1,58 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512fp16 -mavx512vl" } */
+/* { dg-final { scan-assembler-times "vfmadd132ph\[^\n\r\]*xmm\[0-9\]" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vfnmadd132ph\[^\n\r\]*xmm\[0-9\]" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vfmsub132ph\[^\n\r\]*xmm\[0-9\]" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "vfnmsub132ph\[^\n\r\]*xmm\[0-9\]" 2 { target { ! ia32 } } } } */
+
+typedef _Float16 v4hf __attribute__ ((__vector_size__ (8)));
+typedef _Float16 v2hf __attribute__ ((__vector_size__ (4)));
+
+v4hf
+fma_v4hf (v4hf a, v4hf b, v4hf c)
+{
+ return a * b + c;
+}
+
+v4hf
+fnma_v4hf (v4hf a, v4hf b, v4hf c)
+{
+ return -a * b + c;
+}
+
+v4hf
+fms_v4hf (v4hf a, v4hf b, v4hf c)
+{
+ return a * b - c;
+}
+
+v4hf
+fnms_v4hf (v4hf a, v4hf b, v4hf c)
+{
+ return -a * b - c;
+}
+
+v2hf
+fma_v2hf (v2hf a, v2hf b, v2hf c)
+{
+ return a * b + c;
+}
+
+v2hf
+fnma_v2hf (v2hf a, v2hf b, v2hf c)
+{
+ return -a * b + c;
+}
+
+v2hf
+fms_v2hf (v2hf a, v2hf b, v2hf c)
+{
+ return a * b - c;
+}
+
+v2hf
+fnms_v2hf (v2hf a, v2hf b, v2hf c)
+{
+ return -a * b - c;
+}
+
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-hf-convert-1.c b/gcc/testsuite/gcc.target/i386/part-vect-hf-convert-1.c
new file mode 100644
index 0000000..9542601
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-hf-convert-1.c
@@ -0,0 +1,111 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-mavx512fp16 -mavx512vl -O2" } */
+/* { dg-final { scan-assembler-times {(?n)vcvttph2w[ \t]} 2 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvttph2uw[ \t]} 2 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvttph2dq[ \t]} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvttph2udq[ \t]} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtw2ph[ \t]} 2 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtuw2ph[ \t]} 2 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtdq2phx[ \t]} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtudq2phx[ \t]} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtph2psx[ \t]} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtps2phxx[ \t]} 1 } } */
+
+
+void
+fix_32 (short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pa[i] = pb[i];
+}
+
+void
+fix_64 (short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 4; i++)
+ pa[i] = pb[i];
+}
+
+void
+fixuns_32 (unsigned short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pa[i] = pb[i];
+}
+
+void
+fixuns_64 (unsigned short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 4; i++)
+ pa[i] = pb[i];
+}
+
+void
+float_32 (short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pb[i] = pa[i];
+}
+
+void
+float_64 (short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 4; i++)
+ pb[i] = pa[i];
+}
+
+void
+floatuns_32 (unsigned short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pb[i] = pa[i];
+}
+
+void
+floatuns_64 (unsigned short* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 4; i++)
+ pb[i] = pa[i];
+}
+
+void
+fix_32si (int* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pa[i] = pb[i];
+}
+
+void
+fix_32usi (unsigned int* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pa[i] = pb[i];
+}
+
+void
+float_32si (int* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pb[i] = pa[i];
+}
+
+void
+float_32usi (unsigned int* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pb[i] = pa[i];
+}
+
+void
+float_extend (float* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pa[i] = pb[i];
+}
+
+void
+float_truncate (float* __restrict pa, _Float16* pb)
+{
+ for (int i = 0; i != 2; i++)
+ pb[i] = pa[i];
+}
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-roundhf.c b/gcc/testsuite/gcc.target/i386/part-vect-roundhf.c
new file mode 100644
index 0000000..38235c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-roundhf.c
@@ -0,0 +1,217 @@
+/* { dg-do run { target avx512fp16 } } */
+/* { dg-options "-O1 -mavx512fp16 -mavx512vl -fdump-tree-slp-details -fdump-tree-optimized" } */
+
+extern void abort ();
+
+static void do_test (void);
+
+#define DO_TEST do_test
+#define AVX512FP16
+#include "avx512-check.h"
+
+#define N 16
+_Float16 b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
+ -9.0f, 1.0f, -2.0f, 3.0f,
+ -4.0f, -5.0f, 6.0f, 7.0f,
+ -8.0f, -9.0f, 10.0f, 11.0f};
+_Float16 r[N];
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+round_32 (void)
+{
+ r[0] = __builtin_roundf16 (b[0]);
+ r[1] = __builtin_roundf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+round_64 (void)
+{
+ r[0] = __builtin_roundf16 (b[0]);
+ r[1] = __builtin_roundf16 (b[1]);
+ r[2] = __builtin_roundf16 (b[2]);
+ r[3] = __builtin_roundf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+rint_32 (void)
+{
+ r[0] = __builtin_rintf16 (b[0]);
+ r[1] = __builtin_rintf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+rint_64 (void)
+{
+ r[0] = __builtin_rintf16 (b[0]);
+ r[1] = __builtin_rintf16 (b[1]);
+ r[2] = __builtin_rintf16 (b[2]);
+ r[3] = __builtin_rintf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+nearbyint_32 (void)
+{
+ r[0] = __builtin_nearbyintf16 (b[0]);
+ r[1] = __builtin_nearbyintf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+nearbyint_64 (void)
+{
+ r[0] = __builtin_nearbyintf16 (b[0]);
+ r[1] = __builtin_nearbyintf16 (b[1]);
+ r[2] = __builtin_nearbyintf16 (b[2]);
+ r[3] = __builtin_nearbyintf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+trunc_32 (void)
+{
+ r[0] = __builtin_truncf16 (b[0]);
+ r[1] = __builtin_truncf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+trunc_64 (void)
+{
+ r[0] = __builtin_truncf16 (b[0]);
+ r[1] = __builtin_truncf16 (b[1]);
+ r[2] = __builtin_truncf16 (b[2]);
+ r[3] = __builtin_truncf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+floor_32 (void)
+{
+ r[0] = __builtin_floorf16 (b[0]);
+ r[1] = __builtin_floorf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+floor_64 (void)
+{
+ r[0] = __builtin_floorf16 (b[0]);
+ r[1] = __builtin_floorf16 (b[1]);
+ r[2] = __builtin_floorf16 (b[2]);
+ r[3] = __builtin_floorf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+ceil_32 (void)
+{
+ r[0] = __builtin_ceilf16 (b[0]);
+ r[1] = __builtin_ceilf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+ceil_64 (void)
+{
+ r[0] = __builtin_ceilf16 (b[0]);
+ r[1] = __builtin_ceilf16 (b[1]);
+ r[2] = __builtin_ceilf16 (b[2]);
+ r[3] = __builtin_ceilf16 (b[3]);
+}
+
+_Float16
+__attribute__((noipa,noinline,optimize("Ofast")))
+dummy_roundf16 (_Float16 a)
+{
+ return __builtin_roundf16 (a);
+}
+static void
+__attribute__ ((noinline, noclone))
+do_test (void)
+{
+ round_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != dummy_roundf16 (b[i]))
+ abort ();
+
+ round_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != dummy_roundf16 (b[i]))
+ abort ();
+
+ rint_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_rintf16 (b[i]))
+ abort ();
+
+ rint_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_rintf16 (b[i]))
+ abort ();
+
+ nearbyint_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_nearbyintf16 (b[i]))
+ abort ();
+
+ nearbyint_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_nearbyintf16 (b[i]))
+ abort ();
+
+ trunc_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_truncf16 (b[i]))
+ abort ();
+
+ trunc_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_truncf16 (b[i]))
+ abort ();
+
+ floor_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_floorf16 (b[i]))
+ abort ();
+
+ floor_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_floorf16 (b[i]))
+ abort ();
+
+ ceil_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != __builtin_ceilf16 (b[i]))
+ abort ();
+
+ ceil_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != __builtin_ceilf16 (b[i]))
+ abort ();
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized using 8 byte vectors" 6 "slp2" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized using 4 byte vectors" 6 "slp2" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).CEIL \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).FLOOR \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).ROUND \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).RINT \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).NEARBYINT \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).TRUNC \(vect} 2 "optimized" { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c b/gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c
new file mode 100644
index 0000000..b7f9e7f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512fp16 -mavx512vl -Ofast" } */
+/* { dg-final { scan-assembler-times {(?n)vsqrtph[ \t].*%xmm[0-9]} 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times {(?n)vsqrtph[ \t].*%xmm[0-9]} 2 { target { ! ia32 } } } } */
+
+void
+foo16_sqrt (_Float16* a, _Float16* __restrict c)
+{
+ c[0] = __builtin_sqrtf16 (a[0]);
+ c[1] = __builtin_sqrtf16 (a[1]);
+}
+
+void
+foo32_sqrt(_Float16* a, _Float16* __restrict c)
+{
+ c[0] = __builtin_sqrtf16 (a[0]);
+ c[1] = __builtin_sqrtf16 (a[1]);
+ c[2] = __builtin_sqrtf16 (a[2]);
+ c[3] = __builtin_sqrtf16 (a[3]);
+}
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-vminmaxph-1.c b/gcc/testsuite/gcc.target/i386/part-vect-vminmaxph-1.c
new file mode 100644
index 0000000..ef215e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-vminmaxph-1.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512fp16 -mavx512vl -Ofast" } */
+/* { dg-final { scan-assembler-times {(?n)vmaxph[ \t].*%xmm[0-9]} 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times {(?n)vminph[ \t].*%xmm[0-9]} 2 { target { ! ia32 } } } } */
+
+void
+foo16_max (_Float16* a, _Float16* b, _Float16* __restrict c)
+{
+ c[0] = __builtin_fmaxf16 (a[0], b[0]);
+ c[1] = __builtin_fmaxf16 (a[1], b[1]);
+}
+
+void
+foo32_max(_Float16* a, _Float16* b, _Float16* __restrict c)
+{
+ c[0] = __builtin_fmaxf16 (a[0], b[0]);
+ c[1] = __builtin_fmaxf16 (a[1], b[1]);
+ c[2] = __builtin_fmaxf16 (a[2], b[2]);
+ c[3] = __builtin_fmaxf16 (a[3], b[3]);
+}
+
+void
+foo16_min (_Float16* a, _Float16* b, _Float16* __restrict c)
+{
+ c[0] = __builtin_fminf16 (a[0], b[0]);
+ c[1] = __builtin_fminf16 (a[1], b[1]);
+}
+
+void
+foo32_min(_Float16* a, _Float16* b, _Float16* __restrict c)
+{
+ c[0] = __builtin_fminf16 (a[0], b[0]);
+ c[1] = __builtin_fminf16 (a[1], b[1]);
+ c[2] = __builtin_fminf16 (a[2], b[2]);
+ c[3] = __builtin_fminf16 (a[3], b[3]);
+}
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-xorsignhf.c b/gcc/testsuite/gcc.target/i386/part-vect-xorsignhf.c
new file mode 100644
index 0000000..a8ec60a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-xorsignhf.c
@@ -0,0 +1,60 @@
+/* { dg-do run { target avx512fp16 } } */
+/* { dg-options "-O1 -mavx512fp16 -mavx512vl -ftree-vectorize -fdump-tree-slp-details -fdump-tree-optimized" } */
+
+extern void abort ();
+
+static void do_test (void);
+
+#define DO_TEST do_test
+#define AVX512FP16
+#include "avx512-check.h"
+
+#define N 16
+_Float16 a[N] = {-0.1f, -3.2f, -6.3f, -9.4f,
+ -12.5f, -15.6f, -18.7f, -21.8f,
+ 24.9f, 27.1f, 30.2f, 33.3f,
+ 36.4f, 39.5f, 42.6f, 45.7f};
+_Float16 b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
+ -9.0f, 1.0f, -2.0f, 3.0f,
+ -4.0f, -5.0f, 6.0f, 7.0f,
+ -8.0f, -9.0f, 10.0f, 11.0f};
+_Float16 r[N];
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+xorsign_32 (void)
+{
+ r[0] = a[0] * __builtin_copysignf16 (1.0f, b[0]);
+ r[1] = a[1] * __builtin_copysignf16 (1.0f, b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+xorsign_64 (void)
+{
+ r[0] = a[0] * __builtin_copysignf16 (1.0f, b[0]);
+ r[1] = a[1] * __builtin_copysignf16 (1.0f, b[1]);
+ r[2] = a[2] * __builtin_copysignf16 (1.0f, b[2]);
+ r[3] = a[3] * __builtin_copysignf16 (1.0f, b[3]);
+}
+
+static void
+__attribute__ ((noinline, noclone))
+do_test (void)
+{
+ xorsign_32 ();
+ /* check results: */
+ for (int i = 0; i != 2; i++)
+ if (r[i] != a[i] * __builtin_copysignf16 (1.0f, b[i]))
+ abort ();
+
+ xorsign_64 ();
+ /* check results: */
+ for (int i = 0; i != 4; i++)
+ if (r[i] != a[i] * __builtin_copysignf16 (1.0f, b[i]))
+ abort ();
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized using 8 byte vectors" 1 "slp2" } } */
+/* { dg-final { scan-tree-dump-times "vectorized using 4 byte vectors" 1 "slp2" } } */
+/* { dg-final { scan-tree-dump-times ".XORSIGN" 2 "optimized" { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr106245-1.c b/gcc/testsuite/gcc.target/i386/pr106245-1.c
new file mode 100644
index 0000000..a0403e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr106245-1.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+int f(int a)
+{
+ return (a << 31) >> 31;
+}
+
+/* { dg-final { scan-assembler-not "sarb" } } */
+/* { dg-final { scan-assembler-not "movsbl" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr110701.c b/gcc/testsuite/gcc.target/i386/pr110701.c
new file mode 100644
index 0000000..3f2cea5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr110701.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+int a;
+long b;
+int *c = &a;
+short d(short e, short f) { return e * f; }
+void foo() {
+ *c = d(340, b >= 0) ^ 3;
+}
+
+/* { dg-final { scan-assembler "andl\[ \\t]\\\$340," } } */
+/* { dg-final { scan-assembler-not "andw\[ \\t]\\\$340," } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr111657.c b/gcc/testsuite/gcc.target/i386/pr111657.c
new file mode 100644
index 0000000..fe54fca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr111657.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-rtl-expand -mno-sse" } */
+
+struct a { long arr[30]; };
+
+__seg_gs struct a m;
+void bar (struct a *dst) { *dst = m; }
+
+/* { dg-final { scan-rtl-dump-not "libcall" "expand" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr111745.c b/gcc/testsuite/gcc.target/i386/pr111745.c
new file mode 100644
index 0000000..e8989d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr111745.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512fp16 -mavx512vl -ffloat-store -O2" } */
+
+char c;
+_Float16 __attribute__((__vector_size__ (4 * sizeof (_Float16)))) f;
+_Float16 __attribute__((__vector_size__ (2 * sizeof (_Float16)))) f1;
+
+void
+foo (void)
+{
+ f /= c;
+}
+
+void
+foo1 (void)
+{
+ f1 /= c;
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr111845.c b/gcc/testsuite/gcc.target/i386/pr111845.c
new file mode 100644
index 0000000..d52110a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr111845.c
@@ -0,0 +1,47 @@
+/* PR tree-optimization/111845 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -g -masm=att" } */
+/* { dg-final { scan-assembler-times "\tadcq\t" 8 { target lp64 } } } */
+/* { dg-final { scan-assembler-times "\tadcl\t" 8 { target ia32 } } } */
+
+unsigned long l, m;
+
+__attribute__((noipa)) void
+foo (unsigned long x, unsigned long y, unsigned long h, unsigned long i, int a, int b)
+{
+ unsigned long c, d;
+ unsigned long e = __builtin_add_overflow (x, y, &c);
+ unsigned long f = __builtin_add_overflow (c, a < b, &d);
+ m = ((h + i) + e) + f;
+ l = d;
+}
+
+__attribute__((noipa)) void
+bar (unsigned long x, unsigned long y, unsigned long h, unsigned long i, int a, int b)
+{
+ unsigned long c, d;
+ unsigned long e = __builtin_add_overflow (x, y, &c);
+ unsigned long f = __builtin_add_overflow (c, a < b, &d);
+ m = (h + (i + e)) + f;
+ l = d;
+}
+
+__attribute__((noipa)) void
+baz (unsigned long x, unsigned long y, unsigned long h, unsigned long i, int a, int b)
+{
+ unsigned long c, d;
+ unsigned long e = __builtin_add_overflow (x, y, &c);
+ unsigned long f = __builtin_add_overflow (c, a < b, &d);
+ m = h + (i + (e + f));
+ l = d;
+}
+
+__attribute__((noipa)) void
+qux (unsigned long x, unsigned long y, unsigned long h, unsigned long i, int a, int b)
+{
+ unsigned long c, d;
+ unsigned long e = __builtin_add_overflow (x, y, &c);
+ unsigned long f = __builtin_add_overflow (c, a < b, &d);
+ m = h + ((i + e) + f);
+ l = d;
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr52146.c b/gcc/testsuite/gcc.target/i386/pr52146.c
index 9bd8136..03e3e95 100644
--- a/gcc/testsuite/gcc.target/i386/pr52146.c
+++ b/gcc/testsuite/gcc.target/i386/pr52146.c
@@ -16,4 +16,4 @@ test2 (void)
*apic_tpr_addr = 0;
}
-/* { dg-final { scan-assembler-not "\[,\\t \]+-18874240" } } */
+/* { dg-final { scan-assembler-not "\[,\\t \]+-18874240\n" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr90096.c b/gcc/testsuite/gcc.target/i386/pr90096.c
index 871e0ff..74f052e 100644
--- a/gcc/testsuite/gcc.target/i386/pr90096.c
+++ b/gcc/testsuite/gcc.target/i386/pr90096.c
@@ -10,7 +10,7 @@ volatile __mmask64 m64;
void
foo (int i)
{
- x1 = _mm512_gf2p8affineinv_epi64_epi8 (x1, x2, 3); /* { dg-error "needs isa option -mgfni -mavx512f" } */
+ x1 = _mm512_gf2p8affineinv_epi64_epi8 (x1, x2, 3); /* { dg-error "needs isa option -mevex512 -mgfni -mavx512f" } */
}
#ifdef __x86_64__
diff --git a/gcc/testsuite/gcc.target/i386/rcr-1.c b/gcc/testsuite/gcc.target/i386/rcr-1.c
new file mode 100644
index 0000000..8f369ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/rcr-1.c
@@ -0,0 +1,6 @@
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-Oz" } */
+unsigned __int128 foo(unsigned __int128 x) { return x >> 1; }
+__int128 bar(__int128 x) { return x >> 1; }
+/* { dg-final { scan-assembler-times "rcrq" 2 } } */
+/* { dg-final { scan-assembler-not "shrdq" } } */
diff --git a/gcc/testsuite/gcc.target/i386/rcr-2.c b/gcc/testsuite/gcc.target/i386/rcr-2.c
new file mode 100644
index 0000000..c8ed50e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/rcr-2.c
@@ -0,0 +1,6 @@
+/* { dg-do compile { target ia32 } } */
+/* { dg-options "-Oz -mno-stv" } */
+unsigned long long foo(unsigned long long x) { return x >> 1; }
+long long bar(long long x) { return x >> 1; }
+/* { dg-final { scan-assembler-times "rcrl" 2 } } */
+/* { dg-final { scan-assembler-not "shrdl" } } */
diff --git a/gcc/testsuite/gcc.target/i386/user_msr-1.c b/gcc/testsuite/gcc.target/i386/user_msr-1.c
new file mode 100644
index 0000000..4478523
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/user_msr-1.c
@@ -0,0 +1,20 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-musermsr -O2" } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\\$121" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\\$121" 1 } } */
+
+#include <x86gprintrin.h>
+
+volatile unsigned long long x;
+volatile unsigned long long y;
+
+void extern
+user_msr_test (void)
+{
+ x = _urdmsr(y);
+ x = _urdmsr(121);
+ _uwrmsr(y, x);
+ _uwrmsr(121, x);
+}
diff --git a/gcc/testsuite/gcc.target/i386/user_msr-2.c b/gcc/testsuite/gcc.target/i386/user_msr-2.c
new file mode 100644
index 0000000..ab0e76f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/user_msr-2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -musermsr" } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "movabsq\[ \\t\]\\\$20018842566655, \\%r\[a-z\]x" 1 } } */
+
+#include <x86gprintrin.h>
+
+volatile unsigned long long x;
+
+void extern
+user_msr_test (void)
+{
+ x = _urdmsr(0x1234ffffffffULL);
+ _uwrmsr(0x1234ffffffffULL, x);
+}
diff --git a/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-1.c b/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-1.c
new file mode 100644
index 0000000..e350996
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-1.c
@@ -0,0 +1,43 @@
+/* { dg-do run } */
+/* { dg-require-effective-target avx512vl } */
+/* { dg-options "-O2 -fopenmp-simd -mavx512vl" } */
+
+#include "avx512vl-check.h"
+
+#ifndef SIMDLEN
+#define SIMDLEN 4
+#endif
+
+int x[1024];
+
+#pragma omp declare simd simdlen(SIMDLEN)
+__attribute__((noinline)) int
+foo (int a, int b)
+{
+ return a + b;
+}
+
+void __attribute__((noipa))
+bar (void)
+{
+#pragma omp simd
+ for (int i = 0; i < 1024; i++)
+ if (x[i] < 20)
+ x[i] = foo (x[i], x[i]);
+}
+
+void avx512vl_test ()
+{
+ int i;
+#pragma GCC novector
+ for (i = 0; i < 1024; i++)
+ x[i] = i;
+
+ bar ();
+
+#pragma GCC novector
+ for (i = 0; i < 1024; i++)
+ if ((i < 20 && x[i] != i + i)
+ || (i >= 20 && x[i] != i))
+ abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-2.c b/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-2.c
new file mode 100644
index 0000000..d9968ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-2.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-require-effective-target avx512vl } */
+/* { dg-options "-O2 -fopenmp-simd -mavx512vl" } */
+
+#define SIMDLEN 8
+#include "vect-simd-clone-avx512-1.c"
diff --git a/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-3.c b/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-3.c
new file mode 100644
index 0000000..c05f6c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-simd-clone-avx512-3.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-require-effective-target avx512vl } */
+/* { dg-options "-O2 -fopenmp-simd -mavx512vl" } */
+
+#define SIMDLEN 16
+#include "vect-simd-clone-avx512-1.c"
diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c
index 68da4db..b9479be 100644
--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c
+++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c
@@ -1,7 +1,7 @@
/* Test that <x86gprintrin.h> is usable with -O -std=c89 -pedantic-errors. */
/* { dg-do compile } */
/* { dg-options "-O -std=c89 -pedantic-errors -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mraoint -mno-sse -mno-mmx" } */
-/* { dg-additional-options "-mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
+/* { dg-additional-options "-musermsr -mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
#include <x86gprintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c
index 737c2a2..ee167eb 100644
--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c
+++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -Werror-implicit-function-declaration -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mraoint -mno-sse -mno-mmx" } */
/* { dg-add-options bind_pic_locally } */
-/* { dg-additional-options "-mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
+/* { dg-additional-options "-musermsr -mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
/* Test that the intrinsics in <x86gprintrin.h> compile with optimization.
All of them are defined as inline functions that reference the proper
@@ -32,4 +32,8 @@
#define __builtin_ia32_cmpccxadd(A, B, C, D) __builtin_ia32_cmpccxadd(A, B, C, 1)
#define __builtin_ia32_cmpccxadd64(A, B, C, D) __builtin_ia32_cmpccxadd64(A, B, C, 1)
+/* usermsrintrin.h */
+#define __builtin_ia32_urdmsr(A) __builtin_ia32_urdmsr(1)
+#define __builtin_ia32_uwrmsr(A, B) __builtin_ia32_uwrmsr(1, B)
+
#include <x86gprintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c
index 52690b1..a870391 100644
--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c
+++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O0 -Werror-implicit-function-declaration -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mraoint -mno-sse -mno-mmx" } */
/* { dg-add-options bind_pic_locally } */
-/* { dg-additional-options "-mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
+/* { dg-additional-options "-musermsr -mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
/* Test that the intrinsics in <x86gprintrin.h> compile without optimization.
All of them are defined as inline functions that reference the proper
@@ -14,3 +14,29 @@
#define __inline
#include <x86gprintrin.h>
+
+#define _CONCAT(x,y) x ## y
+
+#define test_0(func, type, imm) \
+ type _CONCAT(_0,func) (int const I) \
+ { return func (imm); }
+
+#define test_1(func, type, op1_type) \
+ type _CONCAT(_1,func) (op1_type A) \
+ { return func (A); }
+
+#define test_1r(func, type, op1_type, imm) \
+ type _CONCAT(_1r,func) (op1_type A, int const I) \
+ { return func (imm, A); }
+
+#define test_2(func, type, op1_type, op2_type) \
+ type _CONCAT(_2,func) (op1_type A, op2_type B) \
+ { return func (A, B); }
+
+/* usermsrintrin.h */
+#ifdef __x86_64__
+test_0 (_urdmsr, unsigned long long, 1)
+test_1 (_urdmsr, unsigned long long, unsigned long long)
+test_1r (_uwrmsr, void, unsigned long long, 1)
+test_2 (_uwrmsr, void, unsigned long long, unsigned long long)
+#endif
diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c
index 94cfc58..c3c7947 100644
--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c
+++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c
@@ -13,9 +13,27 @@
#define extern
#define __inline
+#define _CONCAT(x,y) x ## y
+
+#define test_0(func, type, imm) \
+ type _CONCAT(_0,func) (int const I) \
+ { return func (imm); }
+
+#define test_1(func, type, op1_type) \
+ type _CONCAT(_1,func) (op1_type A) \
+ { return func (A); }
+
+#define test_1r(func, type, op1_type, imm) \
+ type _CONCAT(_1r,func) (op1_type A, int const I) \
+ { return func (imm, A); }
+
+#define test_2(func, type, op1_type, op2_type) \
+ type _CONCAT(_2,func) (op1_type A, op2_type B) \
+ { return func (A, B); }
+
#ifndef DIFFERENT_PRAGMAS
#ifdef __x86_64__
-#pragma GCC target ("adx,bmi,bmi2,cmpccxadd,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,prefetchi,raoint,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,uintr,xsaveopt")
+#pragma GCC target ("adx,bmi,bmi2,cmpccxadd,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,prefetchi,raoint,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,uintr,usermsr,xsaveopt")
#else
#pragma GCC target ("adx,bmi,bmi2,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,raoint,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,xsaveopt")
#endif
@@ -29,6 +47,18 @@
/* x86intrin.h (LWP/BMI/BMI2/TBM/LZCNT). */
#ifdef DIFFERENT_PRAGMAS
+#ifdef __x86_64__
+#pragma GCC target ("lwp,bmi,bmi2,tbm,lzcnt,usermsr")
+#else
#pragma GCC target ("lwp,bmi,bmi2,tbm,lzcnt")
#endif
+#endif
#include <x86gprintrin.h>
+
+/* usermsrintrin.h */
+#ifdef __x86_64__
+test_0 (_urdmsr, unsigned long long, 1)
+test_1 (_urdmsr, unsigned long long, unsigned long long)
+test_1r (_uwrmsr, void, unsigned long long, 1)
+test_2 (_uwrmsr, void, unsigned long long, unsigned long long)
+#endif
diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c
index 95f3e0a..ef126ea 100644
--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c
+++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c
@@ -31,8 +31,12 @@
#define __builtin_ia32_cmpccxadd(A, B, C, D) __builtin_ia32_cmpccxadd(A, B, C, 1)
#define __builtin_ia32_cmpccxadd64(A, B, C, D) __builtin_ia32_cmpccxadd64(A, B, C, 1)
+/* usermsrintrin.h */
+#define __builtin_ia32_urdmsr(A) __builtin_ia32_urdmsr(1)
+#define __builtin_ia32_uwrmsr(A, B) __builtin_ia32_uwrmsr(1, B)
+
#ifdef __x86_64__
-#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,cmpccxadd,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,prefetchi,raoint,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,uintr,xsavec,xsaveopt,xsaves,wbnoinvd")
+#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,cmpccxadd,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,prefetchi,raoint,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,uintr,usermsr,xsavec,xsaveopt,xsaves,wbnoinvd")
#else
#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,raoint,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,xsavec,xsaveopt,xsaves,wbnoinvd")
#endif
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c
new file mode 100644
index 0000000..28be329
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+void
+foo (unsigned char *dst, unsigned char *src)
+{
+ for (int y = 0; y < 16; y++)
+ {
+ for (int x = 0; x < 16; x++)
+ dst[x] = src[x] + 1;
+ dst += 32;
+ src += 32;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/const-build.c b/gcc/testsuite/gcc.target/powerpc/const-build.c
new file mode 100644
index 0000000..52941ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/const-build.c
@@ -0,0 +1,143 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -save-temps" } */
+/* { dg-require-effective-target has_arch_ppc64 } */
+
+/* Verify that two instructions are successfully used to build constants.
+ One insn is li or lis, another is rotate: rldicl, rldicr or rldic. */
+
+#define NOIPA __attribute__ ((noipa))
+
+struct fun
+{
+ long long (*f) (void);
+ long long val;
+};
+
+long long NOIPA
+li_rotldi_1 (void)
+{
+ return 0x7531000000000LL;
+}
+
+long long NOIPA
+li_rotldi_2 (void)
+{
+ return 0x2100000000000064LL;
+}
+
+long long NOIPA
+li_rotldi_3 (void)
+{
+ return 0xffff8531ffffffffLL;
+}
+
+long long NOIPA
+li_rotldi_4 (void)
+{
+ return 0x21ffffffffffff94LL;
+}
+
+long long NOIPA
+lis_rotldi_5 (void)
+{
+ return 0xffff85310000ffffLL;
+}
+
+long long NOIPA
+lis_rotldi_6 (void)
+{
+ return 0x5310000ffffffff8LL;
+}
+
+long long NOIPA
+li_rldicl_7 (void)
+{
+ return 0x3ffffffa1LL;
+}
+
+long long NOIPA
+li_rldicl_8 (void)
+{
+ return 0xff8531ffffffffLL;
+}
+
+long long NOIPA
+lis_rldicl_9 (void)
+{
+ return 0x00ff85310000ffffLL;
+}
+
+long long NOIPA
+li_rldicr_10 (void)
+{
+ return 0xffff8531fff00000LL;
+}
+
+long long NOIPA
+li_rldicr_11 (void)
+{
+ return 0x21fffffffff00000LL;
+}
+
+long long NOIPA
+lis_rldicr_12 (void)
+{
+ return 0x5310000ffffffff0LL;
+}
+
+long long NOIPA
+li_rldic_13 (void)
+{
+ return 0x000f853100000000LL;
+}
+long long NOIPA
+li_rldic_14 (void)
+{
+ return 0xffff853100ffffffLL;
+}
+
+long long NOIPA
+li_rldic_15 (void)
+{
+ return 0x800000ffffffff31LL;
+}
+
+long long NOIPA
+li_rldic_16 (void)
+{
+ return 0x800000000fffff31LL;
+}
+
+struct fun arr[] = {
+ {li_rotldi_1, 0x7531000000000LL},
+ {li_rotldi_2, 0x2100000000000064LL},
+ {li_rotldi_3, 0xffff8531ffffffffLL},
+ {li_rotldi_4, 0x21ffffffffffff94LL},
+ {lis_rotldi_5, 0xffff85310000ffffLL},
+ {lis_rotldi_6, 0x5310000ffffffff8LL},
+ {li_rldicl_7, 0x3ffffffa1LL},
+ {li_rldicl_8, 0xff8531ffffffffLL},
+ {lis_rldicl_9, 0x00ff85310000ffffLL},
+ {li_rldicr_10, 0xffff8531fff00000LL},
+ {li_rldicr_11, 0x21fffffffff00000LL},
+ {lis_rldicr_12, 0x5310000ffffffff0LL},
+ {li_rldic_13, 0x000f853100000000LL},
+ {li_rldic_14, 0xffff853100ffffffLL},
+ {li_rldic_15, 0x800000ffffffff31LL},
+ {li_rldic_16, 0x800000000fffff31LL}
+};
+
+/* { dg-final { scan-assembler-times {\mrotldi\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mrldicl\M} 3 } } */
+/* { dg-final { scan-assembler-times {\mrldicr\M} 3 } } */
+/* { dg-final { scan-assembler-times {\mrldic\M} 4 } } */
+
+int
+main ()
+{
+ for (int i = 0; i < sizeof (arr) / sizeof (arr[0]); i++)
+ if ((*arr[i].f) () != arr[i].val)
+ __builtin_abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/pr108338.c b/gcc/testsuite/gcc.target/powerpc/pr108338.c
new file mode 100644
index 0000000..5f2f628
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr108338.c
@@ -0,0 +1,52 @@
+/* { dg-do run } */
+/* { dg-require-effective-target hard_float } */
+/* { dg-options "-O2 -save-temps" } */
+
+/* Under lp64, parameter 'v' is in DI regs, then bitcast sub DI to SF. */
+/* { dg-final { scan-assembler-times {\mxscvspdpn\M} 2 { target { lp64 && has_arch_pwr8 } } } } */
+/* { dg-final { scan-assembler-times {\mmtvsrd\M} 2 { target { lp64 && { has_arch_pwr8 && { ! has_arch_pwr9 } } } } } } */
+/* { dg-final { scan-assembler-times {\mmtvsrd\M} 1 { target { lp64 && has_arch_pwr9 } } } } */
+/* { dg-final { scan-assembler-times {\mmtvsrws\M} 1 { target { lp64 && has_arch_pwr9 } } } } */
+/* { dg-final { scan-assembler-times {\mrldicr\M} 1 { target { lp64 && has_arch_pwr8 } } } } */
+/* { dg-final { scan-assembler-times {\msldi\M} 1 { target { lp64 && { has_arch_pwr8 && { ! has_arch_pwr9 } } } } } } */
+
+struct di_sf_sf
+{
+ float f1; float f2; long long l;
+};
+
+float __attribute__ ((noipa))
+sf_from_high32bit_di (struct di_sf_sf v)
+{
+#ifdef __LITTLE_ENDIAN__
+ return v.f2;
+#else
+ return v.f1;
+#endif
+}
+
+float __attribute__ ((noipa))
+sf_from_low32bit_di (struct di_sf_sf v)
+{
+#ifdef __LITTLE_ENDIAN__
+ return v.f1;
+#else
+ return v.f2;
+#endif
+}
+
+int main()
+{
+ struct di_sf_sf v;
+ v.f1 = v.f2 = 0.0f;
+#ifdef __LITTLE_ENDIAN__
+ v.f1 = 1.0f;
+ v.f2 = 2.0f;
+#else
+ v.f1 = 2.0f;
+ v.f2 = 1.0f;
+#endif
+ if (sf_from_high32bit_di (v) != 2.0f || sf_from_low32bit_di (v) != 1.0f)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/pr88558-p7.c b/gcc/testsuite/gcc.target/powerpc/pr88558-p7.c
new file mode 100644
index 0000000..3932656
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr88558-p7.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-math-errno -mdejagnu-cpu=power7" } */
+
+/* -fno-math-errno is required to make {i,l,ll}rint{,f} inlined */
+
+#include "pr88558.h"
+
+/* { dg-final { scan-assembler-times {\mfctid\M} 4 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {\mfctid\M} 2 { target ilp32 } } } */
+/* { dg-final { scan-assembler-times {\mfctiw\M} 2 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {\mfctiw\M} 4 { target ilp32 } } } */
+/* { dg-final { scan-assembler-times {\mstfiwx\M} 2 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {\mstfiwx\M} 4 { target ilp32 } } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/pr88558-p8.c b/gcc/testsuite/gcc.target/powerpc/pr88558-p8.c
new file mode 100644
index 0000000..1afc8fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr88558-p8.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-options "-O2 -fno-math-errno -mdejagnu-cpu=power8" } */
+
+/* -fno-math-errno is required to make {i,l,ll}rint{,f} inlined */
+
+#include "pr88558.h"
+
+/* { dg-final { scan-assembler-times {\mfctid\M} 4 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {\mfctid\M} 2 { target ilp32 } } } */
+/* { dg-final { scan-assembler-times {\mfctiw\M} 2 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {\mfctiw\M} 4 { target ilp32 } } } */
+/* { dg-final { scan-assembler-times {\mmfvsrwz\M} 2 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {\mmfvsrwz\M} 4 { target ilp32 } } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/pr88558.h b/gcc/testsuite/gcc.target/powerpc/pr88558.h
new file mode 100644
index 0000000..9c604fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr88558.h
@@ -0,0 +1,29 @@
+long int test1 (double a)
+{
+ return __builtin_lrint (a);
+}
+
+long long test2 (double a)
+{
+ return __builtin_llrint (a);
+}
+
+int test3 (double a)
+{
+ return __builtin_irint (a);
+}
+
+long int test4 (float a)
+{
+ return __builtin_lrintf (a);
+}
+
+long long test5 (float a)
+{
+ return __builtin_llrintf (a);
+}
+
+int test6 (float a)
+{
+ return __builtin_irintf (a);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-compile.c b/gcc/testsuite/gcc.target/riscv/cv-alu-compile.c
new file mode 100644
index 0000000..57289bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-compile.c
@@ -0,0 +1,252 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+#include <stdint.h>
+
+extern int d;
+extern int e;
+extern int f;
+
+void
+foo0(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_addN (a, b, 0);
+ e = __builtin_riscv_cv_alu_addN (a, b, 7);
+ f = __builtin_riscv_cv_alu_addN (a, b, 31);
+}
+
+void
+foo1(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_alu_addN (a, b, c);
+}
+
+void
+foo2(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_addRN (a, b, 0);
+ e = __builtin_riscv_cv_alu_addRN (a, b, 7);
+ f = __builtin_riscv_cv_alu_addRN (a, b, 31);
+}
+
+int
+foo3(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_addRN (a, b, c);
+}
+
+void
+foo4(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_adduN (a, b, 0);
+ e = __builtin_riscv_cv_alu_adduN (a, b, 7);
+ f = __builtin_riscv_cv_alu_adduN (a, b, 31);
+}
+
+int
+foo5(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_adduN (a, b, c);
+}
+
+void
+foo6(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_adduRN (a, b, 0);
+ e = __builtin_riscv_cv_alu_adduRN (a, b, 7);
+ f = __builtin_riscv_cv_alu_adduRN (a, b, 31);
+}
+
+int
+foo7(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_adduRN (a, b, c);
+}
+
+int
+foo8(int a, int b)
+{
+ return __builtin_riscv_cv_alu_clip (a, 15);
+}
+
+int
+foo9(int a, int b)
+{
+ return __builtin_riscv_cv_alu_clip (a, 10);
+}
+
+int
+foo10(int a, int b)
+{
+ return __builtin_riscv_cv_alu_clipu (a, 15);
+}
+
+int
+foo11(int a, int b)
+{
+ return __builtin_riscv_cv_alu_clipu (a, 10);
+}
+
+int
+foo12(int a)
+{
+ return __builtin_riscv_cv_alu_extbs (a);
+}
+
+int
+foo13(int a)
+{
+ return __builtin_riscv_cv_alu_extbz (a);
+}
+
+int
+foo14(int b)
+{
+ return __builtin_riscv_cv_alu_exths (b);
+}
+
+int
+foo15(int a)
+{
+ return __builtin_riscv_cv_alu_exthz (a);
+}
+
+int
+foo16(int a, int b)
+{
+ return __builtin_riscv_cv_alu_max (a, b);
+}
+
+int
+foo17(int a, int b)
+{
+ return __builtin_riscv_cv_alu_maxu (a, b);
+}
+
+int
+foo18(int a, int b)
+{
+ return __builtin_riscv_cv_alu_min (a, b);
+}
+
+int
+foo19(int a, int b)
+{
+ return __builtin_riscv_cv_alu_minu (a, b);
+}
+
+int
+foo20(int a, int b)
+{
+ return __builtin_riscv_cv_alu_slet (a, b);
+}
+
+int
+foo21(unsigned int a, unsigned int b)
+{
+ return __builtin_riscv_cv_alu_sletu (a, b);
+}
+
+void
+foo22(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subN (a, b, 0);
+ e = __builtin_riscv_cv_alu_subN (a, b, 7);
+ f = __builtin_riscv_cv_alu_subN (a, b, 31);
+}
+
+int
+foo23(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_subN (a, b, c);
+}
+
+void
+foo24(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subRN (a, b, 0);
+ e = __builtin_riscv_cv_alu_subRN (a, b, 7);
+ f = __builtin_riscv_cv_alu_subRN (a, b, 31);
+}
+
+int
+foo25(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_subRN (a, b, c);
+}
+
+void
+foo26(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subuN (a, b, 0);
+ e = __builtin_riscv_cv_alu_subuN (a, b, 7);
+ f = __builtin_riscv_cv_alu_subuN (a, b, 31);
+}
+
+int
+foo27(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_subuN (a, b, c);
+}
+
+void
+foo28(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subuRN (a, b, 0);
+ e = __builtin_riscv_cv_alu_subuRN (a, b, 7);
+ f = __builtin_riscv_cv_alu_subuRN (a, b, 31);
+}
+
+int
+foo29(int a, int b, int c)
+{
+ return __builtin_riscv_cv_alu_subuRN (a, b, c);
+}
+
+/* { dg-final { scan-assembler-times "cv\.addn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\.addn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.addnr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.addrnr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\.addun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.addunr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.addurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.addurnr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.clip\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),5" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.clipr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.clipu\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),5" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.clipur\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.extbs\t" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\\.extbz\t" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\\.exths\t" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\\.exthz\t" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\\.max\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.maxu\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.min\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.minu\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.sle\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.sleu\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\.subn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.subnr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.subrnr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 { target { no-opts "-O1" no-opts "-O2" no-opts "-O3" no-opts "-Og" no-opts "-Oz" no-opts "-Os" } } } } */
+/* { dg-final { scan-assembler-times "cv\.subun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.subun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.subunr\t" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.suburn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.suburn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.suburn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.suburnr\t" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addn.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addn.c
new file mode 100644
index 0000000..aa8610f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addn.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_addN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addrn.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addrn.c
new file mode 100644
index 0000000..12371b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addrn.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_addRN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addun.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addun.c
new file mode 100644
index 0000000..3faad6c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addun.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_adduN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addurn.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addurn.c
new file mode 100644
index 0000000..39dc575
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-addurn.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_adduRN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clip.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clip.c
new file mode 100644
index 0000000..a5ee231
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clip.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_clip (a, 4294967296); /* { dg-warning "overflow in conversion from \'long long int\' to \'int\' changes value from \'4294967296\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clipu.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clipu.c
new file mode 100644
index 0000000..1ee11d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-clipu.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_clipu (a, 4294967296); /* { dg-warning "unsigned conversion from \'long long int\' to \'unsigned int\' changes value from \'4294967296\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subn.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subn.c
new file mode 100644
index 0000000..91d6bd5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subn.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subrn.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subrn.c
new file mode 100644
index 0000000..3c7e4ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subrn.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subRN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subun.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subun.c
new file mode 100644
index 0000000..46218ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-subun.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subuN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-suburn.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-suburn.c
new file mode 100644
index 0000000..f20378d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile-suburn.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i_xcvalu -mabi=ilp32" } */
+
+extern int d;
+
+void
+foo(int a, int b)
+{
+ d = __builtin_riscv_cv_alu_subuRN (a, b, 65536); /* { dg-warning "unsigned conversion from \'int\' to \'unsigned char\' changes value from \'65536\' to \'0\'" } */
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile.c b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile.c
new file mode 100644
index 0000000..bbdb2d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-alu-fail-compile.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_alu } */
+/* { dg-options "-march=rv32i -mabi=ilp32" } */
+
+extern int d;
+
+int
+foo(int a, int b, int c)
+{
+ d += __builtin_riscv_cv_alu_slet (a, b); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_sletu (a, b); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_addN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_addRN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_adduN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_adduRN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_clip (a, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_clipu (a, 35); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_extbs (a); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_extbz (a); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_exths (a); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_exthz (a); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_min (a, b); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_minu (a, b); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_max (a, b); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_maxu (a, b); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_subN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_subRN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_subuN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+ d += __builtin_riscv_cv_alu_subuRN (a, b, 31); /* { dg-warning "implicit declaration of function" } */
+
+ return d;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-compile.c b/gcc/testsuite/gcc.target/riscv/cv-mac-compile.c
new file mode 100644
index 0000000..c5d4320
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-compile.c
@@ -0,0 +1,198 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+
+extern int d;
+extern int e;
+extern int f;
+
+int
+foo0(int a, int b, int c)
+{
+ return __builtin_riscv_cv_mac_mac (a, b, c);
+}
+
+void
+foo1(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_machhsN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_machhsN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_machhsN (a, b, c, 31);
+}
+
+void
+foo2(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_machhsRN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_machhsRN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_machhsRN (a, b, c, 31);
+}
+
+void
+foo3(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_machhuN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_machhuN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_machhuN (a, b, c, 31);
+}
+
+void
+foo4(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_machhuRN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_machhuRN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_machhuRN (a, b, c, 31);
+}
+
+void
+foo5(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_macsN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_macsN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_macsN (a, b, c, 31);
+}
+
+void
+foo6(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_macsRN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_macsRN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_macsRN (a, b, c, 31);
+}
+
+void
+foo7(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_macuN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_macuN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_macuN (a, b, c, 31);
+}
+
+void
+foo8(int a, int b, int c)
+{
+ d = __builtin_riscv_cv_mac_macuRN (a, b, c, 0);
+ e = __builtin_riscv_cv_mac_macuRN (a, b, c, 7);
+ f = __builtin_riscv_cv_mac_macuRN (a, b, c, 31);
+}
+
+int
+foo9(int a, int b, int c)
+{
+ return __builtin_riscv_cv_mac_msu (a, b, c);
+}
+
+void
+foo10(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_mulhhsN (a, b, 0);
+ e = __builtin_riscv_cv_mac_mulhhsN (a, b, 7);
+ f = __builtin_riscv_cv_mac_mulhhsN (a, b, 31);
+}
+
+void
+foo11(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_mulhhsRN (a, b, 0);
+ e = __builtin_riscv_cv_mac_mulhhsRN (a, b, 7);
+ f = __builtin_riscv_cv_mac_mulhhsRN (a, b, 31);
+}
+
+void
+foo12(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_mulhhuN (a, b, 0);
+ e = __builtin_riscv_cv_mac_mulhhuN (a, b, 7);
+ f = __builtin_riscv_cv_mac_mulhhuN (a, b, 31);
+}
+
+void
+foo13(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_mulhhuRN (a, b, 0);
+ e = __builtin_riscv_cv_mac_mulhhuRN (a, b, 7);
+ f = __builtin_riscv_cv_mac_mulhhuRN (a, b, 31);
+}
+
+void
+foo14(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_mulsN (a, b, 0);
+ e = __builtin_riscv_cv_mac_mulsN (a, b, 7);
+ f = __builtin_riscv_cv_mac_mulsN (a, b, 31);
+}
+
+void
+foo15(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_mulsRN (a, b, 0);
+ e = __builtin_riscv_cv_mac_mulsRN (a, b, 7);
+ f = __builtin_riscv_cv_mac_mulsRN (a, b, 31);
+}
+
+void
+foo16(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_muluN (a, b, 0);
+ e = __builtin_riscv_cv_mac_muluN (a, b, 7);
+ f = __builtin_riscv_cv_mac_muluN (a, b, 31);
+}
+
+void
+foo17(int a, int b)
+{
+ d = __builtin_riscv_cv_mac_muluRN (a, b, 0);
+ e = __builtin_riscv_cv_mac_muluRN (a, b, 7);
+ f = __builtin_riscv_cv_mac_muluRN (a, b, 31);
+}
+
+/* { dg-final { scan-assembler-times "cv\.mac\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\)" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhsn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhsn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhsn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhsrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhsrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhsrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.machhurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macsn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macsn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macsn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macsrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macsrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macsrn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macun\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.macurn\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.msu\t\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\),\(\?\:t\[0-6\]\|a\[0-7\]\|s\[1-11\]\)" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhsrn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhsrn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhsrn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhun\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhun\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhun\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhurn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhurn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulhhurn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulsn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulun\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulun\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulun\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulurn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],0" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulurn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],7" 1 } } */
+/* { dg-final { scan-assembler-times "cv\.mulurn\t\[a-z\]\[0-9\],\[a-z\]\[0-9\],\[a-z\]\[0-9\],31" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mac.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mac.c
new file mode 100644
index 0000000..cfb1ed8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mac.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+extern int32_t res6;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mac (12147483649, 21, 47); /* { dg-warning "overflow in conversion" } */
+ res2 = __builtin_riscv_cv_mac_mac (648, 12147483649, 48); /* { dg-warning "overflow in conversion" } */
+ res3 = __builtin_riscv_cv_mac_mac (648, 48, 12147483649); /* { dg-warning "overflow in conversion" } */
+ res4 = __builtin_riscv_cv_mac_mac (-2147483649, 21, 47); /* { dg-warning "overflow in conversion" } */
+ res5 = __builtin_riscv_cv_mac_mac (648, -2147483649, 48); /* { dg-warning "overflow in conversion" } */
+ res6 = __builtin_riscv_cv_mac_mac (648, 48, -2147483649); /* { dg-warning "overflow in conversion" } */
+
+ return res1+res2+res3+res4+res5+res6;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsn.c
new file mode 100644
index 0000000..32c5329
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_machhsN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_machhsN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_machhsN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_machhsN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_machhsN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsrn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsrn.c
new file mode 100644
index 0000000..1b8e4e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhsrn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_machhsRN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_machhsRN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_machhsRN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_machhsRN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_machhsRN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhun.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhun.c
new file mode 100644
index 0000000..08cb17a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhun.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_machhuN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_machhuN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_machhuN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_machhuN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_machhuN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhurn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhurn.c
new file mode 100644
index 0000000..cbfc8ee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-machhurn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_machhuRN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_machhuRN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_machhuRN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_machhuRN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_machhuRN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsn.c
new file mode 100644
index 0000000..6ea3f39
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_macsN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_macsN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_macsN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_macsN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_macsN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsrn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsrn.c
new file mode 100644
index 0000000..1862846
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macsrn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_macsRN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_macsRN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_macsRN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_macsRN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_macsRN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macun.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macun.c
new file mode 100644
index 0000000..58c139a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macun.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_macuN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_macuN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_macuN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_macuN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_macuN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macurn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macurn.c
new file mode 100644
index 0000000..65f7b14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-macurn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_macuRN (648, 219, 319, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_macuRN (648, 219, 325, 0);
+ res3 = __builtin_riscv_cv_mac_macuRN (648, 219, 319, 15);
+ res4 = __builtin_riscv_cv_mac_macuRN (648, 219, 325, 31);
+ res5 = __builtin_riscv_cv_mac_macuRN (648, 219, 325, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-msu.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-msu.c
new file mode 100644
index 0000000..caee5eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-msu.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+extern int32_t res6;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_msu (12147483649, 21, 47); /* { dg-warning "overflow in conversion" } */
+ res2 = __builtin_riscv_cv_mac_msu (648, 12147483649, 48); /* { dg-warning "overflow in conversion" } */
+ res3 = __builtin_riscv_cv_mac_msu (648, 48, 12147483649); /* { dg-warning "overflow in conversion" } */
+ res4 = __builtin_riscv_cv_mac_msu (-2147483649, 21, 47); /* { dg-warning "overflow in conversion" } */
+ res5 = __builtin_riscv_cv_mac_msu (648, -2147483649, 48); /* { dg-warning "overflow in conversion" } */
+ res6 = __builtin_riscv_cv_mac_msu (648, 48, -2147483649); /* { dg-warning "overflow in conversion" } */
+
+ return res1+res2+res3+res4+res5+res6;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsn.c
new file mode 100644
index 0000000..dd00ab3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mulhhsN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_mulhhsN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_mulhhsN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_mulhhsN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_mulhhsN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsrn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsrn.c
new file mode 100644
index 0000000..c1c1d08
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhsrn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mulhhsRN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_mulhhsRN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_mulhhsRN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_mulhhsRN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_mulhhsRN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhun.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhun.c
new file mode 100644
index 0000000..0516b6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhun.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mulhhuN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_mulhhuN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_mulhhuN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_mulhhuN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_mulhhuN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhurn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhurn.c
new file mode 100644
index 0000000..d605bc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulhhurn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mulhhuRN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_mulhhuRN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_mulhhuRN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_mulhhuRN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_mulhhuRN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsn.c
new file mode 100644
index 0000000..9bcf2b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mulsN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_mulsN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_mulsN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_mulsN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_mulsN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsrn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsrn.c
new file mode 100644
index 0000000..2af1065
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulsrn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern int32_t res1;
+extern int32_t res2;
+extern int32_t res3;
+extern int32_t res4;
+extern int32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_mulsRN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_mulsRN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_mulsRN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_mulsRN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_mulsRN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulun.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulun.c
new file mode 100644
index 0000000..8ed53f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulun.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_muluN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_muluN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_muluN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_muluN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_muluN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulurn.c b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulurn.c
new file mode 100644
index 0000000..b3b8a3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-fail-compile-mulurn.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-march=rv32i_xcvmac -mabi=ilp32" } */
+/* { dg-skip-if "Skip LTO tests of builtin compilation" { *-*-* } { "-flto" } } */
+
+#include <stdint.h>
+
+extern uint32_t res1;
+extern uint32_t res2;
+extern uint32_t res3;
+extern uint32_t res4;
+extern uint32_t res5;
+
+int
+main (void)
+{
+ res1 = __builtin_riscv_cv_mac_muluRN (648, 219, -1); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+ res2 = __builtin_riscv_cv_mac_muluRN (648, 219, 0);
+ res3 = __builtin_riscv_cv_mac_muluRN (648, 219, 15);
+ res4 = __builtin_riscv_cv_mac_muluRN (648, 219, 31);
+ res5 = __builtin_riscv_cv_mac_muluRN (648, 219, 32); /* { dg-error "invalid argument to built-in function" "" { target *-*-* } } */
+
+ return res1+res2+res3+res4+res5;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/cv-mac-test-autogeneration.c b/gcc/testsuite/gcc.target/riscv/cv-mac-test-autogeneration.c
new file mode 100644
index 0000000..1ee9c26
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/cv-mac-test-autogeneration.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target cv_mac } */
+/* { dg-options "-O2 -march=rv32im_xcvmac -mabi=ilp32" } */
+
+int
+foo0(int a, int b, int c)
+{
+ return a * b + c;
+}
+
+int
+foo1(int a, int b, int c)
+{
+ return a - b * c;
+}
+
+/* { dg-final { scan-assembler-times "cv\\.mac" 1 } } */
+/* { dg-final { scan-assembler-times "cv\\.msu" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/fle-ieee.c b/gcc/testsuite/gcc.target/riscv/fle-ieee.c
index e55331f..12d0451 100644
--- a/gcc/testsuite/gcc.target/riscv/fle-ieee.c
+++ b/gcc/testsuite/gcc.target/riscv/fle-ieee.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fno-signaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32d -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv32 } } } */
long
fle (double x, double y)
diff --git a/gcc/testsuite/gcc.target/riscv/fle-snan.c b/gcc/testsuite/gcc.target/riscv/fle-snan.c
index f40bb2c..146b786 100644
--- a/gcc/testsuite/gcc.target/riscv/fle-snan.c
+++ b/gcc/testsuite/gcc.target/riscv/fle-snan.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fsignaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32d -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv32 } } } */
long
fle (double x, double y)
diff --git a/gcc/testsuite/gcc.target/riscv/fle.c b/gcc/testsuite/gcc.target/riscv/fle.c
index 97c8ab9..2379e22 100644
--- a/gcc/testsuite/gcc.target/riscv/fle.c
+++ b/gcc/testsuite/gcc.target/riscv/fle.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -fno-trapping-math -fno-signaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -fno-trapping-math -fno-signaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32d -fno-finite-math-only -fno-trapping-math -fno-signaling-nans" { target { rv32 } } } */
long
fle (double x, double y)
diff --git a/gcc/testsuite/gcc.target/riscv/flef-ieee.c b/gcc/testsuite/gcc.target/riscv/flef-ieee.c
index f3e7e7d..b6ee6ed 100644
--- a/gcc/testsuite/gcc.target/riscv/flef-ieee.c
+++ b/gcc/testsuite/gcc.target/riscv/flef-ieee.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fno-signaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32f -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv32 } } } */
long
flef (float x, float y)
diff --git a/gcc/testsuite/gcc.target/riscv/flef-snan.c b/gcc/testsuite/gcc.target/riscv/flef-snan.c
index ef75b35..e8611e8 100644
--- a/gcc/testsuite/gcc.target/riscv/flef-snan.c
+++ b/gcc/testsuite/gcc.target/riscv/flef-snan.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fsignaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32f -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv32 } } } */
long
flef (float x, float y)
diff --git a/gcc/testsuite/gcc.target/riscv/flef.c b/gcc/testsuite/gcc.target/riscv/flef.c
index 379f511..ce5420b 100644
--- a/gcc/testsuite/gcc.target/riscv/flef.c
+++ b/gcc/testsuite/gcc.target/riscv/flef.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -fno-trapping-math -fno-signaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -fno-trapping-math -fno-signaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32f -fno-finite-math-only -fno-trapping-math -fno-signaling-nans" { target { rv32 } } } */
long
flef (float x, float y)
diff --git a/gcc/testsuite/gcc.target/riscv/flt-ieee.c b/gcc/testsuite/gcc.target/riscv/flt-ieee.c
index c40a0fc..cff30c6 100644
--- a/gcc/testsuite/gcc.target/riscv/flt-ieee.c
+++ b/gcc/testsuite/gcc.target/riscv/flt-ieee.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fno-signaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32d -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv32 } } } */
long
flt (double x, double y)
diff --git a/gcc/testsuite/gcc.target/riscv/flt-snan.c b/gcc/testsuite/gcc.target/riscv/flt-snan.c
index c958ec0..238748a 100644
--- a/gcc/testsuite/gcc.target/riscv/flt-snan.c
+++ b/gcc/testsuite/gcc.target/riscv/flt-snan.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fsignaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32d -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv32 } } } */
long
flt (double x, double y)
diff --git a/gcc/testsuite/gcc.target/riscv/fltf-ieee.c b/gcc/testsuite/gcc.target/riscv/fltf-ieee.c
index a9c0805..024d530 100644
--- a/gcc/testsuite/gcc.target/riscv/fltf-ieee.c
+++ b/gcc/testsuite/gcc.target/riscv/fltf-ieee.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fno-signaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32f -fno-finite-math-only -ftrapping-math -fno-signaling-nans" { target { rv32 } } } */
long
fltf (float x, float y)
diff --git a/gcc/testsuite/gcc.target/riscv/fltf-snan.c b/gcc/testsuite/gcc.target/riscv/fltf-snan.c
index 34a51e3..9e12f37 100644
--- a/gcc/testsuite/gcc.target/riscv/fltf-snan.c
+++ b/gcc/testsuite/gcc.target/riscv/fltf-snan.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target hard_float } */
-/* { dg-options "-fno-finite-math-only -ftrapping-math -fsignaling-nans" } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc -mabi=ilp32f -fno-finite-math-only -ftrapping-math -fsignaling-nans" { target { rv32 } } } */
long
fltf (float x, float y)
diff --git a/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-1.c b/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-1.c
new file mode 100644
index 0000000..ffb4993
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-1.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ffold-mem-offsets" } */
+
+void sink(int arr[2]);
+
+void
+foo(int a, int b, int i)
+{
+ int arr[2] = {a, b};
+ arr[i]++;
+ sink(arr);
+}
+
+/* There should be no negative memory offsets when using -ffold-mem-offsets. */
+/* { dg-final { scan-assembler-not "lw\t.*,-.*\\(.*\\)" } } */
+/* { dg-final { scan-assembler-not "sw\t.*,-.*\\(.*\\)" } } */ \ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-2.c b/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-2.c
new file mode 100644
index 0000000..ca96180
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-2.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ffold-mem-offsets" } */
+
+void sink(int arr[3]);
+
+void
+foo(int a, int b, int c, int i)
+{
+ int arr1[3] = {a, b, c};
+ int arr2[3] = {a, c, b};
+ int arr3[3] = {c, b, a};
+
+ arr1[i]++;
+ arr2[i]++;
+ arr3[i]++;
+
+ sink(arr1);
+ sink(arr2);
+ sink(arr3);
+}
+
+/* There should be no negative memory offsets when using -ffold-mem-offsets. */
+/* { dg-final { scan-assembler-not "lw\t.*,-.*\\(.*\\)" } } */
+/* { dg-final { scan-assembler-not "sw\t.*,-.*\\(.*\\)" } } */ \ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-3.c b/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-3.c
new file mode 100644
index 0000000..83f82c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/fold-mem-offsets-3.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ffold-mem-offsets" } */
+
+void load(int arr[2]);
+
+int
+foo(long unsigned int i)
+{
+ int arr[2];
+ load(arr);
+
+ return arr[3 * i + 77];
+}
+
+/* There should be no negative memory offsets when using -ffold-mem-offsets. */
+/* { dg-final { scan-assembler-not "lw\t.*,-.*\\(.*\\)" } } */
+/* { dg-final { scan-assembler-not "addi\t.*,.*,77" } } */ \ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/pr111466.c b/gcc/testsuite/gcc.target/riscv/pr111466.c
new file mode 100644
index 0000000..3348d59
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/pr111466.c
@@ -0,0 +1,15 @@
+/* Simplified variant of gcc.target/riscv/zba-adduw.c. */
+
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zba_zbs -mabi=lp64" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" } } */
+
+unsigned int foo2(int unused, int n, unsigned y, unsigned delta){
+ int s = 0;
+ unsigned int x = 0;
+ for (;x<n;x +=delta)
+ s += x+y;
+ return s;
+}
+
+/* { dg-final { scan-assembler-not "\msext\M" } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-1.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-1.h
index 4742d92..36af15b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-1.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-1.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-2.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-2.h
index b084eaa..1aad4a65 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-2.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2float-2.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-1.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-1.h
index 2df68aa..639adc3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-1.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-1.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-2.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-2.h
index 9735141..3d518a4 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-2.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_float2int-2.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-1.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-1.h
index 5b0baee..1c49e39 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-1.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-1.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-2.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-2.h
index 2177c94..640e0db 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-2.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2float-2.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-1.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-1.h
index c8ef6df..eb820d3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-1.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-1.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-2.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-2.h
index f53c1b3..5653ef61 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-2.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_convert_int2int-2.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-1.c
index 21219b4..c7bd37e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-1.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model -ffast-math" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(TYPE, OP) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-2.c
index 2fcdc33..c2fb92f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_sqrt-2.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model -ffast-math" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(TYPE, OP) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-1.c
index 8076243..cb738a8 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-1.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-2.c
index 8e44301..d9fb086 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-2.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-3.c
index 6da5b6e..1458393 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-3.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-4.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-4.c
index 5428c28..e120e8f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-4.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-5.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-5.c
index 8e56737..775e65e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-5.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-6.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-6.c
index 65a36d0..6331440 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-6.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-6.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-7.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-7.c
index 356fe9f..4847aec 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-7.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-7.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-8.c
index 5208a85..ae4d118 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_unary-8.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv_zvfh -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define abs(A) ((A) < 0 ? -(A) : (A))
#define neg(A) (-(A))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/gather-scatter/offset_extend-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/gather-scatter/offset_extend-1.c
new file mode 100644
index 0000000..b7936eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/gather-scatter/offset_extend-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=rv64gcv -mabi=lp64d" } */
+
+void
+f (int *restrict y, int *restrict x, int *restrict indices, int n)
+{
+ for (int i = 0; i < n; ++i)
+ y[i] = x[indices[i]] + 1;
+}
+
+/* { dg-final { scan-assembler {vluxei64\.v} } } */
+/* { dg-final { scan-assembler {vsll\.vi} } } */
+/* { dg-final { scan-assembler {vsext\.vf2} } } */
+/* { dg-final { scan-assembler-not {vluxei32\.v} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-8.c
index 2568d69..cf2fd1d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-8.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv32gcv -mabi=ilp32d --param riscv-autovec-preference=scalable -fno-vect-cost-model -fdump-tree-optimized-details" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define VEC_PERM(TYPE) \
TYPE __attribute__ ((noinline, noclone)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-9.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-9.c
index 7c42438..1b99ffd 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-9.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-9.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=rv64gcv -mabi=lp64d --param riscv-autovec-preference=scalable -fno-vect-cost-model -fdump-tree-optimized-details" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define VEC_PERM(TYPE) \
TYPE __attribute__ ((noinline, noclone)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111232.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111232.c
index de815c5..edad140 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111232.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111232.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gcv -mabi=lp64d --param=riscv-autovec-preference=scalable -Ofast -fno-schedule-insns -fno-schedule-insns2" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
int16_t
foo (int8_t *restrict x, int8_t *restrict y, int n)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111751.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111751.c
new file mode 100644
index 0000000..0f1e8a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111751.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#define N 16
+
+int foo1 ()
+{
+ int i;
+ char ia[N];
+ char ic[N] = {0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45};
+ char ib[N] = {0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45};
+
+ /* Not vectorizable, multiplication */
+ for (i = 0; i < N; i++)
+ {
+ ia[i] = ib[i] * ic[i];
+ }
+
+ /* check results: */
+ for (i = 0; i < N; i++)
+ {
+ if (ia[i] != (char) (ib[i] * ic[i]))
+ __builtin_abort ();
+ }
+
+ return 0;
+}
+
+typedef int half_word;
+
+int foo2 ()
+{
+ int i;
+ half_word ia[N];
+ half_word ic[N] = {0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45};
+ half_word ib[N] = {0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45};
+
+ /* Not worthwhile, only 2 parts per int */
+ for (i = 0; i < N; i++)
+ {
+ ia[i] = ib[i] + ic[i];
+ }
+
+ /* check results: */
+ for (i = 0; i < N; i++)
+ {
+ if (ia[i] != ib[i] + ic[i])
+ __builtin_abort ();
+ }
+
+ return 0;
+}
+
+/* { dg-final { scan-assembler-times {li\s+[a-x0-9]+,0\s+ret} 2 } } */
+/* { dg-final { scan-assembler-not {vset} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-0.c
new file mode 100644
index 0000000..10d235a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-0.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+/*
+** test_uint16_t___builtin_bswap16:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e16,\s*m1,\s*ta,\s*ma
+** vsrl\.vi\s+v[0-9]+,\s*v[0-9]+,\s*8
+** vsll\.vi\s+v[0-9]+,\s*v[0-9]+,\s*8
+** vor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** ...
+*/
+TEST_UNARY_CALL (uint16_t, __builtin_bswap16)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-run-0.c
new file mode 100644
index 0000000..8d45ceb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/bswap16-run-0.c
@@ -0,0 +1,44 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model" } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+uint16_t in[ARRAY_SIZE];
+uint16_t out[ARRAY_SIZE];
+uint16_t ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL (uint16_t, __builtin_bswap16)
+TEST_ASSERT (uint16_t)
+
+/* TEST_INIT Arguments:
+ +-------+-------+---------------------------+---------+
+ | type | input | reference | test id |
+ +-------+-------+---------------------------+---------+
+*/
+TEST_INIT (uint16_t, 0x1234u, __builtin_bswap16 (0x1234u), 1)
+TEST_INIT (uint16_t, 0x1122u, __builtin_bswap16 (0x1122u), 2)
+TEST_INIT (uint16_t, 0xa55au, __builtin_bswap16 (0xa55au), 3)
+TEST_INIT (uint16_t, 0x0000u, __builtin_bswap16 (0x0000u), 4)
+TEST_INIT (uint16_t, 0xffffu, __builtin_bswap16 (0xffffu), 5)
+TEST_INIT (uint16_t, 0x4321u, __builtin_bswap16 (0x4321u), 6)
+
+int
+main ()
+{
+ /* RUN_TEST Arguments:
+ +------+---------+-------------+----+-----+-----+------------+
+ | type | test id | fun to test | in | out | ref | array size |
+ +------+---------+-------------+----+-----+-----+------------+
+ */
+ RUN_TEST (uint16_t, 1, __builtin_bswap16, in, out, ref, ARRAY_SIZE);
+ RUN_TEST (uint16_t, 2, __builtin_bswap16, in, out, ref, ARRAY_SIZE);
+ RUN_TEST (uint16_t, 3, __builtin_bswap16, in, out, ref, ARRAY_SIZE);
+ RUN_TEST (uint16_t, 4, __builtin_bswap16, in, out, ref, ARRAY_SIZE);
+ RUN_TEST (uint16_t, 5, __builtin_bswap16, in, out, ref, ARRAY_SIZE);
+ RUN_TEST (uint16_t, 6, __builtin_bswap16, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-0.c
new file mode 100644
index 0000000..762b140
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-0.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh -mabi=lp64d -O3 -ftree-vectorize -ffast-math -fno-vect-cost-model -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+
+/*
+** test_int64_to_fp16:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*mf2,\s*ta,\s*ma
+** vfncvt\.f\.x\.w\s+v[0-9]+,\s*v[0-9]+
+** vsetvli\s+zero,\s*zero,\s*e16,\s*mf4,\s*ta,\s*ma
+** vfncvt\.f\.f\.w\s+v[0-9]+,\s*v[0-9]+
+** ...
+*/
+void
+test_int64_to_fp16 (int64_t * __restrict a, _Float16 *b, unsigned n)
+{
+  for (unsigned i = 0; i < n; i++)
+    b[i] = (_Float16) (a[i]);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-1.c
new file mode 100644
index 0000000..3180ba3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/cvt-1.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh -mabi=lp64d -O3 -ftree-vectorize -ffast-math -fno-vect-cost-model -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+
+/*
+** test_uint64_to_fp16:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*mf2,\s*ta,\s*ma
+** vfncvt\.f\.xu\.w\s+v[0-9]+,\s*v[0-9]+
+** vsetvli\s+zero,\s*zero,\s*e16,\s*mf4,\s*ta,\s*ma
+** vfncvt\.f\.f\.w\s+v[0-9]+,\s*v[0-9]+
+** ...
+*/
+void
+test_uint64_to_fp16 (uint64_t * __restrict a, _Float16 *b, unsigned n)
+{
+  for (unsigned i = 0; i < n; i++)
+    b[i] = (_Float16) (a[i]);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-1.c
index 88611e8..419a3de 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-1.c
@@ -12,15 +12,15 @@ float ref[ARRAY_SIZE];
TEST_UNARY_CALL (float, __builtin_ceilf)
TEST_ASSERT (float)
-TEST_INIT (float, 1.2, 2.0, 1)
-TEST_INIT (float, -1.2, -1.0, 2)
-TEST_INIT (float, 3.0, 3.0, 3)
-TEST_INIT (float, 8388607.5, 8388608.0, 4)
-TEST_INIT (float, 8388609.0, 8388609.0, 5)
-TEST_INIT (float, 0.0, 0.0, 6)
-TEST_INIT (float, -0.0, -0.0, 7)
-TEST_INIT (float, -8388607.5, -8388607.0, 8)
-TEST_INIT (float, -8388608.0, -8388608.0, 9)
+TEST_INIT (float, 1.2, __builtin_ceilf (1.2), 1)
+TEST_INIT (float, -1.2, __builtin_ceilf (-1.2), 2)
+TEST_INIT (float, 3.0, __builtin_ceilf (3.0), 3)
+TEST_INIT (float, 8388607.5, __builtin_ceilf (8388607.5), 4)
+TEST_INIT (float, 8388609.0, __builtin_ceilf (8388609.0), 5)
+TEST_INIT (float, 0.0, __builtin_ceilf (0.0), 6)
+TEST_INIT (float, -0.0, __builtin_ceilf (-0.0), 7)
+TEST_INIT (float, -8388607.5, __builtin_ceilf (-8388607.5), 8)
+TEST_INIT (float, -8388608.0, __builtin_ceilf (-8388608.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-2.c
index bb4c86c..2b29c8e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ceil-run-2.c
@@ -12,15 +12,15 @@ double ref[ARRAY_SIZE];
TEST_UNARY_CALL (double, __builtin_ceil)
TEST_ASSERT (double)
-TEST_INIT (double, 1.2, 2.0, 1)
-TEST_INIT (double, -1.2, -1.0, 2)
-TEST_INIT (double, 3.0, 3.0, 3)
-TEST_INIT (double, 4503599627370495.5, 4503599627370496.0, 4)
-TEST_INIT (double, 4503599627370497.0, 4503599627370497.0, 5)
-TEST_INIT (double, 0.0, 0.0, 6)
-TEST_INIT (double, -0.0, -0.0, 7)
-TEST_INIT (double, -4503599627370495.5, -4503599627370495.0, 8)
-TEST_INIT (double, -4503599627370496.0, -4503599627370496.0, 9)
+TEST_INIT (double, 1.2, __builtin_ceil (1.2), 1)
+TEST_INIT (double, -1.2, __builtin_ceil (-1.2), 2)
+TEST_INIT (double, 3.0, __builtin_ceil (3.0), 3)
+TEST_INIT (double, 4503599627370495.5, __builtin_ceil (4503599627370495.5), 4)
+TEST_INIT (double, 4503599627370497.0, __builtin_ceil (4503599627370497.0), 5)
+TEST_INIT (double, 0.0, __builtin_ceil (0.0), 6)
+TEST_INIT (double, -0.0, __builtin_ceil (-0.0), 7)
+TEST_INIT (double, -4503599627370495.5, __builtin_ceil (-4503599627370495.5), 8)
+TEST_INIT (double, -4503599627370496.0, __builtin_ceil (-4503599627370496.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-1.c
index 4af60c9..07b61cd 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-1.c
@@ -12,15 +12,15 @@ float ref[ARRAY_SIZE];
TEST_UNARY_CALL (float, __builtin_floorf)
TEST_ASSERT (float)
-TEST_INIT (float, 1.2, 1.0, 1)
-TEST_INIT (float, -1.2, -2.0, 2)
-TEST_INIT (float, 3.0, 3.0, 3)
-TEST_INIT (float, 8388607.5, 8388607.0, 4)
-TEST_INIT (float, 8388609.0, 8388609.0, 5)
-TEST_INIT (float, 0.0, 0.0, 6)
-TEST_INIT (float, -0.0, -0.0, 7)
-TEST_INIT (float, -8388607.5, -8388608.0, 8)
-TEST_INIT (float, -8388608.0, -8388608.0, 9)
+TEST_INIT (float, 1.2, __builtin_floorf (1.2), 1)
+TEST_INIT (float, -1.2, __builtin_floorf (-1.2), 2)
+TEST_INIT (float, 3.0, __builtin_floorf (3.0), 3)
+TEST_INIT (float, 8388607.5, __builtin_floorf (8388607.5), 4)
+TEST_INIT (float, 8388609.0, __builtin_floorf (8388609.0), 5)
+TEST_INIT (float, 0.0, __builtin_floorf (0.0), 6)
+TEST_INIT (float, -0.0, __builtin_floorf (-0.0), 7)
+TEST_INIT (float, -8388607.5, __builtin_floorf (-8388607.5), 8)
+TEST_INIT (float, -8388608.0, __builtin_floorf (-8388608.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-2.c
index ad3735c..7201ac1 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-floor-run-2.c
@@ -12,15 +12,15 @@ double ref[ARRAY_SIZE];
TEST_UNARY_CALL (double, __builtin_floor)
TEST_ASSERT (double)
-TEST_INIT (double, 1.2, 1.0, 1)
-TEST_INIT (double, -1.2, -2.0, 2)
-TEST_INIT (double, 3.0, 3.0, 3)
-TEST_INIT (double, 4503599627370495.5, 4503599627370495.0, 4)
-TEST_INIT (double, 4503599627370497.0, 4503599627370497.0, 5)
-TEST_INIT (double, 0.0, 0.0, 6)
-TEST_INIT (double, -0.0, -0.0, 7)
-TEST_INIT (double, -4503599627370495.5, -4503599627370496.0, 8)
-TEST_INIT (double, -4503599627370496.0, -4503599627370496.0, 9)
+TEST_INIT (double, 1.2, __builtin_floor (1.2), 1)
+TEST_INIT (double, -1.2, __builtin_floor (-1.2), 2)
+TEST_INIT (double, 3.0, __builtin_floor (3.0), 3)
+TEST_INIT (double, 4503599627370495.5, __builtin_floor (4503599627370495.5), 4)
+TEST_INIT (double, 4503599627370497.0, __builtin_floor (4503599627370497.0), 5)
+TEST_INIT (double, 0.0, __builtin_floor (0.0), 6)
+TEST_INIT (double, -0.0, __builtin_floor (-0.0), 7)
+TEST_INIT (double, -4503599627370495.5, __builtin_floor (-4503599627370495.5), 8)
+TEST_INIT (double, -4503599627370496.0, __builtin_floor (-4503599627370496.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-0.c
new file mode 100644
index 0000000..2d4a1d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-0.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_int___builtin_iceilf:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+3
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (float, int, __builtin_iceilf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-run-0.c
new file mode 100644
index 0000000..714173a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iceil-run-0.c
@@ -0,0 +1,63 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+int out[ARRAY_SIZE];
+int ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, int, __builtin_iceilf)
+TEST_ASSERT (int)
+
+TEST_INIT_CVT (float, 1.2, int, __builtin_iceilf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, int, __builtin_iceilf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, int, __builtin_iceilf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, int, __builtin_iceilf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, int, __builtin_iceilf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, int, __builtin_iceilf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, int, __builtin_iceilf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, int, __builtin_iceilf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, int, __builtin_iceilf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, int, __builtin_iceilf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, int, __builtin_iceilf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, int, __builtin_iceilf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, int, __builtin_iceilf (0.0), 13)
+TEST_INIT_CVT (float, -0.0, int, __builtin_iceilf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, int, __builtin_iceilf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, int, 0x7fffffff, 16)
+TEST_INIT_CVT (float, -2147483648.0, int, __builtin_iceilf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, int, 0x80000000, 18)
+TEST_INIT_CVT (float, __builtin_inf (), int, __builtin_iceilf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), int, __builtin_iceilf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), int, 0x7fffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, int, 1, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 2, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 3, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 4, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 5, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 6, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 7, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 8, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 9, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 10, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 11, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 12, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 13, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 14, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 15, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 16, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 17, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 18, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 19, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 20, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 21, __builtin_iceilf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-0.c
new file mode 100644
index 0000000..b9ec415
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-0.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_int___builtin_ifloorf:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+2
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (float, int, __builtin_ifloorf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-run-0.c
new file mode 100644
index 0000000..8ef4da0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-ifloor-run-0.c
@@ -0,0 +1,63 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+int out[ARRAY_SIZE];
+int ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, int, __builtin_ifloorf)
+TEST_ASSERT (int)
+
+TEST_INIT_CVT (float, 1.2, int, __builtin_ifloorf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, int, __builtin_ifloorf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, int, __builtin_ifloorf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, int, __builtin_ifloorf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, int, __builtin_ifloorf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, int, __builtin_ifloorf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, int, __builtin_ifloorf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, int, __builtin_ifloorf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, int, __builtin_ifloorf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, int, __builtin_ifloorf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, int, __builtin_ifloorf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, int, __builtin_ifloorf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, int, __builtin_ifloorf (0.0), 13)
+TEST_INIT_CVT (float, -0.0, int, __builtin_ifloorf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, int, __builtin_ifloorf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, int, 0x7fffffff, 16)
+TEST_INIT_CVT (float, -2147483648.0, int, __builtin_ifloorf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, int, 0x80000000, 18)
+TEST_INIT_CVT (float, __builtin_inf (), int, __builtin_ifloorf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), int, __builtin_ifloorf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), int, 0x7fffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, int, 1, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 2, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 3, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 4, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 5, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 6, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 7, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 8, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 9, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 10, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 11, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 12, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 13, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 14, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 15, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 16, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 17, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 18, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 19, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 20, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 21, __builtin_ifloorf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-0.c
new file mode 100644
index 0000000..3ca2f65
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-0.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_int___builtin_irintf:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+*/
+TEST_UNARY_CALL_CVT (float, int, __builtin_irintf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-run-0.c
new file mode 100644
index 0000000..0be3852
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-irint-run-0.c
@@ -0,0 +1,63 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+int out[ARRAY_SIZE];
+int ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, int, __builtin_irintf)
+TEST_ASSERT (int)
+
+TEST_INIT_CVT (float, 1.2, int, __builtin_irintf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, int, __builtin_irintf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, int, __builtin_irintf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, int, __builtin_irintf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, int, __builtin_irintf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, int, __builtin_irintf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, int, __builtin_irintf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, int, __builtin_irintf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, int, __builtin_irintf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, int, __builtin_irintf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, int, __builtin_irintf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, int, __builtin_irintf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, int, __builtin_irintf (-0.0), 13)
+TEST_INIT_CVT (float, -0.0, int, __builtin_irintf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, int, __builtin_irintf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, int, __builtin_irintf (2147483648.0), 16)
+TEST_INIT_CVT (float, -2147483648.0, int, __builtin_irintf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, int, __builtin_irintf (-2147483904.0), 18)
+TEST_INIT_CVT (float, __builtin_inf (), int, __builtin_irintf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), int, __builtin_irintf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), int, 0x7fffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, int, 1, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 2, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 3, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 4, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 5, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 6, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 7, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 8, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 9, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 10, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 11, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 12, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 13, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 14, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 15, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 16, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 17, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 18, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 19, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 20, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 21, __builtin_irintf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-0.c
new file mode 100644
index 0000000..f32515d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-0.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_int___builtin_iroundf:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+4
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (float, int, __builtin_iroundf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-run-0.c
new file mode 100644
index 0000000..2e05e44
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-iround-run-0.c
@@ -0,0 +1,63 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+int out[ARRAY_SIZE];
+int ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, int, __builtin_iroundf)
+TEST_ASSERT (int)
+
+TEST_INIT_CVT (float, 1.2, int, __builtin_iroundf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, int, __builtin_iroundf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, int, __builtin_iroundf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, int, __builtin_iroundf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, int, __builtin_iroundf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, int, __builtin_iroundf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, int, __builtin_iroundf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, int, __builtin_iroundf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, int, __builtin_iroundf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, int, __builtin_iroundf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, int, __builtin_iroundf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, int, __builtin_iroundf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, int, __builtin_iroundf (-0.0), 13)
+TEST_INIT_CVT (float, -0.0, int, __builtin_iroundf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, int, __builtin_iroundf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, int, 0x7fffffff, 16)
+TEST_INIT_CVT (float, -2147483648.0, int, __builtin_iroundf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, int, 0x80000000, 18)
+TEST_INIT_CVT (float, __builtin_inf (), int, __builtin_iroundf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), int, __builtin_iroundf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), int, 0x7fffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, int, 1, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 2, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 3, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 4, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 5, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 6, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 7, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 8, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 9, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 10, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 11, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 12, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 13, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 14, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 15, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 16, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 17, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 18, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 19, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 20, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, int, 21, __builtin_iroundf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-0.c
new file mode 100644
index 0000000..3b13a52
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-0.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_double_long___builtin_lceil:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+3
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (double, long, __builtin_lceil)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-1.c
new file mode 100644
index 0000000..5ff5d1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-1.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32f -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_long___builtin_lceilf:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+3
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (float, long, __builtin_lceilf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-0.c
new file mode 100644
index 0000000..8a0f668
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-0.c
@@ -0,0 +1,69 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, long, __builtin_lceil)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (double, 1.2, long, __builtin_lceil (1.2), 1)
+TEST_INIT_CVT (double, -1.2, long, __builtin_lceil (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, long, __builtin_lceil (0.5), 3)
+TEST_INIT_CVT (double, -0.5, long, __builtin_lceil (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, long, __builtin_lceil (0.1), 5)
+TEST_INIT_CVT (double, -0.1, long, __builtin_lceil (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, long, __builtin_lceil (3.0), 7)
+TEST_INIT_CVT (double, -3.0, long, __builtin_lceil (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, long, __builtin_lceil (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, long, __builtin_lceil (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, long, __builtin_lceil (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, long, __builtin_lceil (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, long, __builtin_lceil (-0.0), 13)
+TEST_INIT_CVT (double, -0.0, long, __builtin_lceil (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, long, __builtin_lceil (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, long, 0x7fffffffffffffff, 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, long, __builtin_lceil (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, long, 0x8000000000000000, 18)
+TEST_INIT_CVT (double, __builtin_inf (), long, __builtin_lceil (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), long, __builtin_lceil (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), long, 0x7fffffffffffffff, 21)
+
+/*
+  Similar to lround, some references are hard-coded instead of leveraging the
+  scalar __builtin_lceil, because the return value for a NaN or an infinity,
+  or for a rounded value too large to be stored in a long, is UNSPECIFIED.
+*/
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, long, 1, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 2, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 3, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 4, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 5, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 6, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 7, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 8, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 9, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 10, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 11, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 12, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 13, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 14, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 15, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 16, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 17, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 18, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 19, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 20, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 21, __builtin_lceil, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-1.c
new file mode 100644
index 0000000..e5e22ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lceil-run-1.c
@@ -0,0 +1,69 @@
+/* { dg-do run { target { riscv_v && rv32 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, long, __builtin_lceilf)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (float, 1.2, long, __builtin_lceilf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, long, __builtin_lceilf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, long, __builtin_lceilf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, long, __builtin_lceilf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, long, __builtin_lceilf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, long, __builtin_lceilf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, long, __builtin_lceilf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, long, __builtin_lceilf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, long, __builtin_lceilf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, long, __builtin_lceilf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, long, __builtin_lceilf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, long, __builtin_lceilf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, long, __builtin_lceilf (-0.0), 13)
+TEST_INIT_CVT (float, -0.0, long, __builtin_lceilf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, long, __builtin_lceilf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, long, 0x7fffffff, 16)
+TEST_INIT_CVT (float, -2147483648.0, long, __builtin_lceilf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, long, 0x80000000, 18)
+TEST_INIT_CVT (float, __builtin_inf (), long, __builtin_lceilf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), long, __builtin_lceilf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), long, 0x7fffffff, 21)
+
+/*
+  Similar to lround, some references are hard-coded instead of leveraging the
+  scalar __builtin_lceilf, because the return value for a NaN or an infinity,
+  or for a rounded value too large to be stored in a long, is UNSPECIFIED.
+*/
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, long, 1, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 2, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 3, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 4, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 5, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 6, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 7, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 8, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 9, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 10, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 11, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 12, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 13, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 14, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 15, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 16, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 17, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 18, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 19, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 20, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 21, __builtin_lceilf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-0.c
new file mode 100644
index 0000000..ac2d172
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-0.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_double_long___builtin_lfloor:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+2
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (double, long, __builtin_lfloor)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-1.c
new file mode 100644
index 0000000..164e97c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-1.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32f -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_long___builtin_lfloorf:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+2
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (float, long, __builtin_lfloorf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-0.c
new file mode 100644
index 0000000..3b71048
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-0.c
@@ -0,0 +1,69 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, long, __builtin_lfloor)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (double, 1.2, long, __builtin_lfloor (1.2), 1)
+TEST_INIT_CVT (double, -1.2, long, __builtin_lfloor (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, long, __builtin_lfloor (0.5), 3)
+TEST_INIT_CVT (double, -0.5, long, __builtin_lfloor (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, long, __builtin_lfloor (0.1), 5)
+TEST_INIT_CVT (double, -0.1, long, __builtin_lfloor (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, long, __builtin_lfloor (3.0), 7)
+TEST_INIT_CVT (double, -3.0, long, __builtin_lfloor (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, long, __builtin_lfloor (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, long, __builtin_lfloor (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, long, __builtin_lfloor (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, long, __builtin_lfloor (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, long, __builtin_lfloor (-0.0), 13)
+TEST_INIT_CVT (double, -0.0, long, __builtin_lfloor (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, long, __builtin_lfloor (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, long, 0x7fffffffffffffff, 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, long, __builtin_lfloor (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, long, 0x8000000000000000, 18)
+TEST_INIT_CVT (double, __builtin_inf (), long, __builtin_lfloor (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), long, __builtin_lfloor (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), long, 0x7fffffffffffffff, 21)
+
+/*
+  Similar to lround, some references are hard-coded instead of leveraging the
+  scalar __builtin_lfloor, because the return value for a NaN or an infinity,
+  or for a rounded value too large to be stored in a long, is UNSPECIFIED.
+*/
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, long, 1, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 2, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 3, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 4, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 5, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 6, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 7, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 8, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 9, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 10, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 11, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 12, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 13, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 14, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 15, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 16, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 17, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 18, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 19, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 20, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 21, __builtin_lfloor, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-1.c
new file mode 100644
index 0000000..60f50fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lfloor-run-1.c
@@ -0,0 +1,69 @@
+/* { dg-do run { target { riscv_v && rv32 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, long, __builtin_lfloorf)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (float, 1.2, long, __builtin_lfloorf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, long, __builtin_lfloorf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, long, __builtin_lfloorf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, long, __builtin_lfloorf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, long, __builtin_lfloorf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, long, __builtin_lfloorf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, long, __builtin_lfloorf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, long, __builtin_lfloorf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, long, __builtin_lfloorf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, long, __builtin_lfloorf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, long, __builtin_lfloorf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, long, __builtin_lfloorf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, long, __builtin_lfloorf (-0.0), 13)
+TEST_INIT_CVT (float, -0.0, long, __builtin_lfloorf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, long, __builtin_lfloorf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, long, 0x7fffffff, 16)
+TEST_INIT_CVT (float, -2147483648.0, long, __builtin_lfloorf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, long, 0x80000000, 18)
+TEST_INIT_CVT (float, __builtin_inf (), long, __builtin_lfloorf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), long, __builtin_lfloorf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), long, 0x7fffffff, 21)
+
+/*
+  Similar to lround, some references are hard-coded instead of leveraging the
+  scalar __builtin_lfloorf, because the return value for a NaN or an infinity,
+  or for a rounded value too large to be stored in a long, is UNSPECIFIED.
+*/
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, long, 1, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 2, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 3, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 4, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 5, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 6, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 7, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 8, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 9, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 10, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 11, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 12, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 13, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 14, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 15, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 16, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 17, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 18, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 19, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 20, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 21, __builtin_lfloorf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-0.c
new file mode 100644
index 0000000..3480c3e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-0.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+/*
+** test_double_int64_t___builtin_llceil:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+3
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llceil)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-run-0.c
new file mode 100644
index 0000000..5ccbe64
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llceil-run-0.c
@@ -0,0 +1,64 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+int64_t out[ARRAY_SIZE];
+int64_t ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llceil)
+TEST_ASSERT (int64_t)
+
+TEST_INIT_CVT (double, 1.2, int64_t, __builtin_llceil (1.2), 1)
+TEST_INIT_CVT (double, -1.2, int64_t, __builtin_llceil (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, int64_t, __builtin_llceil (0.5), 3)
+TEST_INIT_CVT (double, -0.5, int64_t, __builtin_llceil (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, int64_t, __builtin_llceil (0.1), 5)
+TEST_INIT_CVT (double, -0.1, int64_t, __builtin_llceil (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, int64_t, __builtin_llceil (3.0), 7)
+TEST_INIT_CVT (double, -3.0, int64_t, __builtin_llceil (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, int64_t, __builtin_llceil (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, int64_t, __builtin_llceil (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, int64_t, __builtin_llceil (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, int64_t, __builtin_llceil (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, int64_t, __builtin_llceil (-0.0), 13)
+TEST_INIT_CVT (double, -0.0, int64_t, __builtin_llceil (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, int64_t, __builtin_llceil (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, int64_t, 0x7fffffffffffffff, 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, int64_t, __builtin_llceil (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, int64_t, 0x8000000000000000, 18)
+TEST_INIT_CVT (double, __builtin_inf (), int64_t, __builtin_llceil (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), int64_t, __builtin_llceil (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), int64_t, 0x7fffffffffffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, int64_t, 1, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 2, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 3, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 4, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 5, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 6, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 7, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 8, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 9, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 10, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 11, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 12, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 13, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 14, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 15, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 16, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 17, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 18, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 19, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 20, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 21, __builtin_llceil, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-0.c
new file mode 100644
index 0000000..4b10f96
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-0.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+/*
+** test_double_int64_t___builtin_llfloor:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+2
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llfloor)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-run-0.c
new file mode 100644
index 0000000..2282913
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llfloor-run-0.c
@@ -0,0 +1,64 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+int64_t out[ARRAY_SIZE];
+int64_t ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llfloor)
+TEST_ASSERT (int64_t)
+
+TEST_INIT_CVT (double, 1.2, int64_t, __builtin_llfloor (1.2), 1)
+TEST_INIT_CVT (double, -1.2, int64_t, __builtin_llfloor (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, int64_t, __builtin_llfloor (0.5), 3)
+TEST_INIT_CVT (double, -0.5, int64_t, __builtin_llfloor (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, int64_t, __builtin_llfloor (0.1), 5)
+TEST_INIT_CVT (double, -0.1, int64_t, __builtin_llfloor (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, int64_t, __builtin_llfloor (3.0), 7)
+TEST_INIT_CVT (double, -3.0, int64_t, __builtin_llfloor (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, int64_t, __builtin_llfloor (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, int64_t, __builtin_llfloor (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, int64_t, __builtin_llfloor (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, int64_t, __builtin_llfloor (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, int64_t, __builtin_llfloor (0.0), 13)
+TEST_INIT_CVT (double, -0.0, int64_t, __builtin_llfloor (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, int64_t, __builtin_llfloor (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, int64_t, 0x7fffffffffffffff, 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, int64_t, __builtin_llfloor (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, int64_t, 0x8000000000000000, 18)
+TEST_INIT_CVT (double, __builtin_inf (), int64_t, __builtin_llfloor (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), int64_t, __builtin_llfloor (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), int64_t, 0x7fffffffffffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, int64_t, 1, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 2, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 3, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 4, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 5, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 6, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 7, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 8, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 9, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 10, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 11, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 12, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 13, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 14, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 15, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 16, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 17, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 18, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 19, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 20, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 21, __builtin_llfloor, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-0.c
new file mode 100644
index 0000000..4bf125f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-0.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+/*
+** test_double_int64_t___builtin_llrint:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+*/
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llrint)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-run-0.c
new file mode 100644
index 0000000..409175a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llrint-run-0.c
@@ -0,0 +1,64 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+int64_t out[ARRAY_SIZE];
+int64_t ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llrint)
+TEST_ASSERT (int64_t)
+
+TEST_INIT_CVT (double, 1.2, int64_t, __builtin_llrint (1.2), 1)
+TEST_INIT_CVT (double, -1.2, int64_t, __builtin_llrint (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, int64_t, __builtin_llrint (0.5), 3)
+TEST_INIT_CVT (double, -0.5, int64_t, __builtin_llrint (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, int64_t, __builtin_llrint (0.1), 5)
+TEST_INIT_CVT (double, -0.1, int64_t, __builtin_llrint (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, int64_t, __builtin_llrint (3.0), 7)
+TEST_INIT_CVT (double, -3.0, int64_t, __builtin_llrint (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, int64_t, __builtin_llrint (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, int64_t, __builtin_llrint (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, int64_t, __builtin_llrint (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, int64_t, __builtin_llrint (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, int64_t, __builtin_llrint (0.0), 13)
+TEST_INIT_CVT (double, -0.0, int64_t, __builtin_llrint (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, int64_t, __builtin_llrint (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, int64_t, __builtin_llrint (9223372036854775808.0), 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, int64_t, __builtin_llrint (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, int64_t, __builtin_llrint (-9223372036854777856.0), 18)
+TEST_INIT_CVT (double, __builtin_inf (), int64_t, __builtin_llrint (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), int64_t, __builtin_llrint (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), int64_t, 0x7fffffffffffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, int64_t, 1, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 2, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 3, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 4, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 5, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 6, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 7, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 8, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 9, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 10, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 11, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 12, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 13, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 14, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 15, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 16, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 17, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 18, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 19, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 20, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 21, __builtin_llrint, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-0.c
new file mode 100644
index 0000000..4f8b455
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-0.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+/*
+** test_double_int64_t___builtin_llround:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+4
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llround)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-run-0.c
new file mode 100644
index 0000000..c5b6084
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-llround-run-0.c
@@ -0,0 +1,64 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include <stdint-gcc.h>
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+int64_t out[ARRAY_SIZE];
+int64_t ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, int64_t, __builtin_llround)
+TEST_ASSERT (int64_t)
+
+TEST_INIT_CVT (double, 1.2, int64_t, __builtin_llround (1.2), 1)
+TEST_INIT_CVT (double, -1.2, int64_t, __builtin_llround (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, int64_t, __builtin_llround (0.5), 3)
+TEST_INIT_CVT (double, -0.5, int64_t, __builtin_llround (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, int64_t, __builtin_llround (0.1), 5)
+TEST_INIT_CVT (double, -0.1, int64_t, __builtin_llround (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, int64_t, __builtin_llround (3.0), 7)
+TEST_INIT_CVT (double, -3.0, int64_t, __builtin_llround (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, int64_t, __builtin_llround (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, int64_t, __builtin_llround (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, int64_t, __builtin_llround (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, int64_t, __builtin_llround (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, int64_t, __builtin_llround (0.0), 13)
+TEST_INIT_CVT (double, -0.0, int64_t, __builtin_llround (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, int64_t, __builtin_llround (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, int64_t, 0x7fffffffffffffff, 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, int64_t, __builtin_llround (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, int64_t, 0x8000000000000000, 18)
+TEST_INIT_CVT (double, __builtin_inf (), int64_t, __builtin_llround (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), int64_t, __builtin_llround (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), int64_t, 0x7fffffffffffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, int64_t, 1, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 2, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 3, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 4, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 5, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 6, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 7, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 8, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 9, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 10, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 11, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 12, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 13, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 14, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 15, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 16, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 17, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 18, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 19, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 20, __builtin_llround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, int64_t, 21, __builtin_llround, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-0.c
new file mode 100644
index 0000000..a60ef30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-0.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_double_long___builtin_lrint:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+*/
+TEST_UNARY_CALL_CVT (double, long, __builtin_lrint)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-1.c
new file mode 100644
index 0000000..57e92ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32f -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_long___builtin_lrintf:
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+*/
+TEST_UNARY_CALL_CVT (float, long, __builtin_lrintf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-0.c
new file mode 100644
index 0000000..2281c59
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-0.c
@@ -0,0 +1,63 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, long, __builtin_lrint)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (double, 1.2, long, __builtin_lrint (1.2), 1)
+TEST_INIT_CVT (double, -1.2, long, __builtin_lrint (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, long, __builtin_lrint (0.5), 3)
+TEST_INIT_CVT (double, -0.5, long, __builtin_lrint (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, long, __builtin_lrint (0.1), 5)
+TEST_INIT_CVT (double, -0.1, long, __builtin_lrint (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, long, __builtin_lrint (3.0), 7)
+TEST_INIT_CVT (double, -3.0, long, __builtin_lrint (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, long, __builtin_lrint (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, long, __builtin_lrint (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, long, __builtin_lrint (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, long, __builtin_lrint (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, long, __builtin_lrint (0.0), 13)
+TEST_INIT_CVT (double, -0.0, long, __builtin_lrint (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, long, __builtin_lrint (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, long, __builtin_lrint (9223372036854775808.0), 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, long, __builtin_lrint (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, long, __builtin_lrint (-9223372036854777856.0), 18)
+TEST_INIT_CVT (double, __builtin_inf (), long, __builtin_lrint (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), long, __builtin_lrint (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), long, 0x7fffffffffffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, long, 1, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 2, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 3, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 4, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 5, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 6, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 7, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 8, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 9, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 10, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 11, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 12, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 13, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 14, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 15, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 16, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 17, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 18, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 19, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 20, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 21, __builtin_lrint, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-1.c
new file mode 100644
index 0000000..ee7ec3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lrint-run-1.c
@@ -0,0 +1,63 @@
+/* { dg-do run { target { riscv_v && rv32 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, long, __builtin_lrintf)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (float, 1.2, long, __builtin_lrintf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, long, __builtin_lrintf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, long, __builtin_lrintf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, long, __builtin_lrintf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, long, __builtin_lrintf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, long, __builtin_lrintf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, long, __builtin_lrintf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, long, __builtin_lrintf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, long, __builtin_lrintf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, long, __builtin_lrintf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, long, __builtin_lrintf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, long, __builtin_lrintf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, long, __builtin_lrintf (0.0), 13)
+TEST_INIT_CVT (float, -0.0, long, __builtin_lrintf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, long, __builtin_lrintf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, long, __builtin_lrintf (2147483648.0), 16)
+TEST_INIT_CVT (float, -2147483648.0, long, __builtin_lrintf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, long, __builtin_lrintf (-2147483904.0), 18)
+TEST_INIT_CVT (float, __builtin_inff (), long, __builtin_lrintf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inff (), long, __builtin_lrintf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), long, 0x7fffffff, 21)
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, long, 1, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 2, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 3, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 4, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 5, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 6, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 7, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 8, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 9, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 10, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 11, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 12, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 13, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 14, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 15, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 16, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 17, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 18, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 19, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 20, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 21, __builtin_lrintf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-0.c
new file mode 100644
index 0000000..32b7348
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-0.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_double_long___builtin_lround:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+4
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e64,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (double, long, __builtin_lround)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-1.c
new file mode 100644
index 0000000..a4d6fcf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-1.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32f -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "test-math.h"
+
+/*
+** test_float_long___builtin_lroundf:
+** frrm\s+[atx][0-9]+
+** ...
+** fsrmi\s+4
+** ...
+** vsetvli\s+[atx][0-9]+,\s*zero,\s*e32,\s*m1,\s*ta,\s*ma
+** vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+
+** ...
+** fsrm\s+[atx][0-9]+
+** ret
+*/
+TEST_UNARY_CALL_CVT (float, long, __builtin_lroundf)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-0.c
new file mode 100644
index 0000000..ec4d9f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-0.c
@@ -0,0 +1,72 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+double in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (double, long, __builtin_lround)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (double, 1.2, long, __builtin_lround (1.2), 1)
+TEST_INIT_CVT (double, -1.2, long, __builtin_lround (-1.2), 2)
+TEST_INIT_CVT (double, 0.5, long, __builtin_lround (0.5), 3)
+TEST_INIT_CVT (double, -0.5, long, __builtin_lround (-0.5), 4)
+TEST_INIT_CVT (double, 0.1, long, __builtin_lround (0.1), 5)
+TEST_INIT_CVT (double, -0.1, long, __builtin_lround (-0.1), 6)
+TEST_INIT_CVT (double, 3.0, long, __builtin_lround (3.0), 7)
+TEST_INIT_CVT (double, -3.0, long, __builtin_lround (-3.0), 8)
+TEST_INIT_CVT (double, 4503599627370495.5, long, __builtin_lround (4503599627370495.5), 9)
+TEST_INIT_CVT (double, 4503599627370497.0, long, __builtin_lround (4503599627370497.0), 10)
+TEST_INIT_CVT (double, -4503599627370495.5, long, __builtin_lround (-4503599627370495.5), 11)
+TEST_INIT_CVT (double, -4503599627370496.0, long, __builtin_lround (-4503599627370496.0), 12)
+TEST_INIT_CVT (double, 0.0, long, __builtin_lround (0.0), 13)
+TEST_INIT_CVT (double, -0.0, long, __builtin_lround (-0.0), 14)
+TEST_INIT_CVT (double, 9223372036854774784.0, long, __builtin_lround (9223372036854774784.0), 15)
+TEST_INIT_CVT (double, 9223372036854775808.0, long, 0x7fffffffffffffff, 16)
+TEST_INIT_CVT (double, -9223372036854775808.0, long, __builtin_lround (-9223372036854775808.0), 17)
+TEST_INIT_CVT (double, -9223372036854777856.0, long, 0x8000000000000000, 18)
+TEST_INIT_CVT (double, __builtin_inf (), long, __builtin_lround (__builtin_inf ()), 19)
+TEST_INIT_CVT (double, -__builtin_inf (), long, __builtin_lround (-__builtin_inf ()), 20)
+TEST_INIT_CVT (double, __builtin_nan (""), long, 0x7fffffffffffffff, 21)
+
+/* According to the manual as below.
+
+ If x is a NaN or an infinity, or the rounded value is too large to
+ be stored in a long (long long in the case of the ll* functions),
+ then a domain error occurs, and the return value is unspecified.
+
+ Some references are hard-coded instead of leveraging the scalar __builtin_lround.
+*/
+
+int
+main ()
+{
+ RUN_TEST_CVT (double, long, 1, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 2, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 3, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 4, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 5, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 6, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 7, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 8, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 9, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 10, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 11, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 12, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 13, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 14, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 15, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 16, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 17, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 18, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 19, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 20, __builtin_lround, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (double, long, 21, __builtin_lround, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-1.c
new file mode 100644
index 0000000..76e4e43
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-lround-run-1.c
@@ -0,0 +1,72 @@
+/* { dg-do run { target { riscv_v && rv32 } } } */
+/* { dg-additional-options "-std=c99 -O3 -ftree-vectorize -fno-vect-cost-model -ffast-math" } */
+
+#include "test-math.h"
+
+#define ARRAY_SIZE 128
+
+float in[ARRAY_SIZE];
+long out[ARRAY_SIZE];
+long ref[ARRAY_SIZE];
+
+TEST_UNARY_CALL_CVT (float, long, __builtin_lroundf)
+TEST_ASSERT (long)
+
+TEST_INIT_CVT (float, 1.2, long, __builtin_lroundf (1.2), 1)
+TEST_INIT_CVT (float, -1.2, long, __builtin_lroundf (-1.2), 2)
+TEST_INIT_CVT (float, 0.5, long, __builtin_lroundf (0.5), 3)
+TEST_INIT_CVT (float, -0.5, long, __builtin_lroundf (-0.5), 4)
+TEST_INIT_CVT (float, 0.1, long, __builtin_lroundf (0.1), 5)
+TEST_INIT_CVT (float, -0.1, long, __builtin_lroundf (-0.1), 6)
+TEST_INIT_CVT (float, 3.0, long, __builtin_lroundf (3.0), 7)
+TEST_INIT_CVT (float, -3.0, long, __builtin_lroundf (-3.0), 8)
+TEST_INIT_CVT (float, 8388607.5, long, __builtin_lroundf (8388607.5), 9)
+TEST_INIT_CVT (float, 8388609.0, long, __builtin_lroundf (8388609.0), 10)
+TEST_INIT_CVT (float, -8388607.5, long, __builtin_lroundf (-8388607.5), 11)
+TEST_INIT_CVT (float, -8388609.0, long, __builtin_lroundf (-8388609.0), 12)
+TEST_INIT_CVT (float, 0.0, long, __builtin_lroundf (-0.0), 13)
+TEST_INIT_CVT (float, -0.0, long, __builtin_lroundf (-0.0), 14)
+TEST_INIT_CVT (float, 2147483520.0, long, __builtin_lroundf (2147483520.0), 15)
+TEST_INIT_CVT (float, 2147483648.0, long, 0x7fffffff, 16)
+TEST_INIT_CVT (float, -2147483648.0, long, __builtin_lroundf (-2147483648.0), 17)
+TEST_INIT_CVT (float, -2147483904.0, long, 0x80000000, 18)
+TEST_INIT_CVT (float, __builtin_inf (), long, __builtin_lroundf (__builtin_inff ()), 19)
+TEST_INIT_CVT (float, -__builtin_inf (), long, __builtin_lroundf (-__builtin_inff ()), 20)
+TEST_INIT_CVT (float, __builtin_nanf (""), long, 0x7fffffff, 21)
+
+/* According to the manual as below.
+
+ If x is a NaN or an infinity, or the rounded value is too large to
+ be stored in a long (long long in the case of the ll* functions),
+ then a domain error occurs, and the return value is unspecified.
+
+ Some references are hard-coded instead of leveraging the scalar __builtin_lround.
+*/
+
+int
+main ()
+{
+ RUN_TEST_CVT (float, long, 1, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 2, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 3, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 4, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 5, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 6, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 7, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 8, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 9, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 10, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 11, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 12, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 13, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 14, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 15, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 16, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 17, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 18, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 19, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 20, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+ RUN_TEST_CVT (float, long, 21, __builtin_lroundf, in, out, ref, ARRAY_SIZE);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-1.c
index 6786e54..b0209ee 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-1.c
@@ -9,24 +9,30 @@ float in[ARRAY_SIZE];
float out[ARRAY_SIZE];
float ref[ARRAY_SIZE];
+static float
+get_ref_nearbyintf (float val)
+{
+ set_rm (FRM_RTZ);
+
+ return __builtin_nearbyintf (val);
+}
+
TEST_UNARY_CALL (float, __builtin_nearbyintf)
TEST_ASSERT (float)
-TEST_INIT (float, 1.2, 1.0, 1)
-TEST_INIT (float, -1.2, -1.0, 2)
-TEST_INIT (float, 3.0, 3.0, 3)
-TEST_INIT (float, 8388607.5, 8388607.0, 4)
-TEST_INIT (float, 8388609.0, 8388609.0, 5)
-TEST_INIT (float, 0.0, 0.0, 6)
-TEST_INIT (float, -0.0, -0.0, 7)
-TEST_INIT (float, -8388607.5, -8388607.0, 8)
-TEST_INIT (float, -8388608.0, -8388608.0, 9)
+TEST_INIT (float, 1.2, get_ref_nearbyintf (1.2), 1)
+TEST_INIT (float, -1.2, get_ref_nearbyintf (-1.2), 2)
+TEST_INIT (float, 3.0, get_ref_nearbyintf (3.0), 3)
+TEST_INIT (float, 8388607.5, get_ref_nearbyintf (8388607.5), 4)
+TEST_INIT (float, 8388609.0, get_ref_nearbyintf (8388609.0), 5)
+TEST_INIT (float, 0.0, get_ref_nearbyintf (0.0), 6)
+TEST_INIT (float, -0.0, get_ref_nearbyintf (-0.0), 7)
+TEST_INIT (float, -8388607.5, get_ref_nearbyintf (-8388607.5), 8)
+TEST_INIT (float, -8388608.0, get_ref_nearbyintf (-8388608.0), 9)
int
main ()
{
- unsigned fflags_before = get_fflags ();
-
set_rm (FRM_RTZ);
RUN_TEST (float, 1, __builtin_nearbyintf, in, out, ref, ARRAY_SIZE);
@@ -39,10 +45,5 @@ main ()
RUN_TEST (float, 8, __builtin_nearbyintf, in, out, ref, ARRAY_SIZE);
RUN_TEST (float, 9, __builtin_nearbyintf, in, out, ref, ARRAY_SIZE);
- unsigned fflags_after = get_fflags ();
-
- if (fflags_before != fflags_after)
- __builtin_abort ();
-
return 0;
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-2.c
index 9d3a3a6..2329a51 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-nearbyint-run-2.c
@@ -9,24 +9,30 @@ double in[ARRAY_SIZE];
double out[ARRAY_SIZE];
double ref[ARRAY_SIZE];
+static double
+get_ref_nearbyint (double val)
+{
+ set_rm (FRM_RNE);
+
+ return __builtin_nearbyint (val);
+}
+
TEST_UNARY_CALL (double, __builtin_nearbyint)
TEST_ASSERT (double)
-TEST_INIT (double, 1.2, 1.0, 1)
-TEST_INIT (double, -1.8, -2.0, 2)
-TEST_INIT (double, 3.0, 3.0, 3)
-TEST_INIT (double, 4503599627370495.5, 4503599627370496.0, 4)
-TEST_INIT (double, 4503599627370497.0, 4503599627370497.0, 5)
-TEST_INIT (double, 0.0, 0.0, 6)
-TEST_INIT (double, -0.0, -0.0, 7)
-TEST_INIT (double, -4503599627370495.5, -4503599627370496.0, 8)
-TEST_INIT (double, -4503599627370496.0, -4503599627370496.0, 9)
+TEST_INIT (double, 1.2, get_ref_nearbyint (1.2), 1)
+TEST_INIT (double, -1.8, get_ref_nearbyint (-1.8), 2)
+TEST_INIT (double, 3.0, get_ref_nearbyint (3.0), 3)
+TEST_INIT (double, 4503599627370495.5, get_ref_nearbyint (4503599627370495.5), 4)
+TEST_INIT (double, 4503599627370497.0, get_ref_nearbyint (4503599627370497.0), 5)
+TEST_INIT (double, 0.0, get_ref_nearbyint (0.0), 6)
+TEST_INIT (double, -0.0, get_ref_nearbyint (-0.0), 7)
+TEST_INIT (double, -4503599627370495.5, get_ref_nearbyint (-4503599627370495.5), 8)
+TEST_INIT (double, -4503599627370496.0, get_ref_nearbyint (-4503599627370496.0), 9)
int
main ()
{
- unsigned fflags_before = get_fflags ();
-
set_rm (FRM_RNE);
RUN_TEST (double, 1, __builtin_nearbyint, in, out, ref, ARRAY_SIZE);
@@ -39,10 +45,5 @@ main ()
RUN_TEST (double, 8, __builtin_nearbyint, in, out, ref, ARRAY_SIZE);
RUN_TEST (double, 9, __builtin_nearbyint, in, out, ref, ARRAY_SIZE);
- unsigned fflags_after = get_fflags ();
-
- if (fflags_before != fflags_after)
- __builtin_abort ();
-
return 0;
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-1.c
index 080f1d9..9590d44 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-1.c
@@ -9,24 +9,30 @@ float in[ARRAY_SIZE];
float out[ARRAY_SIZE];
float ref[ARRAY_SIZE];
+static float
+get_ref_rintf (float val)
+{
+ set_rm (FRM_RTZ);
+
+ return __builtin_rintf (val);
+}
+
TEST_UNARY_CALL (float, __builtin_rintf)
TEST_ASSERT (float)
-TEST_INIT (float, 1.2, 1.0, 1)
-TEST_INIT (float, -1.2, -1.0, 2)
-TEST_INIT (float, 3.0, 3.0, 3)
-TEST_INIT (float, 8388607.5, 8388607.0, 4)
-TEST_INIT (float, 8388609.0, 8388609.0, 5)
-TEST_INIT (float, 0.0, 0.0, 6)
-TEST_INIT (float, -0.0, -0.0, 7)
-TEST_INIT (float, -8388607.5, -8388607.0, 8)
-TEST_INIT (float, -8388608.0, -8388608.0, 9)
+TEST_INIT (float, 1.2, get_ref_rintf (1.2), 1)
+TEST_INIT (float, -1.2, get_ref_rintf (-1.2), 2)
+TEST_INIT (float, 3.0, get_ref_rintf (3.0), 3)
+TEST_INIT (float, 8388607.5, get_ref_rintf (8388607.5), 4)
+TEST_INIT (float, 8388609.0, get_ref_rintf (8388609.0), 5)
+TEST_INIT (float, 0.0, get_ref_rintf (0.0), 6)
+TEST_INIT (float, -0.0, get_ref_rintf (-0.0), 7)
+TEST_INIT (float, -8388607.5, get_ref_rintf (-8388607.5), 8)
+TEST_INIT (float, -8388608.0, get_ref_rintf (-8388608.0), 9)
int
main ()
{
- unsigned fflags_before = get_fflags ();
-
set_rm (FRM_RTZ);
RUN_TEST (float, 1, __builtin_rintf, in, out, ref, ARRAY_SIZE);
@@ -39,10 +45,5 @@ main ()
RUN_TEST (float, 8, __builtin_rintf, in, out, ref, ARRAY_SIZE);
RUN_TEST (float, 9, __builtin_rintf, in, out, ref, ARRAY_SIZE);
- unsigned fflags_after = get_fflags ();
-
- if (fflags_before == fflags_after)
- __builtin_abort ();
-
return 0;
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-2.c
index 6d03118..fe58649 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-rint-run-2.c
@@ -9,24 +9,30 @@ double in[ARRAY_SIZE];
double out[ARRAY_SIZE];
double ref[ARRAY_SIZE];
+static double
+get_ref_rint (double val)
+{
+ set_rm (FRM_RNE);
+
+ return __builtin_rint (val);
+}
+
TEST_UNARY_CALL (double, __builtin_rint)
TEST_ASSERT (double)
-TEST_INIT (double, 1.2, 1.0, 1)
-TEST_INIT (double, -1.8, -2.0, 2)
-TEST_INIT (double, 3.0, 3.0, 3)
-TEST_INIT (double, 4503599627370495.5, 4503599627370496.0, 4)
-TEST_INIT (double, 4503599627370497.0, 4503599627370497.0, 5)
-TEST_INIT (double, 0.0, 0.0, 6)
-TEST_INIT (double, -0.0, -0.0, 7)
-TEST_INIT (double, -4503599627370495.5, -4503599627370496.0, 8)
-TEST_INIT (double, -4503599627370496.0, -4503599627370496.0, 9)
+TEST_INIT (double, 1.2, get_ref_rint (1.2), 1)
+TEST_INIT (double, -1.8, get_ref_rint (-1.8), 2)
+TEST_INIT (double, 3.0, get_ref_rint (3.0), 3)
+TEST_INIT (double, 4503599627370495.5, get_ref_rint (4503599627370495.5), 4)
+TEST_INIT (double, 4503599627370497.0, get_ref_rint (4503599627370497.0), 5)
+TEST_INIT (double, 0.0, get_ref_rint (0.0), 6)
+TEST_INIT (double, -0.0, get_ref_rint (-0.0), 7)
+TEST_INIT (double, -4503599627370495.5, get_ref_rint (-4503599627370495.5), 8)
+TEST_INIT (double, -4503599627370496.0, get_ref_rint (-4503599627370496.0), 9)
int
main ()
{
- unsigned fflags_before = get_fflags ();
-
set_rm (FRM_RNE);
RUN_TEST (double, 1, __builtin_rint, in, out, ref, ARRAY_SIZE);
@@ -39,10 +45,5 @@ main ()
RUN_TEST (double, 8, __builtin_rint, in, out, ref, ARRAY_SIZE);
RUN_TEST (double, 9, __builtin_rint, in, out, ref, ARRAY_SIZE);
- unsigned fflags_after = get_fflags ();
-
- if (fflags_before == fflags_after)
- __builtin_abort ();
-
return 0;
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-1.c
index fc8686f..27e04d7 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-1.c
@@ -12,15 +12,15 @@ float ref[ARRAY_SIZE];
TEST_UNARY_CALL (float, __builtin_roundf)
TEST_ASSERT (float)
-TEST_INIT (float, 1.2, 1.0, 1)
-TEST_INIT (float, -1.6, -2.0, 2)
-TEST_INIT (float, 3.0, 3.0, 3)
-TEST_INIT (float, 8388607.5, 8388608.0, 4)
-TEST_INIT (float, 8388609.0, 8388609.0, 5)
-TEST_INIT (float, 0.0, 0.0, 6)
-TEST_INIT (float, -0.0, -0.0, 7)
-TEST_INIT (float, -8388607.5, -8388608.0, 8)
-TEST_INIT (float, -8388608.0, -8388608.0, 9)
+TEST_INIT (float, 1.2, __builtin_roundf (1.2), 1)
+TEST_INIT (float, -1.6, __builtin_roundf (-1.6), 2)
+TEST_INIT (float, 3.0, __builtin_roundf (3.0), 3)
+TEST_INIT (float, 8388607.5, __builtin_roundf (8388607.5), 4)
+TEST_INIT (float, 8388609.0, __builtin_roundf (8388609.0), 5)
+TEST_INIT (float, 0.0, __builtin_roundf (0.0), 6)
+TEST_INIT (float, -0.0, __builtin_roundf (-0.0), 7)
+TEST_INIT (float, -8388607.5, __builtin_roundf (-8388607.5), 8)
+TEST_INIT (float, -8388608.0, __builtin_roundf (-8388608.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-2.c
index 14ddf6c..cef280c 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-round-run-2.c
@@ -12,15 +12,15 @@ double ref[ARRAY_SIZE];
TEST_UNARY_CALL (double, __builtin_round)
TEST_ASSERT (double)
-TEST_INIT (double, 1.2, 1.0, 1)
-TEST_INIT (double, -1.8, -2.0, 2)
-TEST_INIT (double, 3.0, 3.0, 3)
-TEST_INIT (double, 4503599627370495.5, 4503599627370496.0, 4)
-TEST_INIT (double, 4503599627370497.0, 4503599627370497.0, 5)
-TEST_INIT (double, 0.0, 0.0, 6)
-TEST_INIT (double, -0.0, -0.0, 7)
-TEST_INIT (double, -4503599627370495.5, -4503599627370496.0, 8)
-TEST_INIT (double, -4503599627370496.0, -4503599627370496.0, 9)
+TEST_INIT (double, 1.2, __builtin_round (1.2), 1)
+TEST_INIT (double, -1.8, __builtin_round (-1.8), 2)
+TEST_INIT (double, 3.0, __builtin_round (3.0), 3)
+TEST_INIT (double, 4503599627370495.5, __builtin_round (4503599627370495.5), 4)
+TEST_INIT (double, 4503599627370497.0, __builtin_round (4503599627370497.0), 5)
+TEST_INIT (double, 0.0, __builtin_round (0.0), 6)
+TEST_INIT (double, -0.0, __builtin_round (-0.0), 7)
+TEST_INIT (double, -4503599627370495.5, __builtin_round (-4503599627370495.5), 8)
+TEST_INIT (double, -4503599627370496.0, __builtin_round (-4503599627370496.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-1.c
index 8b9f6d2..910031d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-1.c
@@ -12,15 +12,15 @@ float ref[ARRAY_SIZE];
TEST_UNARY_CALL (float, __builtin_truncf)
TEST_ASSERT (float)
-TEST_INIT (float, 1.2, 1.0, 1)
-TEST_INIT (float, -1.2, -1.0, 2)
-TEST_INIT (float, 3.0, 3.0, 3)
-TEST_INIT (float, 8388607.5, 8388607.0, 4)
-TEST_INIT (float, 8388609.0, 8388609.0, 5)
-TEST_INIT (float, 0.0, 0.0, 6)
-TEST_INIT (float, -0.0, -0.0, 7)
-TEST_INIT (float, -8388607.5, -8388607.0, 8)
-TEST_INIT (float, -8388608.0, -8388608.0, 9)
+TEST_INIT (float, 1.2, __builtin_truncf (1.2), 1)
+TEST_INIT (float, -1.2,__builtin_truncf (-1.2), 2)
+TEST_INIT (float, 3.0, __builtin_truncf (3.0), 3)
+TEST_INIT (float, 8388607.5, __builtin_truncf (8388607.5), 4)
+TEST_INIT (float, 8388609.0, __builtin_truncf (8388609.0), 5)
+TEST_INIT (float, 0.0, __builtin_truncf (0.0), 6)
+TEST_INIT (float, -0.0, __builtin_truncf (-0.0), 7)
+TEST_INIT (float, -8388607.5, __builtin_truncf (-8388607.5), 8)
+TEST_INIT (float, -8388608.0, __builtin_truncf (-8388608.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-2.c
index 2ae354f..20d91f0 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/math-trunc-run-2.c
@@ -12,15 +12,15 @@ double ref[ARRAY_SIZE];
TEST_UNARY_CALL (double, __builtin_trunc)
TEST_ASSERT (double)
-TEST_INIT (double, 1.2, 1.0, 1)
-TEST_INIT (double, -1.2, -1.0, 2)
-TEST_INIT (double, 3.0, 3.0, 3)
-TEST_INIT (double, 4503599627370495.5, 4503599627370495.0, 4)
-TEST_INIT (double, 4503599627370497.0, 4503599627370497.0, 5)
-TEST_INIT (double, 0.0, 0.0, 6)
-TEST_INIT (double, -0.0, -0.0, 7)
-TEST_INIT (double, -4503599627370495.5, -4503599627370495.0, 8)
-TEST_INIT (double, -4503599627370496.0, -4503599627370496.0, 9)
+TEST_INIT (double, 1.2, __builtin_trunc (1.2), 1)
+TEST_INIT (double, -1.2, __builtin_trunc (-1.2), 2)
+TEST_INIT (double, 3.0, __builtin_trunc (3.0), 3)
+TEST_INIT (double, 4503599627370495.5, __builtin_trunc (4503599627370495.5), 4)
+TEST_INIT (double, 4503599627370497.0, __builtin_trunc (4503599627370497.0), 5)
+TEST_INIT (double, 0.0, __builtin_trunc (0.0), 6)
+TEST_INIT (double, -0.0, __builtin_trunc (-0.0), 7)
+TEST_INIT (double, -4503599627370495.5, __builtin_trunc (-4503599627370495.5), 8)
+TEST_INIT (double, -4503599627370496.0, __builtin_trunc (-4503599627370496.0), 9)
int
main ()
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/test-math.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/test-math.h
index b63ca56..a1c9d55 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/test-math.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/test-math.h
@@ -5,6 +5,14 @@
out[i] = CALL (in[i]); \
}
+#define TEST_UNARY_CALL_CVT(TYPE_IN, TYPE_OUT, CALL) \
+ void test_##TYPE_IN##_##TYPE_OUT##_##CALL ( \
+ TYPE_OUT *out, TYPE_IN *in, unsigned count) \
+ { \
+ for (unsigned i = 0; i < count; i++) \
+ out[i] = CALL (in[i]); \
+ }
+
#define TEST_COND_UNARY_CALL(TYPE, CALL) \
void test_##TYPE##_##CALL (TYPE *out, int *cond, TYPE *in, unsigned count) \
{ \
@@ -22,6 +30,17 @@
} \
}
+#define TEST_INIT_CVT(TYPE_IN, VAL_IN, TYPE_REF, VAL_REF, NUM) \
+ void test_##TYPE_IN##_##TYPE_REF##_init_##NUM ( \
+ TYPE_IN *in, TYPE_REF *ref, unsigned size) \
+ { \
+ for (unsigned i = 0; i < size; i++) \
+ { \
+ in[i] = VAL_IN; \
+ ref[i] = VAL_REF; \
+ } \
+ }
+
#define TEST_ASSERT(TYPE) \
void test_##TYPE##_assert (TYPE *out, TYPE *ref, unsigned size) \
{ \
@@ -37,6 +56,11 @@
test_##TYPE##_##CALL (OUT, IN, SIZE); \
test_##TYPE##_assert (OUT, REF, SIZE);
+#define RUN_TEST_CVT(TYPE_IN, TYPE_OUT, NUM, CALL, IN, OUT, REF, SIZE) \
+ test_##TYPE_IN##_##TYPE_OUT##_init_##NUM (IN, REF, SIZE); \
+ test_##TYPE_IN##_##TYPE_OUT##_##CALL (OUT, IN, SIZE); \
+ test_##TYPE_OUT##_assert (OUT, REF, SIZE);
+
#define FRM_RNE 0
#define FRM_RTZ 1
#define FRM_RDN 2
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-1.c
new file mode 100644
index 0000000..7dc2b99
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-1.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 --param riscv-autovec-preference=fixed-vlmax -Wno-psabi" } */
+
+#include <stdint-gcc.h>
+
+typedef int8_t vnx4i __attribute__ ((vector_size (4)));
+typedef uint8_t vnx4ui __attribute__ ((vector_size (4)));
+
+#define MASK_4 0, 1, 0, 1
+
+vnx4i __attribute__ ((noinline, noclone)) test_1 (vnx4i x, vnx4i y)
+{
+ return __builtin_shufflevector (x, y, MASK_4);
+}
+
+vnx4ui __attribute__ ((noinline, noclone)) test_2 (vnx4ui x, vnx4ui y)
+{
+ return __builtin_shufflevector (x, y, MASK_4);
+}
+
+/* { dg-final { scan-assembler-times {\tvrgather\.vi} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-2.c
new file mode 100644
index 0000000..9aa9100
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-2.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh -mabi=lp64d -O3 --param riscv-autovec-preference=fixed-vlmax -Wno-psabi" } */
+
+#include <stdint-gcc.h>
+
+typedef int8_t vnx8i __attribute__ ((vector_size (8)));
+typedef int16_t vnx4i __attribute__ ((vector_size (8)));
+typedef uint8_t vnx8ui __attribute__ ((vector_size (8)));
+typedef uint16_t vnx4ui __attribute__ ((vector_size (8)));
+typedef _Float16 vnx4f __attribute__ ((vector_size (8)));
+
+#define MASK_4 4, 5, 4, 5
+#define MASK_8 12, 13, 14, 15, 12, 13, 14, 15
+
+vnx8i __attribute__ ((noinline, noclone))
+test_1 (vnx8i x, vnx8i y)
+{
+ return __builtin_shufflevector (x, y, MASK_8);
+}
+
+vnx4i __attribute__ ((noinline, noclone))
+test_2 (vnx4i x, vnx4i y)
+{
+ return __builtin_shufflevector (x, y, MASK_4);
+}
+
+vnx8ui __attribute__ ((noinline, noclone))
+test_3 (vnx8ui x, vnx8ui y)
+{
+ return __builtin_shufflevector (x, y, MASK_8);
+}
+
+vnx4ui __attribute__ ((noinline, noclone))
+test_4 (vnx4ui x, vnx4ui y)
+{
+ return __builtin_shufflevector (x, y, MASK_4);
+}
+
+vnx4f __attribute__ ((noinline, noclone))
+test_5 (vnx4f x, vnx4f y)
+{
+ return __builtin_shufflevector (x, y, MASK_4);
+}
+
+/* { dg-final { scan-assembler-times {\tvrgather\.vi} 5 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-1.c
new file mode 100644
index 0000000..d12424e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-1.c
@@ -0,0 +1,27 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-options "-O3 --param riscv-autovec-preference=fixed-vlmax -Wno-psabi" } */
+
+#include <assert.h>
+#include "consecutive-1.c"
+
+int
+main (void)
+{
+ vnx4i test_1_x = {99, 111, 2, 4};
+ vnx4i test_1_y = {4, 5, 7, 8};
+ vnx4i test_1_except = {99, 111, 99, 111};
+ vnx4i test_1_real;
+ test_1_real = test_1 (test_1_x, test_1_y);
+ for (int i = 0; i < 4; i++)
+ assert (test_1_real[i] == test_1_except[i]);
+
+ vnx4ui test_2_x = {99, 111, 2, 4};
+ vnx4ui test_2_y = {4, 5, 6, 8};
+ vnx4ui test_2_except = {99, 111, 99, 111};
+ vnx4ui test_2_real;
+ test_2_real = test_2 (test_2_x, test_2_y);
+ for (int i = 0; i < 4; i++)
+ assert (test_2_real[i] == test_2_except[i]);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-2.c
new file mode 100644
index 0000000..8362e9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-2.c
@@ -0,0 +1,51 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-options "-O3 --param riscv-autovec-preference=fixed-vlmax -Wno-psabi" } */
+
+#include <assert.h>
+#include "consecutive-2.c"
+
+int
+main (void)
+{
+ vnx8i test_1_x = {0, 1, 2, 3, 5, 6, 7, 8};
+ vnx8i test_1_y = {8, 9, 10, 11, 13, 14, 15, 16};
+ vnx8i test_1_except = {13, 14, 15, 16, 13, 14, 15, 16};
+ vnx8i test_1_real;
+ test_1_real = test_1 (test_1_x, test_1_y);
+ for (int i = 0; i < 8; i++)
+ assert (test_1_real[i] == test_1_except[i]);
+
+ vnx4i test_2_x = {1, 2, 3, 4};
+ vnx4i test_2_y = {5, 6, 7, 8};
+ vnx4i test_2_except = {5, 6, 5, 6};
+ vnx4i test_2_real;
+ test_2_real = test_2 (test_2_x, test_2_y);
+ for (int i = 0; i < 4; i++)
+ assert (test_2_real[i] == test_2_except[i]);
+
+ vnx8ui test_3_x = {0, 1, 2, 3, 4, 5, 6, 8};
+ vnx8ui test_3_y = {8, 9, 10, 11, 12, 13, 15, 16};
+ vnx8ui test_3_except = {12, 13, 15, 16, 12, 13, 15, 16};
+ vnx8ui test_3_real;
+ test_3_real = test_3 (test_3_x, test_3_y);
+ for (int i = 0; i < 8; i++)
+ assert (test_3_real[i] == test_3_except[i]);
+
+ vnx4ui test_4_x = {1, 2, 3, 4};
+ vnx4ui test_4_y = {4, 5, 6, 8};
+ vnx4ui test_4_except = {4, 5, 4, 5};
+ vnx4ui test_4_real;
+ test_4_real = test_4 (test_4_x, test_4_y);
+ for (int i = 0; i < 4; i++)
+ assert (test_4_real[i] == test_4_except[i]);
+
+ vnx4f test_5_x = {0, 1, 3, 4};
+ vnx4f test_5_y = {4, 5, 6, 7};
+ vnx4f test_5_except = {4, 5, 4, 5};
+ vnx4f test_5_real;
+ test_5_real = test_5 (test_5_x, test_5_y);
+ for (int i = 0; i < 4; i++)
+ assert (test_5_real[i] == test_5_except[i]);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm.h
index 18cb4af..1c5e472 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <stdint-gcc.h>
typedef int8_t vnx2qi __attribute__ ((vector_size (2)));
typedef int8_t vnx4qi __attribute__ ((vector_size (4)));
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/bswap16-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/bswap16-0.c
new file mode 100644
index 0000000..11880ba
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/bswap16-0.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V (bswap16, 1, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 2, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 4, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 8, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 16, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 32, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 64, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 128, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 256, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 512, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 1024, uint16_t, __builtin_bswap16)
+DEF_OP_V (bswap16, 2048, uint16_t, __builtin_bswap16)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vsrl\.vi\s+v[0-9]+,\s*v[0-9]+,\s*8} 11 } } */
+/* { dg-final { scan-assembler-times {vsll\.vi\s+v[0-9]+,\s*v[0-9]+,\s*8} 11 } } */
+/* { dg-final { scan-assembler-times {vor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 11 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c
new file mode 100644
index 0000000..c010c88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c
@@ -0,0 +1,94 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fdump-tree-optimized" } */
+
+#include "def.h"
+
+#define MASK_8 0, 1, 0, 1, 0, 1, 0, 1
+#define MASK_16 MASK_8, MASK_8
+#define MASK_32 MASK_16, MASK_16
+#define MASK_64 MASK_32, MASK_32
+#define MASK_64 MASK_32, MASK_32
+#define MASK_128 MASK_64, MASK_64
+#define MASK_256 MASK_128, MASK_128
+#define MASK_512 MASK_256, MASK_256
+#define MASK_1024 MASK_512, MASK_512
+#define MASK_2048 MASK_1024, MASK_1024
+#define MASK_4096 MASK_2048, MASK_2048
+
+DEF_CONSECUTIVE (v8qi, 8)
+DEF_CONSECUTIVE (v16qi, 16)
+DEF_CONSECUTIVE (v32qi, 32)
+DEF_CONSECUTIVE (v64qi, 64)
+DEF_CONSECUTIVE (v128qi, 128)
+DEF_CONSECUTIVE (v256qi, 256)
+DEF_CONSECUTIVE (v512qi, 512)
+DEF_CONSECUTIVE (v1024qi, 1024)
+DEF_CONSECUTIVE (v2048qi, 2048)
+DEF_CONSECUTIVE (v4096qi, 4096)
+DEF_CONSECUTIVE (v8uqi, 8)
+DEF_CONSECUTIVE (v16uqi, 16)
+DEF_CONSECUTIVE (v32uqi, 32)
+DEF_CONSECUTIVE (v64uqi, 64)
+DEF_CONSECUTIVE (v128uqi, 128)
+DEF_CONSECUTIVE (v256uqi, 256)
+DEF_CONSECUTIVE (v512uqi, 512)
+DEF_CONSECUTIVE (v1024uqi, 1024)
+DEF_CONSECUTIVE (v2048uqi, 2048)
+DEF_CONSECUTIVE (v4096uqi, 4096)
+
+DEF_CONSECUTIVE (v8hi, 8)
+DEF_CONSECUTIVE (v16hi, 16)
+DEF_CONSECUTIVE (v32hi, 32)
+DEF_CONSECUTIVE (v64hi, 64)
+DEF_CONSECUTIVE (v128hi, 128)
+DEF_CONSECUTIVE (v256hi, 256)
+DEF_CONSECUTIVE (v512hi, 512)
+DEF_CONSECUTIVE (v1024hi, 1024)
+DEF_CONSECUTIVE (v2048hi, 2048)
+DEF_CONSECUTIVE (v8uhi, 8)
+DEF_CONSECUTIVE (v16uhi, 16)
+DEF_CONSECUTIVE (v32uhi, 32)
+DEF_CONSECUTIVE (v64uhi, 64)
+DEF_CONSECUTIVE (v128uhi, 128)
+DEF_CONSECUTIVE (v256uhi, 256)
+DEF_CONSECUTIVE (v512uhi, 512)
+DEF_CONSECUTIVE (v1024uhi, 1024)
+DEF_CONSECUTIVE (v2048uhi, 2048)
+
+DEF_CONSECUTIVE (v8si, 8)
+DEF_CONSECUTIVE (v16si, 16)
+DEF_CONSECUTIVE (v32si, 32)
+DEF_CONSECUTIVE (v64si, 64)
+DEF_CONSECUTIVE (v128si, 128)
+DEF_CONSECUTIVE (v256si, 256)
+DEF_CONSECUTIVE (v512si, 512)
+DEF_CONSECUTIVE (v1024si, 1024)
+DEF_CONSECUTIVE (v8usi, 8)
+DEF_CONSECUTIVE (v16usi, 16)
+DEF_CONSECUTIVE (v32usi, 32)
+DEF_CONSECUTIVE (v64usi, 64)
+DEF_CONSECUTIVE (v128usi, 128)
+DEF_CONSECUTIVE (v256usi, 256)
+DEF_CONSECUTIVE (v512usi, 512)
+DEF_CONSECUTIVE (v1024usi, 1024)
+
+DEF_CONSECUTIVE (v8hf, 8)
+DEF_CONSECUTIVE (v16hf, 16)
+DEF_CONSECUTIVE (v32hf, 32)
+DEF_CONSECUTIVE (v64hf, 64)
+DEF_CONSECUTIVE (v128hf, 128)
+DEF_CONSECUTIVE (v256hf, 256)
+DEF_CONSECUTIVE (v512hf, 512)
+DEF_CONSECUTIVE (v1024hf, 1024)
+DEF_CONSECUTIVE (v2048hf, 2048)
+
+DEF_CONSECUTIVE (v8sf, 8)
+DEF_CONSECUTIVE (v16sf, 16)
+DEF_CONSECUTIVE (v32sf, 32)
+DEF_CONSECUTIVE (v64sf, 64)
+DEF_CONSECUTIVE (v128sf, 128)
+DEF_CONSECUTIVE (v256sf, 256)
+DEF_CONSECUTIVE (v512sf, 512)
+DEF_CONSECUTIVE (v1024sf, 1024)
+
+/* { dg-final { scan-assembler-times {vrgather\.vi\s+v[0-9]+,\s*v[0-9]+,\s*0} 71 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c
new file mode 100644
index 0000000..ccbbb24
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fdump-tree-optimized" } */
+
+#include "def.h"
+
+#define MASK_8 4, 5, 6, 7, 4, 5, 6, 7
+#define MASK_16 MASK_8, MASK_8
+#define MASK_32 MASK_16, MASK_16
+#define MASK_64 MASK_32, MASK_32
+#define MASK_64 MASK_32, MASK_32
+#define MASK_128 MASK_64, MASK_64
+#define MASK_256 MASK_128, MASK_128
+#define MASK_512 MASK_256, MASK_256
+#define MASK_1024 MASK_512, MASK_512
+#define MASK_2048 MASK_1024, MASK_1024
+#define MASK_4096 MASK_2048, MASK_2048
+
+DEF_CONSECUTIVE (v8qi, 8)
+DEF_CONSECUTIVE (v16qi, 16)
+DEF_CONSECUTIVE (v32qi, 32)
+DEF_CONSECUTIVE (v64qi, 64)
+DEF_CONSECUTIVE (v128qi, 128)
+DEF_CONSECUTIVE (v256qi, 256)
+DEF_CONSECUTIVE (v512qi, 512)
+DEF_CONSECUTIVE (v1024qi, 1024)
+DEF_CONSECUTIVE (v2048qi, 2048)
+DEF_CONSECUTIVE (v4096qi, 4096)
+DEF_CONSECUTIVE (v8uqi, 8)
+DEF_CONSECUTIVE (v16uqi, 16)
+DEF_CONSECUTIVE (v32uqi, 32)
+DEF_CONSECUTIVE (v64uqi, 64)
+DEF_CONSECUTIVE (v128uqi, 128)
+DEF_CONSECUTIVE (v256uqi, 256)
+DEF_CONSECUTIVE (v512uqi, 512)
+DEF_CONSECUTIVE (v1024uqi, 1024)
+DEF_CONSECUTIVE (v2048uqi, 2048)
+DEF_CONSECUTIVE (v4096uqi, 4096)
+
+DEF_CONSECUTIVE (v8hi, 8)
+DEF_CONSECUTIVE (v16hi, 16)
+DEF_CONSECUTIVE (v32hi, 32)
+DEF_CONSECUTIVE (v64hi, 64)
+DEF_CONSECUTIVE (v128hi, 128)
+DEF_CONSECUTIVE (v256hi, 256)
+DEF_CONSECUTIVE (v512hi, 512)
+DEF_CONSECUTIVE (v1024hi, 1024)
+DEF_CONSECUTIVE (v2048hi, 2048)
+DEF_CONSECUTIVE (v8uhi, 8)
+DEF_CONSECUTIVE (v16uhi, 16)
+DEF_CONSECUTIVE (v32uhi, 32)
+DEF_CONSECUTIVE (v64uhi, 64)
+DEF_CONSECUTIVE (v128uhi, 128)
+DEF_CONSECUTIVE (v256uhi, 256)
+DEF_CONSECUTIVE (v512uhi, 512)
+DEF_CONSECUTIVE (v1024uhi, 1024)
+DEF_CONSECUTIVE (v2048uhi, 2048)
+
+DEF_CONSECUTIVE (v8hf, 8)
+DEF_CONSECUTIVE (v16hf, 16)
+DEF_CONSECUTIVE (v32hf, 32)
+DEF_CONSECUTIVE (v64hf, 64)
+DEF_CONSECUTIVE (v128hf, 128)
+DEF_CONSECUTIVE (v256hf, 256)
+DEF_CONSECUTIVE (v512hf, 512)
+DEF_CONSECUTIVE (v1024hf, 1024)
+DEF_CONSECUTIVE (v2048hf, 2048)
+
+/* { dg-final { scan-assembler-times {vrgather\.vi\s+v[0-9]+,\s*v[0-9]+,\s*1} 47 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-3.c
new file mode 100644
index 0000000..7de3c7d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-3.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fdump-tree-optimized" } */
+
+#include "def.h"
+
+#define MASK_8 2, 3, 4, 5, 2, 3, 4, 5
+#define MASK_16 MASK_8, MASK_8
+#define MASK_32 MASK_16, MASK_16
+#define MASK_64 MASK_32, MASK_32
+
+#define MASK_128 MASK_64, MASK_64
+#define MASK_256 MASK_128, MASK_128
+#define MASK_512 MASK_256, MASK_256
+#define MASK_1024 MASK_512, MASK_512
+#define MASK_2048 MASK_1024, MASK_1024
+#define MASK_4096 MASK_2048, MASK_2048
+
+DEF_CONSECUTIVE (v8qi, 8)
+DEF_CONSECUTIVE (v16qi, 16)
+DEF_CONSECUTIVE (v32qi, 32)
+DEF_CONSECUTIVE (v64qi, 64)
+DEF_CONSECUTIVE (v128qi, 128)
+DEF_CONSECUTIVE (v256qi, 256)
+DEF_CONSECUTIVE (v512qi, 512)
+DEF_CONSECUTIVE (v1024qi, 1024)
+DEF_CONSECUTIVE (v2048qi, 2048)
+DEF_CONSECUTIVE (v4096qi, 4096)
+DEF_CONSECUTIVE (v8uqi, 8)
+DEF_CONSECUTIVE (v16uqi, 16)
+DEF_CONSECUTIVE (v32uqi, 32)
+DEF_CONSECUTIVE (v64uqi, 64)
+DEF_CONSECUTIVE (v128uqi, 128)
+DEF_CONSECUTIVE (v256uqi, 256)
+DEF_CONSECUTIVE (v512uqi, 512)
+DEF_CONSECUTIVE (v1024uqi, 1024)
+DEF_CONSECUTIVE (v2048uqi, 2048)
+DEF_CONSECUTIVE (v4096uqi, 4096)
+
+DEF_CONSECUTIVE (v8hi, 8)
+DEF_CONSECUTIVE (v16hi, 16)
+DEF_CONSECUTIVE (v32hi, 32)
+DEF_CONSECUTIVE (v64hi, 64)
+DEF_CONSECUTIVE (v128hi, 128)
+DEF_CONSECUTIVE (v256hi, 256)
+DEF_CONSECUTIVE (v512hi, 512)
+DEF_CONSECUTIVE (v1024hi, 1024)
+DEF_CONSECUTIVE (v2048hi, 2048)
+DEF_CONSECUTIVE (v8uhi, 8)
+DEF_CONSECUTIVE (v16uhi, 16)
+DEF_CONSECUTIVE (v32uhi, 32)
+DEF_CONSECUTIVE (v64uhi, 64)
+DEF_CONSECUTIVE (v128uhi, 128)
+DEF_CONSECUTIVE (v256uhi, 256)
+DEF_CONSECUTIVE (v512uhi, 512)
+DEF_CONSECUTIVE (v1024uhi, 1024)
+DEF_CONSECUTIVE (v2048uhi, 2048)
+
+DEF_CONSECUTIVE (v8hf, 8)
+DEF_CONSECUTIVE (v16hf, 16)
+DEF_CONSECUTIVE (v32hf, 32)
+DEF_CONSECUTIVE (v64hf, 64)
+DEF_CONSECUTIVE (v128hf, 128)
+DEF_CONSECUTIVE (v256hf, 256)
+DEF_CONSECUTIVE (v512hf, 512)
+DEF_CONSECUTIVE (v1024hf, 1024)
+DEF_CONSECUTIVE (v2048hf, 2048)
+
+/* { dg-final { scan-assembler-not {vrgather\.vi\s+v[0-9]+,\s*v[0-9]+,\s*1} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/cvt-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/cvt-0.c
new file mode 100644
index 0000000..5637b05
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/cvt-0.c
@@ -0,0 +1,47 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -ffast-math --param=riscv-autovec-lmul=m8 -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_CONVERT (fp16, int64_t, _Float16, 1)
+DEF_CONVERT (fp16, int64_t, _Float16, 2)
+DEF_CONVERT (fp16, int64_t, _Float16, 4)
+DEF_CONVERT (fp16, int64_t, _Float16, 8)
+DEF_CONVERT (fp16, int64_t, _Float16, 16)
+DEF_CONVERT (fp16, int64_t, _Float16, 32)
+DEF_CONVERT (fp16, int64_t, _Float16, 64)
+DEF_CONVERT (fp16, int64_t, _Float16, 128)
+DEF_CONVERT (fp16, int64_t, _Float16, 256)
+DEF_CONVERT (fp16, int64_t, _Float16, 512)
+DEF_CONVERT (fp16, int64_t, _Float16, 1024)
+DEF_CONVERT (fp16, int64_t, _Float16, 2048)
+
+DEF_CONVERT (fp16, uint64_t, _Float16, 1)
+DEF_CONVERT (fp16, uint64_t, _Float16, 2)
+DEF_CONVERT (fp16, uint64_t, _Float16, 4)
+DEF_CONVERT (fp16, uint64_t, _Float16, 8)
+DEF_CONVERT (fp16, uint64_t, _Float16, 16)
+DEF_CONVERT (fp16, uint64_t, _Float16, 32)
+DEF_CONVERT (fp16, uint64_t, _Float16, 64)
+DEF_CONVERT (fp16, uint64_t, _Float16, 128)
+DEF_CONVERT (fp16, uint64_t, _Float16, 256)
+DEF_CONVERT (fp16, uint64_t, _Float16, 512)
+DEF_CONVERT (fp16, uint64_t, _Float16, 1024)
+DEF_CONVERT (fp16, uint64_t, _Float16, 2048)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfncvt\.f\.x\.w\s+v[0-9]+,\s*v[0-9]+} 15 } } */
+/* { dg-final { scan-assembler-times {vfncvt\.f\.xu\.w\s+v[0-9]+,\s*v[0-9]+} 15 } } */
+/* { dg-final { scan-assembler-times {vfncvt\.f\.f\.w\s+v[0-9]+,\s*v[0-9]+} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h
index fa124ff..8dd5bcf 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h
@@ -213,6 +213,15 @@ typedef double v512df __attribute__ ((vector_size (4096)));
a[i] = OP (b[i]); \
}
+#define DEF_OP_V_CVT(PREFIX, NUM, TYPE_IN, TYPE_OUT, OP) \
+ void __attribute__ ((noinline, noclone)) \
+ PREFIX##_##TYPE_IN##_##TYPE_OUT##_##NUM (TYPE_OUT *restrict a, \
+ TYPE_IN *restrict b) \
+ { \
+ for (int i = 0; i < NUM; ++i) \
+ a[i] = OP (b[i]); \
+ }
+
#define DEF_CALL_VV(PREFIX, NUM, TYPE, CALL) \
void __attribute__ ((noinline, noclone)) \
PREFIX##_##TYPE##NUM (TYPE *restrict a, TYPE *restrict b, TYPE *restrict c) \
@@ -824,3 +833,9 @@ typedef double v512df __attribute__ ((vector_size (4096)));
a[i] = cond[i] ? (TYPE3) (b[i] >> shift) : a[i]; \
return a; \
}
+
+#define DEF_CONSECUTIVE(TYPE, NUM) \
+ TYPE f##TYPE (TYPE a, TYPE b) \
+ { \
+ return __builtin_shufflevector (a, b, MASK_##NUM); \
+ }
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iceil-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iceil-0.c
new file mode 100644
index 0000000..f8877a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iceil-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (iceilf, 1, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 2, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 4, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 8, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 16, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 32, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 64, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 128, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 256, float, int, __builtin_iceilf)
+DEF_OP_V_CVT (iceilf, 512, float, int, __builtin_iceilf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-ifloor-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-ifloor-0.c
new file mode 100644
index 0000000..69eaa11
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-ifloor-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (ifloorf, 1, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 2, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 4, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 8, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 16, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 32, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 64, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 128, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 256, float, int, __builtin_ifloorf)
+DEF_OP_V_CVT (ifloorf, 512, float, int, __builtin_ifloorf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-irint-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-irint-0.c
new file mode 100644
index 0000000..3297bc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-irint-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (irintf, 1, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 2, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 4, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 8, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 16, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 32, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 64, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 128, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 256, float, int, __builtin_irintf)
+DEF_OP_V_CVT (irintf, 512, float, int, __builtin_irintf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iround-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iround-0.c
new file mode 100644
index 0000000..12fe7a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-iround-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (iroundf, 1, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 2, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 4, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 8, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 16, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 32, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 64, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 128, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 256, float, int, __builtin_iroundf)
+DEF_OP_V_CVT (iroundf, 512, float, int, __builtin_iroundf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-0.c
new file mode 100644
index 0000000..f843574
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lceil, 1, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 2, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 4, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 8, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 16, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 32, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 64, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 128, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 256, double, long, __builtin_lceil)
+DEF_OP_V_CVT (lceil, 512, double, long, __builtin_lceil)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-1.c
new file mode 100644
index 0000000..2d40808
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lceil-1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvl4096b -mabi=ilp32f -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lceilf, 1, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 2, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 4, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 8, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 16, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 32, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 64, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 128, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 256, float, long, __builtin_lceilf)
+DEF_OP_V_CVT (lceilf, 512, float, long, __builtin_lceilf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-0.c
new file mode 100644
index 0000000..fe61e99
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lfloor, 1, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 2, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 4, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 8, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 16, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 32, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 64, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 128, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 256, double, long, __builtin_lfloor)
+DEF_OP_V_CVT (lfloor, 512, double, long, __builtin_lfloor)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-1.c
new file mode 100644
index 0000000..a64e5c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lfloor-1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvl4096b -mabi=ilp32f -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lfloorf, 1, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 2, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 4, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 8, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 16, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 32, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 64, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 128, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 256, float, long, __builtin_lfloorf)
+DEF_OP_V_CVT (lfloorf, 512, float, long, __builtin_lfloorf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llceil-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llceil-0.c
new file mode 100644
index 0000000..204e3ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llceil-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (llceil, 1, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 2, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 4, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 8, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 16, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 32, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 64, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 128, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 256, double, int64_t, __builtin_llceil)
+DEF_OP_V_CVT (llceil, 512, double, int64_t, __builtin_llceil)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llfloor-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llfloor-0.c
new file mode 100644
index 0000000..205a5d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llfloor-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (llfloor, 1, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 2, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 4, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 8, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 16, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 32, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 64, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 128, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 256, double, int64_t, __builtin_llfloor)
+DEF_OP_V_CVT (llfloor, 512, double, int64_t, __builtin_llfloor)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llrint-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llrint-0.c
new file mode 100644
index 0000000..b0bf422
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llrint-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (llrint, 1, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 2, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 4, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 8, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 16, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 32, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 64, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 128, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 256, double, int64_t, __builtin_llrint)
+DEF_OP_V_CVT (llrint, 512, double, int64_t, __builtin_llrint)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llround-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llround-0.c
new file mode 100644
index 0000000..9bed764
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-llround-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (llround, 1, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 2, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 4, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 8, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 16, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 32, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 64, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 128, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 256, double, int64_t, __builtin_llround)
+DEF_OP_V_CVT (llround, 512, double, int64_t, __builtin_llround)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-0.c
new file mode 100644
index 0000000..561edef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lrint, 1, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 2, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 4, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 8, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 16, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 32, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 64, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 128, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 256, double, long, __builtin_lrint)
+DEF_OP_V_CVT (lrint, 512, double, long, __builtin_lrint)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-1.c
new file mode 100644
index 0000000..5414352
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lrint-1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvfh_zvl4096b -mabi=ilp32f -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lrintf, 1, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 2, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 4, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 8, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 16, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 32, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 64, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 128, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 256, float, long, __builtin_lrintf)
+DEF_OP_V_CVT (lrintf, 512, float, long, __builtin_lrintf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-0.c
new file mode 100644
index 0000000..c2a9f6b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-0.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lround, 1, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 2, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 4, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 8, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 16, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 32, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 64, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 128, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 256, double, long, __builtin_lround)
+DEF_OP_V_CVT (lround, 512, double, long, __builtin_lround)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-1.c
new file mode 100644
index 0000000..5a43133
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/math-lround-1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvfh_zvl4096b -mabi=ilp32f -O3 --param=riscv-autovec-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+
+#include "def.h"
+
+DEF_OP_V_CVT (lroundf, 1, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 2, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 4, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 8, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 16, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 32, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 64, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 128, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 256, float, long, __builtin_lroundf)
+DEF_OP_V_CVT (lroundf, 512, float, long, __builtin_lroundf)
+
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+/* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+} 9 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c
index aedf988..24bb724 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c
@@ -5,54 +5,6 @@
#include "def.h"
/*
-** mov0:
-** lbu\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sb\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (int8_t *in, int8_t *out)
-{
- v1qi v = *(v1qi*)in;
- *(v1qi*)out = v;
-}
-
-/*
-** mov1:
-** lhu\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sh\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov1 (int8_t *in, int8_t *out)
-{
- v2qi v = *(v2qi*)in;
- *(v2qi*)out = v;
-}
-
-/*
-** mov2:
-** lw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov2 (int8_t *in, int8_t *out)
-{
- v4qi v = *(v4qi*)in;
- *(v4qi*)out = v;
-}
-
-/*
-** mov3:
-** ld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov3 (int8_t *in, int8_t *out)
-{
- v8qi v = *(v8qi*)in;
- *(v8qi*)out = v;
-}
-
-/*
** mov4:
** vsetivli\s+zero,\s*16,\s*e8,\s*mf8,\s*t[au],\s*m[au]
** vle8\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c
index 5e96154..cae96b3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c
@@ -5,18 +5,6 @@
#include "def.h"
/*
-** mov0:
-** fld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** fsd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (double *in, double *out)
-{
- v1df v = *(v1df*)in;
- *(v1df*)out = v;
-}
-
-/*
** mov1:
** vsetivli\s+zero,\s*2,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-2.c
deleted file mode 100644
index 10ae197..0000000
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-2.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-march=rv32gcv_zvfh_zvl4096b -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
-/* { dg-final { check-function-bodies "**" "" } } */
-
-#include "def.h"
-
-/*
-** mov:
-** lw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** lw\s+[a-x0-9]+,4\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,4\s*\([a-x0-9]+\)
-** ret
-*/
-void mov (int8_t *in, int8_t *out)
-{
- v8qi v = *(v8qi*)in;
- *(v8qi*)out = v;
-}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c
index f2880ae..86ce228 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c
@@ -5,42 +5,6 @@
#include "def.h"
/*
-** mov0:
-** lhu\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sh\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (int16_t *in, int16_t *out)
-{
- v1hi v = *(v1hi*)in;
- *(v1hi*)out = v;
-}
-
-/*
-** mov1:
-** lw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov1 (int16_t *in, int16_t *out)
-{
- v2hi v = *(v2hi*)in;
- *(v2hi*)out = v;
-}
-
-/*
-** mov2:
-** ld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov2 (int16_t *in, int16_t *out)
-{
- v4hi v = *(v4hi*)in;
- *(v4hi*)out = v;
-}
-
-/*
** mov3:
** vsetivli\s+zero,\s*8,\s*e16,\s*mf4,\s*t[au],\s*m[au]
** vle16\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-4.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-4.c
deleted file mode 100644
index f81f169..0000000
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-4.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-march=rv32gcv_zvfh_zvl4096b -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
-/* { dg-final { check-function-bodies "**" "" } } */
-
-#include "def.h"
-
-/*
-** mov:
-** lw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** lw\s+[a-x0-9]+,4\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,4\s*\([a-x0-9]+\)
-** ret
-*/
-void mov (int16_t *in, int16_t *out)
-{
- v4hi v = *(v4hi*)in;
- *(v4hi*)out = v;
-}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c
index c30ed8f..0447520 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c
@@ -5,30 +5,6 @@
#include "def.h"
/*
-** mov0:
-** lw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (int32_t *in, int32_t *out)
-{
- v1si v = *(v1si*)in;
- *(v1si*)out = v;
-}
-
-/*
-** mov1:
-** ld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov1 (int32_t *in, int32_t *out)
-{
- v2si v = *(v2si*)in;
- *(v2si*)out = v;
-}
-
-/*
** mov2:
** vsetivli\s+zero,\s*4,\s*e32,\s*mf2,\s*t[au],\s*m[au]
** vle32\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-6.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-6.c
deleted file mode 100644
index d6dbff1..0000000
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-6.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-march=rv32gcv_zvfh_zvl4096b -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
-/* { dg-final { check-function-bodies "**" "" } } */
-
-#include "def.h"
-
-/*
-** mov:
-** lw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** lw\s+[a-x0-9]+,4\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sw\s+[a-x0-9]+,4\s*\([a-x0-9]+\)
-** ret
-*/
-void mov (int32_t *in, int32_t *out)
-{
- v2si v = *(v2si*)in;
- *(v2si*)out = v;
-}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c
index 46509e3..d0674a4 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c
@@ -5,18 +5,6 @@
#include "def.h"
/*
-** mov0:
-** ld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** sd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (int64_t *in, int64_t *out)
-{
- v1di v = *(v1di*)in;
- *(v1di*)out = v;
-}
-
-/*
** mov1:
** vsetivli\s+zero,\s*2,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c
index 1cba7dd..b905c74 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c
@@ -5,42 +5,6 @@
#include "def.h"
/*
-** mov0:
-** flh\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** fsh\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (_Float16 *in, _Float16 *out)
-{
- v1hf v = *(v1hf*)in;
- *(v1hf*)out = v;
-}
-
-/*
-** mov1:
-** flw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** fsw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov1 (_Float16 *in, _Float16 *out)
-{
- v2hf v = *(v2hf*)in;
- *(v2hf*)out = v;
-}
-
-/*
-** mov2:
-** fld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** fsd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov2 (_Float16 *in, _Float16 *out)
-{
- v4hf v = *(v4hf*)in;
- *(v4hf*)out = v;
-}
-
-/*
** mov3:
** vsetivli\s+zero,\s*8,\s*e16,\s*mf4,\s*t[au],\s*m[au]
** vle16\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c
index 0773f6a..5f9bc05 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c
@@ -5,30 +5,6 @@
#include "def.h"
/*
-** mov0:
-** flw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** fsw\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov0 (float *in, float *out)
-{
- v1sf v = *(v1sf*)in;
- *(v1sf*)out = v;
-}
-
-/*
-** mov1:
-** fld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** fsd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
-** ret
-*/
-void mov1 (float *in, float *out)
-{
- v2sf v = *(v2sf*)in;
- *(v2sf*)out = v;
-}
-
-/*
** mov2:
** vsetivli\s+zero,\s*4,\s*e32,\s*mf2,\s*t[au],\s*m[au]
** vle32\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/perm-4.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/perm-4.c
index 4d6862c..d2d4938 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/perm-4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/perm-4.c
@@ -3,7 +3,7 @@
#include "../vls-vlmax/perm-4.c"
-/* { dg-final { scan-assembler-times {vrgather\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 19 } } */
+/* { dg-final { scan-assembler-times {vrgather\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 18 } } */
/* { dg-final { scan-assembler-times {vrgatherei16\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 12 } } */
-/* { dg-final { scan-assembler-times {vrsub\.vi} 24 } } */
+/* { dg-final { scan-assembler-times {vrsub\.vi} 23 } } */
/* { dg-final { scan-assembler-times {vrsub\.vx} 7 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-call-args-4-run.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-call-args-4-run.c
index 01cd55f..aa28bb7 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-call-args-4-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-call-args-4-run.c
@@ -6,7 +6,7 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
-#include <stdint.h>
+#include <stdint-gcc.h>
#include "riscv_vector.h"
vint64m8_t
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-1.c
new file mode 100644
index 0000000..549d664
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-1.c
@@ -0,0 +1,81 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O1" } */
+/* { dg-add-options riscv_v } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#if 0 /* Using include files when using a multilib-relevant -march option is dicey */
+#include <string.h>
+#else
+extern void *memcpy(void *__restrict dest, const void *__restrict src, __SIZE_TYPE__ n);
+#endif
+
+/* memcpy should be implemented using the cpymem pattern.
+** f1:
+XX \.L\d+: # local label is ignored
+** vsetvli\s+[ta][0-7],a2,e8,m8,ta,ma
+** vle8\.v\s+v\d+,0\(a1\)
+** vse8\.v\s+v\d+,0\(a0\)
+** add\s+a1,a1,[ta][0-7]
+** add\s+a0,a0,[ta][0-7]
+** sub\s+a2,a2,[ta][0-7]
+** bne\s+a2,zero,\.L\d+
+** ret
+*/
+
+void f1 (void *a, void *b, __SIZE_TYPE__ l)
+{
+ memcpy (a, b, l);
+}
+
+/* We should still use cpymem even with slightly different types, as signed
+ overflow is undefined.
+** f2:
+XX \.L\d+: # local label is ignored
+** vsetvli\s+[ta][0-7],a2,e8,m8,ta,ma
+** vle8\.v\s+v\d+,0\(a1\)
+** vse8\.v\s+v\d+,0\(a0\)
+** add\s+a1,a1,[ta][0-7]
+** add\s+a0,a0,[ta][0-7]
+** sub\s+a2,a2,[ta][0-7]
+** bne\s+a2,zero,\.L\d+
+** ret
+*/
+void f2 (__INT32_TYPE__* a, __INT32_TYPE__* b, int l)
+{
+ memcpy (a, b, l);
+}
+
+/* If it is known that the pointer arguments to memcpy point
+ to an aligned object, cpymem can use that alignment.
+ Use extern here so that we get a known alignment, lest
+ DATA_ALIGNMENT force us to make the scan pattern accomodate
+ code for different alignments depending on word size.
+** f3: { target { any-opts "-mcmodel=medlow" } }
+** lui\s+[ta][0-7],%hi\(a_a\)
+** lui\s+[ta][0-7],%hi\(a_b\)
+** addi\s+a4,[ta][0-7],%lo\(a_b\)
+** vsetivli\s+zero,16,e32,m4,ta,ma
+** vle32.v\s+v\d+,0\([ta][0-7]\)
+** addi\s+[ta][0-7],[ta][0-7],%lo\(a_a\)
+** vse32\.v\s+v\d+,0\([ta][0-7]\)
+** ret
+*/
+
+/*
+** f3: { target { any-opts "-mcmodel=medany" } }
+** lla\s+[ta][0-7],a_b
+** vsetivli\s+zero,16,e32,m4,ta,ma
+** vle32.v\s+v\d+,0\([ta][0-7]\)
+** lla\s+[ta][0-7],a_a
+** vse32\.v\s+v\d+,0\([ta][0-7]\)
+** ret
+*/
+
+extern struct { __INT32_TYPE__ a[16]; } a_a, a_b;
+
+void f3 ()
+{
+ memcpy (&a_a, &a_b, sizeof a_a);
+}
+
+/* { dg-final { scan-assembler-not {\m(tail|call)\s+memcpy\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-2.c
new file mode 100644
index 0000000..7b706b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-2.c
@@ -0,0 +1,46 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O1" } */
+/* { dg-add-options riscv_v } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+typedef struct { char c[16]; } c16;
+typedef struct { char c[32]; } c32;
+typedef struct { short s; char c[30]; } s16;
+
+/* A short struct copy can use vsetivli.
+** f1:
+** vsetivli\s+zero,16,e8,m1,ta,ma
+** vle8.v\s+v1,0\(a1\)
+** vse8.v\s+v1,0\(a0\)
+** ret
+*/
+void f1 (c16 *a, c16* b)
+{
+ *a = *b;
+}
+
+/* A longer one needs li.
+** f2:
+** li\s+[ta][0-7],32
+** vsetvli\s+zero,[ta][0-7],e8,m2,ta,ma
+** vle8.v\s+v2,0\(a1\)
+** vse8.v\s+v2,0\(a0\)
+** ret
+*/
+void f2 (c32 *a, c32* b)
+{
+ *a = *b;
+}
+
+/* A 32 byte struct is still short enough for vsetivli
+ if we can use an element width larger than 8.
+** f3:
+** vsetivli\s+zero,16,e16,m2,ta,ma
+** vle16.v\s+v2,0\(a1\)
+** vse16.v\s+v2,0\(a0\)
+** ret
+*/
+void f3 (s16 *a, s16* b)
+{
+ *a = *b;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110119-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110119-2.c
index 958d1ad..3dadc99 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110119-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110119-2.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gczve32x -mabi=lp64d --param=riscv-autovec-preference=fixed-vlmax -Wno-psabi" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#include "riscv_vector.h"
__attribute__ ((noipa)) vint32m1x3_t
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr90263.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr90263.c
new file mode 100644
index 0000000..7308428
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr90263.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-require-effective-target glibc } */
+
+#include "../../../../gcc.dg/pr90263.c"
+
+/* { dg-final { scan-assembler-not "memcpy" { target { riscv_v } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/fortran/pr111566.f90 b/gcc/testsuite/gcc.target/riscv/rvv/fortran/pr111566.f90
new file mode 100644
index 0000000..2e30dc9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/fortran/pr111566.f90
@@ -0,0 +1,31 @@
+! { dg-do compile }
+! { dg-options "-march=rv64gcv -mabi=lp64d -Ofast -fallow-argument-mismatch -fmax-stack-var-size=65536 -S -std=legacy -w" }
+
+module a
+ integer,parameter :: SHR_KIND_R8 = selected_real_kind(12)
+end module a
+module b
+ use a, c => shr_kind_r8
+contains
+ subroutine d(cg , km, i1, i2)
+ real (c) ch(i2,km)
+ real (c) cg(4,i1:i2,km)
+ real dc(i2,km)
+ real(c) ci(i2,km)
+ real(c) cj(i2,km)
+ do k=2,ck
+ do i=i1,0
+ cl = ci(i,k) *ci(i,1) / cj(i,k)+ch(i,1)
+ cm = cg(1,i,k) - min(e,cg(1,i,co))
+ dc(i,k) = sign(cm, cl)
+ enddo
+ enddo
+ if ( cq == 0 ) then
+ do i=i1,i2
+ if( cr <= cs ) then
+ cg= sign( min(ct, cg), cg)
+ endif
+ enddo
+ endif
+ end subroutine d
+end module b
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp b/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp
index ff76e17..674ba0d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp
+++ b/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp
@@ -58,10 +58,12 @@ set AUTOVEC_TEST_OPTS [list \
{-ftree-vectorize -O3 --param riscv-autovec-lmul=m2} \
{-ftree-vectorize -O3 --param riscv-autovec-lmul=m4} \
{-ftree-vectorize -O3 --param riscv-autovec-lmul=m8} \
+ {-ftree-vectorize -O3 --param riscv-autovec-lmul=dynamic} \
{-ftree-vectorize -O2 --param riscv-autovec-lmul=m1} \
{-ftree-vectorize -O2 --param riscv-autovec-lmul=m2} \
{-ftree-vectorize -O2 --param riscv-autovec-lmul=m4} \
- {-ftree-vectorize -O2 --param riscv-autovec-lmul=m8} ]
+ {-ftree-vectorize -O2 --param riscv-autovec-lmul=m8} \
+ {-ftree-vectorize -O2 --param riscv-autovec-lmul=dynamic} ]
foreach op $AUTOVEC_TEST_OPTS {
dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/autovec/partial/*.\[cS\]]] \
"" "$op"
@@ -104,18 +106,22 @@ set AUTOVEC_TEST_OPTS [list \
{-ftree-vectorize -O3 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m2 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O3 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m4 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O3 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m8 -fno-vect-cost-model -ffast-math} \
+ {-ftree-vectorize -O3 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=dynamic -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m1 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m2 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m4 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=m8 -fno-vect-cost-model -ffast-math} \
+ {-ftree-vectorize -O2 --param riscv-autovec-preference=fixed-vlmax --param riscv-autovec-lmul=dynamic -ffast-math} \
{-ftree-vectorize -O3 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m1 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O3 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m2 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O3 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m4 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O3 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m8 -fno-vect-cost-model -ffast-math} \
+ {-ftree-vectorize -O3 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=dynamic -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m1 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m2 -fno-vect-cost-model -ffast-math} \
{-ftree-vectorize -O2 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m4 -fno-vect-cost-model -ffast-math} \
- {-ftree-vectorize -O2 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m8 -fno-vect-cost-model -ffast-math} ]
+ {-ftree-vectorize -O2 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=m8 -fno-vect-cost-model -ffast-math} \
+ {-ftree-vectorize -O2 --param riscv-autovec-preference=scalable --param riscv-autovec-lmul=dynamic -ffast-math} ]
foreach op $AUTOVEC_TEST_OPTS {
dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/autovec/gather-scatter/*.\[cS\]]] \
"" "$op"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111255.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111255.c
index 736f683..d667dbc 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111255.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111255.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 --param riscv-autovec-lmul=m2 -fno-vect-cost-model" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
#define DEF_LOOP(OLD_TYPE, NEW_TYPE) \
void __attribute__ ((noipa)) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c
index c566f8a..2ec9487 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c
@@ -88,8 +88,8 @@ void f (void * restrict in, void * restrict out, int n, int cond)
}
}
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli} 10 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 10 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-times {vsetvli} 19 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c
index d0e7525..bcafce3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c
@@ -80,8 +80,8 @@ void f (void * restrict in, void * restrict out, int n, int cond)
}
}
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli} 9 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 9 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
+/* { dg-final { scan-assembler-times {vsetvli} 17 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c
index 6b7c773..5d11267 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/wredsum_vlmax.c
@@ -1,8 +1,8 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvl256b --param=riscv-autovec-preference=fixed-vlmax -O3" } */
+/* { dg-options "-march=rv64gcv_zvl256b -mabi=lp64d --param=riscv-autovec-preference=fixed-vlmax -O3" } */
-#include <stdint.h>
+#include <stdint-gcc.h>
int16_t foo (int8_t *restrict a)
{
diff --git a/gcc/testsuite/gcc.target/riscv/xtheadcondmov-indirect.c b/gcc/testsuite/gcc.target/riscv/xtheadcondmov-indirect.c
index c3253ba..427c9c1 100644
--- a/gcc/testsuite/gcc.target/riscv/xtheadcondmov-indirect.c
+++ b/gcc/testsuite/gcc.target/riscv/xtheadcondmov-indirect.c
@@ -1,16 +1,11 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv32gc_xtheadcondmov -fno-sched-pressure" { target { rv32 } } } */
-/* { dg-options "-march=rv64gc_xtheadcondmov -fno-sched-pressure" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_xtheadcondmov" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_xtheadcondmov" { target { rv64 } } } */
/* { dg-skip-if "" { *-*-* } {"-O0" "-Os" "-Og" "-Oz" "-flto" } } */
-/* { dg-final { check-function-bodies "**" "" } } */
-/*
-** ConEmv_imm_imm_reg:
-** addi a[0-9]+,a[0-9]+,-1000
-** li a[0-9]+,10
-** th\.mvnez a[0-9]+,a[0-9]+,a[0-9]+
-** ret
-*/
+/* addi aX, aX, -1000
+ li aX, 10
+ th.mvnez aX, aX, aX */
int ConEmv_imm_imm_reg(int x, int y)
{
if (x == 1000)
@@ -18,13 +13,8 @@ int ConEmv_imm_imm_reg(int x, int y)
return y;
}
-/*
-** ConEmv_imm_reg_reg:
-** addi a[0-9]+,a[0-9]+,-1000
-** th.mveqz a[0-9]+,a[0-9]+,a[0-9]+
-** mv a[0-9]+,a[0-9]+
-** ret
-*/
+/* addi aX, aX, -1000
+ th.mveqz aX, aX, aX */
int ConEmv_imm_reg_reg(int x, int y, int z)
{
if (x == 1000)
@@ -32,13 +22,9 @@ int ConEmv_imm_reg_reg(int x, int y, int z)
return z;
}
-/*
-** ConEmv_reg_imm_reg:
-** sub a[0-9]+,a[0-9]+,a[0-9]+
-** li a[0-9]+,10
-** th.mvnez a[0-9]+,a[0-9]+,a[0-9]+
-** ret
-*/
+/* sub aX, aX, aX
+ li aX, 10
+ th.mvnez aX, aX, aX */
int ConEmv_reg_imm_reg(int x, int y, int z)
{
if (x == y)
@@ -46,13 +32,8 @@ int ConEmv_reg_imm_reg(int x, int y, int z)
return z;
}
-/*
-** ConEmv_reg_reg_reg:
-** sub a[0-9]+,a[0-9]+,a[0-9]+
-** th.mveqz a[0-9]+,a[0-9]+,a[0-9]+
-** mv a[0-9]+,a[0-9]+
-** ret
-*/
+/* sub aX, aX, aX
+ th.mveqz aX, aX, aX */
int ConEmv_reg_reg_reg(int x, int y, int z, int n)
{
if (x == y)
@@ -60,14 +41,10 @@ int ConEmv_reg_reg_reg(int x, int y, int z, int n)
return n;
}
-/*
-** ConNmv_imm_imm_reg:
-** addi a[0-9]+,a[0-9]+,-1000+
-** li a[0-9]+,9998336+
-** addi a[0-9]+,a[0-9]+,1664+
-** th.mveqz a[0-9]+,a[0-9]+,a[0-9]+
-** ret
-*/
+/* addi aX, aX, -1000
+ li aX, 9998336
+ addi aX, aX, 1664
+ th.mveqz aX, aX, aX */
int ConNmv_imm_imm_reg(int x, int y)
{
if (x != 1000)
@@ -75,13 +52,8 @@ int ConNmv_imm_imm_reg(int x, int y)
return y;
}
-/*
-**ConNmv_imm_reg_reg:
-** addi a[0-9]+,a[0-9]+,-1000+
-** th.mvnez a[0-9]+,a[0-9]+,a[0-9]+
-** mv a[0-9]+,a[0-9]+
-** ret
-*/
+/* addi aX, aX, 1000
+ th.mvnez aX, aX, aX */
int ConNmv_imm_reg_reg(int x, int y, int z)
{
if (x != 1000)
@@ -89,13 +61,9 @@ int ConNmv_imm_reg_reg(int x, int y, int z)
return z;
}
-/*
-**ConNmv_reg_imm_reg:
-** sub a[0-9]+,a[0-9]+,a[0-9]+
-** li a[0-9]+,10+
-** th.mveqz a[0-9]+,a[0-9]+,a[0-9]+
-** ret
-*/
+/* sub aX, aX, aX
+ li aX, 10
+ th.mveqz aX, aX, aX */
int ConNmv_reg_imm_reg(int x, int y, int z)
{
if (x != y)
@@ -103,16 +71,17 @@ int ConNmv_reg_imm_reg(int x, int y, int z)
return z;
}
-/*
-**ConNmv_reg_reg_reg:
-** sub a[0-9]+,a[0-9]+,a[0-9]+
-** th.mvnez a[0-9]+,a[0-9]+,a[0-9]+
-** mv a[0-9]+,a[0-9]+
-** ret
-*/
+/* sub aX, aX, aX
+ th.mvnez aX, aX, aX */
int ConNmv_reg_reg_reg(int x, int y, int z, int n)
{
if (x != y)
return z;
return n;
}
+
+/* { dg-final { scan-assembler-times "addi\t" 5 } } */
+/* { dg-final { scan-assembler-times "li\t" 4 } } */
+/* { dg-final { scan-assembler-times "sub\t" 4 } } */
+/* { dg-final { scan-assembler-times "th.mveqz\t" 4 } } */
+/* { dg-final { scan-assembler-times "th.mvnez\t" 4 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/xtheadmempair-4.c b/gcc/testsuite/gcc.target/riscv/xtheadmempair-4.c
new file mode 100644
index 0000000..9aef4e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/xtheadmempair-4.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-g" "-Oz" "-Os" "-flto" } } */
+/* { dg-options "-march=rv64gc_xtheadmempair -mtune=thead-c906 -funwind-tables" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_xtheadmempair -mtune=thead-c906 -funwind-tables" { target { rv32 } } } */
+
+extern void bar (void);
+
+void foo (void)
+{
+ asm volatile (";my clobber list"
+ : : : "s0");
+ bar ();
+ asm volatile (";my clobber list"
+ : : : "s0");
+}
+
+/* { dg-final { scan-assembler-times "th.sdd\t" 1 { target { rv64 } } } } */
+/* { dg-final { scan-assembler ".cfi_offset 8, -16" { target { rv64 } } } } */
+/* { dg-final { scan-assembler ".cfi_offset 1, -8" { target { rv64 } } } } */
+
+/* { dg-final { scan-assembler-times "th.swd\t" 1 { target { rv32 } } } } */
+/* { dg-final { scan-assembler ".cfi_offset 8, -8" { target { rv32 } } } } */
+/* { dg-final { scan-assembler ".cfi_offset 1, -4" { target { rv32 } } } } */
+
+/* { dg-final { scan-assembler ".cfi_restore 1" } } */
+/* { dg-final { scan-assembler ".cfi_restore 8" } } */
+
+/* { dg-final { scan-assembler-times "th.ldd\t" 1 { target { rv64 } } } } */
+/* { dg-final { scan-assembler-times "th.lwd\t" 1 { target { rv32 } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zbb-andn-orn-01.c b/gcc/testsuite/gcc.target/riscv/zbb-andn-orn-01.c
new file mode 100644
index 0000000..f9f3222
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zbb-andn-orn-01.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zbb -mabi=lp64" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-g" "-Oz" "-Os" } } */
+
+int foo1(int rs1)
+{
+ return 100 & ~rs1;
+}
+
+int foo2(int rs1)
+{
+ return 100 | ~rs1;
+}
+
+/* { dg-final { scan-assembler-times "andn\t" 1 } } */
+/* { dg-final { scan-assembler-times "orn\t" 1 } } */
+/* { dg-final { scan-assembler-times "li\t" 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zbb-andn-orn-02.c b/gcc/testsuite/gcc.target/riscv/zbb-andn-orn-02.c
new file mode 100644
index 0000000..112c0fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zbb-andn-orn-02.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_zbb -mabi=ilp32" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-g" "-Oz" "-Os" } } */
+
+int foo1(int rs1)
+{
+ return 100 & ~rs1;
+}
+
+int foo2(int rs1)
+{
+ return 100 | ~rs1;
+}
+
+/* { dg-final { scan-assembler-times "andn\t" 1 } } */
+/* { dg-final { scan-assembler-times "orn\t" 1 } } */
+/* { dg-final { scan-assembler-times "li\t" 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics.c
index bcfa04b..276dac7 100644
--- a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics.c
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics.c
@@ -45,5 +45,5 @@ int primitiveSemantics_11(int a, int b) {
/* { dg-final { scan-assembler-times {\mczero\.eqz\M} 6 } } */
/* { dg-final { scan-assembler-times {\mczero\.nez\M} 6 } } */
-/* { dg-final { scan-assembler-not {\mbeq} } } */
-/* { dg-final { scan-assembler-not {\mbne} } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm.c
new file mode 100644
index 0000000..a53a908
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm.c
@@ -0,0 +1,57 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_imm_00(long a, long b) {
+ return a == 2 ? 0 : b;
+}
+
+long primitiveSemantics_compare_imm_01(long a, long b) {
+ return a != 2 ? 0 : b;
+}
+
+long primitiveSemantics_compare_imm_02(long a, long b) {
+ return a == 2 ? b : 0;
+}
+
+long primitiveSemantics_compare_imm_03(long a, long b) {
+ return a != 2 ? b : 0;
+}
+
+long primitiveSemantics_compare_imm_04(long a, long b) {
+ if (a == 2)
+ b = 0;
+ return b;
+}
+
+long primitiveSemantics_compare_imm_05(long a, long b) {
+ if (!(a == 2))
+ b = 0;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_06(int a, int b) { return a == 2 ? 0 : b; }
+
+int primitiveSemantics_compare_imm_07(int a, int b) { return a != 2 ? 0 : b; }
+
+int primitiveSemantics_compare_imm_08(int a, int b) { return a == 2 ? b : 0; }
+
+int primitiveSemantics_compare_imm_09(int a, int b) { return a != 2 ? b : 0; }
+
+int primitiveSemantics_compare_imm_10(int a, int b) {
+ if ((a == 2))
+ b = 0;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_11(int a, int b) {
+ if (!(a == 2))
+ b = 0;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_0_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_0_imm.c
new file mode 100644
index 0000000..c90ed10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_0_imm.c
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_imm_return_0_imm_00(long a, long b) {
+ return a == 2 ? 0 : 5;
+}
+
+long primitiveSemantics_compare_imm_return_0_imm_01(long a, long b) {
+ return a != 2 ? 0 : 5;
+}
+
+long primitiveSemantics_compare_imm_return_0_imm_02(long a, long b) {
+ return a == 2 ? 5 : 0;
+}
+
+long primitiveSemantics_compare_imm_return_0_imm_03(long a, long b) {
+ return a != 2 ? 5 : 0;
+}
+
+long primitiveSemantics_compare_imm_return_0_imm_04(long a, long b) {
+ if (a == 2)
+ b = 0;
+ else
+ b = 5;
+ return b;
+}
+
+long primitiveSemantics_compare_imm_return_0_imm_05(long a, long b) {
+ if (!(a == 2))
+ b = 0;
+ else
+ b = 5;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_0_imm_06(int a, int b) {
+ return a == 2 ? 0 : 5;
+}
+
+int primitiveSemantics_compare_imm_return_0_imm_07(int a, int b) {
+ return a != 2 ? 0 : 5;
+}
+
+int primitiveSemantics_compare_imm_return_0_imm_08(int a, int b) {
+ return a == 2 ? 5 : 0;
+}
+
+int primitiveSemantics_compare_imm_return_0_imm_09(int a, int b) {
+ return a != 2 ? 5 : 0;
+}
+
+int primitiveSemantics_compare_imm_return_0_imm_10(int a, int b) {
+ if ((a == 2))
+ b = 0;
+ else
+ b = 5;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_0_imm_11(int a, int b) {
+ if (!(a == 2))
+ b = 0;
+ else
+ b = 5;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_imm.c
new file mode 100644
index 0000000..e806f6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_imm.c
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_imm_return_imm_imm_00(long a, long b) {
+ return a == 2 ? 7 : 4;
+}
+
+long primitiveSemantics_compare_imm_return_imm_imm_01(long a, long b) {
+ return a != 2 ? 7 : 4;
+}
+
+long primitiveSemantics_compare_imm_return_imm_imm_02(long a, long b) {
+ return a == 2 ? 7 : 4;
+}
+
+long primitiveSemantics_compare_imm_return_imm_imm_03(long a, long b) {
+ return a != 2 ? 7 : 4;
+}
+
+long primitiveSemantics_compare_imm_return_imm_imm_04(long a, long b) {
+ if (a == 2)
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+long primitiveSemantics_compare_imm_return_imm_imm_05(long a, long b) {
+ if (!(a == 2))
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_imm_imm_06(int a, int b) {
+ return a == 2 ? 7 : 4;
+}
+
+int primitiveSemantics_compare_imm_return_imm_imm_07(int a, int b) {
+ return a != 2 ? 7 : 4;
+}
+
+int primitiveSemantics_compare_imm_return_imm_imm_08(int a, int b) {
+ return a == 2 ? 7 : 4;
+}
+
+int primitiveSemantics_compare_imm_return_imm_imm_09(int a, int b) {
+ return a != 2 ? 7 : 4;
+}
+
+int primitiveSemantics_compare_imm_return_imm_imm_10(int a, int b) {
+ if ((a == 2))
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_imm_imm_11(int a, int b) {
+ if (!(a == 2))
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_reg.c
new file mode 100644
index 0000000..f976d60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_imm_reg.c
@@ -0,0 +1,65 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_imm_return_imm_reg_00(long a, long b) {
+ return a == 2 ? 3 : b;
+}
+
+long primitiveSemantics_compare_imm_return_imm_reg_01(long a, long b) {
+ return a != 2 ? 3 : b;
+}
+
+long primitiveSemantics_compare_imm_return_imm_reg_02(long a, long b) {
+ return a == 2 ? b : 3;
+}
+
+long primitiveSemantics_compare_imm_return_imm_reg_03(long a, long b) {
+ return a != 2 ? b : 3;
+}
+
+long primitiveSemantics_compare_imm_return_imm_reg_04(long a, long b) {
+ if (a == 2)
+ b = 3;
+ return b;
+}
+
+long primitiveSemantics_compare_imm_return_imm_reg_05(long a, long b) {
+ if (!(a == 2))
+ b = 3;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_imm_reg_06(int a, int b) {
+ return a == 2 ? 3 : b;
+}
+
+int primitiveSemantics_compare_imm_return_imm_reg_07(int a, int b) {
+ return a != 2 ? 3 : b;
+}
+
+int primitiveSemantics_compare_imm_return_imm_reg_08(int a, int b) {
+ return a == 2 ? b : 3;
+}
+
+int primitiveSemantics_compare_imm_return_imm_reg_09(int a, int b) {
+ return a != 2 ? b : 3;
+}
+
+int primitiveSemantics_compare_imm_return_imm_reg_10(int a, int b) {
+ if ((a == 2))
+ b = 3;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_imm_reg_11(int a, int b) {
+ if (!(a == 2))
+ b = 3;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_reg_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_reg_reg.c
new file mode 100644
index 0000000..90e9119
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_imm_return_reg_reg.c
@@ -0,0 +1,65 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_imm_return_reg_reg_00(long a, long b, long c) {
+ return a == 2 ? c : b;
+}
+
+long primitiveSemantics_compare_imm_return_reg_reg_01(long a, long b, long c) {
+ return a != 2 ? c : b;
+}
+
+long primitiveSemantics_compare_imm_return_reg_reg_02(long a, long b, long c) {
+ return a == 2 ? b : c;
+}
+
+long primitiveSemantics_compare_imm_return_reg_reg_03(long a, long b, long c) {
+ return a != 2 ? b : c;
+}
+
+long primitiveSemantics_compare_imm_return_reg_reg_04(long a, long b, long c) {
+ if (a == 2)
+ b = c;
+ return b;
+}
+
+long primitiveSemantics_compare_imm_return_reg_reg_05(long a, long b, long c) {
+ if (!(a == 2))
+ b = c;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_reg_reg_06(int a, int b, int c) {
+ return a == 2 ? c : b;
+}
+
+int primitiveSemantics_compare_imm_return_reg_reg_07(int a, int b, int c) {
+ return a != 2 ? c : b;
+}
+
+int primitiveSemantics_compare_imm_return_reg_reg_08(int a, int b, int c) {
+ return a == 2 ? b : c;
+}
+
+int primitiveSemantics_compare_imm_return_reg_reg_09(int a, int b, int c) {
+ return a != 2 ? b : c;
+}
+
+int primitiveSemantics_compare_imm_return_reg_reg_10(int a, int b, int c) {
+ if ((a == 2))
+ b = c;
+ return b;
+}
+
+int primitiveSemantics_compare_imm_return_reg_reg_11(int a, int b, int c) {
+ if (!(a == 2))
+ b = c;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 12 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 12 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg.c
new file mode 100644
index 0000000..bfe8c06
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg.c
@@ -0,0 +1,65 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_reg_00(long a, long b, long c) {
+ return a == c ? 0 : b;
+}
+
+long primitiveSemantics_compare_reg_01(long a, long b, long c) {
+ return a != c ? 0 : b;
+}
+
+long primitiveSemantics_compare_reg_02(long a, long b, long c) {
+ return a == c ? b : 0;
+}
+
+long primitiveSemantics_compare_reg_03(long a, long b, long c) {
+ return a != c ? b : 0;
+}
+
+long primitiveSemantics_compare_reg_04(long a, long b, long c) {
+ if (a == c)
+ b = 0;
+ return b;
+}
+
+long primitiveSemantics_compare_reg_05(long a, long b, long c) {
+ if (!(a == c))
+ b = 0;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_06(int a, int b, int c) {
+ return a == c ? 0 : b;
+}
+
+int primitiveSemantics_compare_reg_07(int a, int b, int c) {
+ return a != c ? 0 : b;
+}
+
+int primitiveSemantics_compare_reg_08(int a, int b, int c) {
+ return a == c ? b : 0;
+}
+
+int primitiveSemantics_compare_reg_09(int a, int b, int c) {
+ return a != c ? b : 0;
+}
+
+int primitiveSemantics_compare_reg_10(int a, int b, int c) {
+ if ((a == c))
+ b = 0;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_11(int a, int b, int c) {
+ if (!(a == c))
+ b = 0;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_0_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_0_imm.c
new file mode 100644
index 0000000..164de06
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_0_imm.c
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_reg_return_0_imm_00(long a, long b, long c) {
+ return a == c ? 0 : 9;
+}
+
+long primitiveSemantics_compare_reg_return_0_imm_01(long a, long b, long c) {
+ return a != c ? 0 : 9;
+}
+
+long primitiveSemantics_compare_reg_return_0_imm_02(long a, long b, long c) {
+ return a == c ? 9 : 0;
+}
+
+long primitiveSemantics_compare_reg_return_0_imm_03(long a, long b, long c) {
+ return a != c ? 9 : 0;
+}
+
+long primitiveSemantics_compare_reg_return_0_imm_04(long a, long b, long c) {
+ if (a == c)
+ b = 0;
+ else
+ b = 9;
+ return b;
+}
+
+long primitiveSemantics_compare_reg_return_0_imm_05(long a, long b, long c) {
+ if (!(a == c))
+ b = 0;
+ else
+ b = 9;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_0_imm_06(int a, int b, int c) {
+ return a == c ? 0 : 9;
+}
+
+int primitiveSemantics_compare_reg_return_0_imm_07(int a, int b, int c) {
+ return a != c ? 0 : 9;
+}
+
+int primitiveSemantics_compare_reg_return_0_imm_08(int a, int b, int c) {
+ return a == c ? 9 : 0;
+}
+
+int primitiveSemantics_compare_reg_return_0_imm_09(int a, int b, int c) {
+ return a != c ? 9 : 0;
+}
+
+int primitiveSemantics_compare_reg_return_0_imm_10(int a, int b, int c) {
+ if ((a == c))
+ b = 0;
+ else
+ b = 9;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_0_imm_11(int a, int b, int c) {
+ if (!(a == c))
+ b = 0;
+ else
+ b = 9;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_imm.c
new file mode 100644
index 0000000..8ad97ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_imm.c
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_reg_return_imm_imm_00(long a, long b, long c) {
+ return a == c ? 7 : 4;
+}
+
+long primitiveSemantics_compare_reg_return_imm_imm_01(long a, long b, long c) {
+ return a != c ? 7 : 4;
+}
+
+long primitiveSemantics_compare_reg_return_imm_imm_02(long a, long b, long c) {
+ return a == c ? 7 : 4;
+}
+
+long primitiveSemantics_compare_reg_return_imm_imm_03(long a, long b, long c) {
+ return a != c ? 7 : 4;
+}
+
+long primitiveSemantics_compare_reg_return_imm_imm_04(long a, long b, long c) {
+ if (a == c)
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+long primitiveSemantics_compare_reg_return_imm_imm_05(long a, long b, long c) {
+ if (!(a == c))
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_imm_imm_06(int a, int b, int c) {
+ return a == c ? 7 : 4;
+}
+
+int primitiveSemantics_compare_reg_return_imm_imm_07(int a, int b, int c) {
+ return a != c ? 7 : 4;
+}
+
+int primitiveSemantics_compare_reg_return_imm_imm_08(int a, int b, int c) {
+ return a == c ? 7 : 4;
+}
+
+int primitiveSemantics_compare_reg_return_imm_imm_09(int a, int b, int c) {
+ return a != c ? 7 : 4;
+}
+
+int primitiveSemantics_compare_reg_return_imm_imm_10(int a, int b, int c) {
+ if ((a == c))
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_imm_imm_11(int a, int b, int c) {
+ if (!(a == c))
+ b = 7;
+ else
+ b = 4;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_reg.c
new file mode 100644
index 0000000..5199ba7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_imm_reg.c
@@ -0,0 +1,65 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_reg_return_imm_reg_00(long a, long b, long c) {
+ return a == c ? 10 : b;
+}
+
+long primitiveSemantics_compare_reg_return_imm_reg_01(long a, long b, long c) {
+ return a != c ? 10 : b;
+}
+
+long primitiveSemantics_compare_reg_return_imm_reg_02(long a, long b, long c) {
+ return a == c ? b : 10;
+}
+
+long primitiveSemantics_compare_reg_return_imm_reg_03(long a, long b, long c) {
+ return a != c ? b : 10;
+}
+
+long primitiveSemantics_compare_reg_return_imm_reg_04(long a, long b, long c) {
+ if (a == c)
+ b = 10;
+ return b;
+}
+
+long primitiveSemantics_compare_reg_return_imm_reg_05(long a, long b, long c) {
+ if (!(a == c))
+ b = 10;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_imm_reg_06(int a, int b, int c) {
+ return a == c ? 10 : b;
+}
+
+int primitiveSemantics_compare_reg_return_imm_reg_07(int a, int b, int c) {
+ return a != c ? 10 : b;
+}
+
+int primitiveSemantics_compare_reg_return_imm_reg_08(int a, int b, int c) {
+ return a == c ? b : 10;
+}
+
+int primitiveSemantics_compare_reg_return_imm_reg_09(int a, int b, int c) {
+ return a != c ? b : 10;
+}
+
+int primitiveSemantics_compare_reg_return_imm_reg_10(int a, int b, int c) {
+ if ((a == c))
+ b = 10;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_imm_reg_11(int a, int b, int c) {
+ if (!(a == c))
+ b = 10;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_reg_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_reg_reg.c
new file mode 100644
index 0000000..eecb956
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_compare_reg_return_reg_reg.c
@@ -0,0 +1,77 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og" "-Os" "-Oz"} } */
+
+long primitiveSemantics_compare_reg_return_reg_reg_00(long a, long b, long c,
+ long d) {
+ return a == c ? d : b;
+}
+
+long primitiveSemantics_compare_reg_return_reg_reg_01(long a, long b, long c,
+ long d) {
+ return a != c ? d : b;
+}
+
+long primitiveSemantics_compare_reg_return_reg_reg_02(long a, long b, long c,
+ long d) {
+ return a == c ? b : d;
+}
+
+long primitiveSemantics_compare_reg_return_reg_reg_03(long a, long b, long c,
+ long d) {
+ return a != c ? b : d;
+}
+
+long primitiveSemantics_compare_reg_return_reg_reg_04(long a, long b, long c,
+ long d) {
+ if (a == c)
+ b = d;
+ return b;
+}
+
+long primitiveSemantics_compare_reg_return_reg_reg_05(long a, long b, long c,
+ long d) {
+ if (!(a == c))
+ b = d;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_reg_reg_06(int a, int b, int c,
+ int d) {
+ return a == c ? d : b;
+}
+
+int primitiveSemantics_compare_reg_return_reg_reg_07(int a, int b, int c,
+ int d) {
+ return a != c ? d : b;
+}
+
+int primitiveSemantics_compare_reg_return_reg_reg_08(int a, int b, int c,
+ int d) {
+ return a == c ? b : d;
+}
+
+int primitiveSemantics_compare_reg_return_reg_reg_09(int a, int b, int c,
+ int d) {
+ return a != c ? b : d;
+}
+
+int primitiveSemantics_compare_reg_return_reg_reg_10(int a, int b, int c,
+ int d) {
+ if ((a == c))
+ b = d;
+ return b;
+}
+
+int primitiveSemantics_compare_reg_return_reg_reg_11(int a, int b, int c,
+ int d) {
+ if (!(a == c))
+ b = d;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 12 } } */
+/* { dg-final { scan-assembler-times {\mczero.nez\M} 12 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_0_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_0_imm.c
index 0764d29..e3ccb17 100644
--- a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_0_imm.c
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_0_imm.c
@@ -61,5 +61,5 @@ int primitiveSemantics_return_0_imm_11(int a, int b) {
/* { dg-final { scan-assembler-times {\mczero\.eqz\M} 6 } } */
/* { dg-final { scan-assembler-times {\mczero\.nez\M} 6 } } */
-/* { dg-final { scan-assembler-not {\mbeq} } } */
-/* { dg-final { scan-assembler-not {\mbne} } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_imm.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_imm.c
index 2ff5033..62f9fb2 100644
--- a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_imm.c
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_imm.c
@@ -69,5 +69,5 @@ int primitiveSemantics_return_imm_imm_11(int a, int b) {
/* { dg-final { scan-assembler-times {\mczero\.eqz\M} 6 } } */
/* { dg-final { scan-assembler-times {\mczero\.nez\M} 6 } } */
-/* { dg-final { scan-assembler-not {\mbeq} } } */
-/* { dg-final { scan-assembler-not {\mbne} } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_reg.c
index 93844d1..0866f86 100644
--- a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_reg.c
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_imm_reg.c
@@ -61,5 +61,5 @@ int primitiveSemantics_return_imm_reg_11(int a, int b) {
/* { dg-final { scan-assembler-times {\mczero\.eqz\M} 6 } } */
/* { dg-final { scan-assembler-times {\mczero\.nez\M} 6 } } */
-/* { dg-final { scan-assembler-not {\mbeq} } } */
-/* { dg-final { scan-assembler-not {\mbne} } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_reg_reg.c b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_reg_reg.c
index 619ad8e..eb1764a 100644
--- a/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_reg_reg.c
+++ b/gcc/testsuite/gcc.target/riscv/zicond-primitiveSemantics_return_reg_reg.c
@@ -61,5 +61,5 @@ int primitiveSemantics_return_reg_reg_11(int a, int b, int c) {
/* { dg-final { scan-assembler-times {\mczero\.eqz\M} 12 } } */
/* { dg-final { scan-assembler-times {\mczero\.nez\M} 12 } } */
-/* { dg-final { scan-assembler-not {\mbeq} } } */
-/* { dg-final { scan-assembler-not {\mbne} } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-xor-01.c b/gcc/testsuite/gcc.target/riscv/zicond-xor-01.c
index 8362ffa..20079fd 100644
--- a/gcc/testsuite/gcc.target/riscv/zicond-xor-01.c
+++ b/gcc/testsuite/gcc.target/riscv/zicond-xor-01.c
@@ -10,5 +10,5 @@ long xor1(long crc, long poly)
return crc;
}
-/* { dg-final { scan-assembler-times "czero.eqz\t" 1 } } */
+/* { dg-final { scan-assembler-times {\mczero.eqz\M} 1 } } */
/* { dg-final { scan-assembler-times "xor\t" 1 } } */
diff --git a/gcc/testsuite/gdc.dg/analyzer/analyzer.exp b/gcc/testsuite/gdc.dg/analyzer/analyzer.exp
new file mode 100644
index 0000000..7b82b8e
--- /dev/null
+++ b/gcc/testsuite/gdc.dg/analyzer/analyzer.exp
@@ -0,0 +1,51 @@
+# Copyright (C) 2023 Free Software Foundation, Inc.
+
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 3, or (at your option) any later
+# version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib gdc-dg.exp
+
+# If the analyzer has not been enabled, bail.
+if { ![check_effective_target_analyzer] } {
+ return
+}
+
+global DEFAULT_DFLAGS
+if [info exists DEFAULT_DFLAGS] then {
+ set save_default_dflags $DEFAULT_DFLAGS
+}
+
+# If a testcase doesn't have special options, use these.
+set DEFAULT_DFLAGS "-fanalyzer -Wanalyzer-too-complex -fanalyzer-call-summaries"
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+gdc-dg-runtest [lsort \
+ [glob -nocomplain $srcdir/$subdir/*.d ] ] "" $DEFAULT_DFLAGS
+
+# All done.
+dg-finish
+
+if [info exists save_default_dflags] {
+ set DEFAULT_DFLAGS $save_default_dflags
+} else {
+ unset DEFAULT_DFLAGS
+}
diff --git a/gcc/testsuite/gdc.dg/analyzer/pr111537.d b/gcc/testsuite/gdc.dg/analyzer/pr111537.d
new file mode 100644
index 0000000..e50b05a
--- /dev/null
+++ b/gcc/testsuite/gdc.dg/analyzer/pr111537.d
@@ -0,0 +1,7 @@
+// { dg-do compile }
+import core.stdc.string;
+void main()
+{
+ char[5] arr;
+ strcpy(arr.ptr, "hello world"); // { dg-warning "stack-based buffer overflow" }
+}
diff --git a/gcc/testsuite/gdc.dg/builtins_reject.d b/gcc/testsuite/gdc.dg/builtins_reject.d
new file mode 100644
index 0000000..51d7df2
--- /dev/null
+++ b/gcc/testsuite/gdc.dg/builtins_reject.d
@@ -0,0 +1,17 @@
+// { dg-do compile }
+
+import gcc.builtins;
+
+auto test_sqrt() { return &__builtin_sqrt; }
+auto test_tan() { return &__builtin_tan; }
+auto test_malloc() { return &__builtin_malloc; }
+auto test_printf() { return &__builtin_printf; }
+
+// { dg-error ".__builtin_inf. must be directly called" "" { target *-*-* } .+1 }
+auto test_inf() { return &__builtin_inf; }
+// { dg-error ".__builtin_lfloor. must be directly called" "" { target *-*-* } .+1 }
+auto test_lfloor() { return &__builtin_lfloor; }
+// { dg-error ".__builtin_setjmp. must be directly called" "" { target *-*-* } .+1 }
+auto test_setjmp() { return &__builtin_setjmp; }
+// { dg-error ".__builtin_unreachable. must be directly called" "" { target *-*-* } .+1 }
+auto test_unreachable() { return &__builtin_unreachable; }
diff --git a/gcc/testsuite/gdc.dg/intrinsics_reject.d b/gcc/testsuite/gdc.dg/intrinsics_reject.d
new file mode 100644
index 0000000..8c644cc
--- /dev/null
+++ b/gcc/testsuite/gdc.dg/intrinsics_reject.d
@@ -0,0 +1,87 @@
+// { dg-do compile }
+import core.bitop;
+import core.math;
+import core.volatile;
+import core.stdc.stdarg;
+
+//////////////////////////////////////////////////////
+// core.bitop
+
+int function(uint) test_bsf() { return &bsf; }
+int function(ulong) test_bsfl() { return &bsf; }
+int function(uint) test_bsr() { return &bsr; }
+int function(ulong) test_bsrl() { return &bsr; }
+auto test_bt() { return &bt; }
+// { dg-error "intrinsic function .btc. must be directly called" "" { target *-*-* } .+1 }
+auto test_btc() { return &btc; }
+// { dg-error "intrinsic function .btr. must be directly called" "" { target *-*-* } .+1 }
+auto test_btr() { return &btr; }
+// { dg-error "intrinsic function .bts. must be directly called" "" { target *-*-* } .+1 }
+auto test_bts() { return &bts; }
+// { dg-error "intrinsic function .byteswap. must be directly called" "" { target *-*-* } .+1 }
+ushort function(ushort) test_byteswap() { return &byteswap; }
+// { dg-error "intrinsic function .bswap. must be directly called" "" { target *-*-* } .+1 }
+uint function(uint) test_bswap() { return &bswap; }
+// { dg-error "intrinsic function .bswap. must be directly called" "" { target *-*-* } .+1 }
+ulong function(ulong) test_bswapl() { return &bswap; }
+int function(uint) test_popcnt() { return &popcnt; }
+int function(ulong) test_popcntl() { return &popcnt; }
+auto test_rol() { return &rol!ubyte; }
+auto test_rol(uint a) { return &rol!(1, uint); }
+auto test_ror(ushort a, uint b) { return &ror!ushort; }
+auto test_ror(ulong a) { return &ror!(1, ulong); }
+
+//////////////////////////////////////////////////////
+// core.math
+
+float function(float) test_cosf() { return &cos; }
+double function(double) test_cos() { return &cos; }
+real function(real) test_cosl() { return &cos; }
+float function(float) test_sinf() { return &sin; }
+double function(double) test_sin() { return &sin; }
+real function(real) test_sinl() { return &sin; }
+long function(float) test_rndtolf() { return &rndtol; }
+long function(double) test_rndtol() { return &rndtol; }
+long function(real) test_rndtoll() { return &rndtol; }
+float function(float) test_sqrtf() { return &sqrt; }
+double function(double) test_sqrt() { return &sqrt; }
+real function(real) test_sqrtl() { return &sqrt; }
+float function(float, int) test_ldexpf() { return &ldexp; }
+double function(double, int) test_ldexp() { return &ldexp; }
+real function(real, int) test_ldexpl() { return &ldexp; }
+float function(float) test_fabsf() { return &fabs; }
+double function(double) test_fabs() { return &fabs; }
+real function(real) test_fabsl() { return &fabs; }
+float function(float) test_rintf() { return &rint; }
+double function(double) test_rint() { return &rint; }
+real function(real) test_rintl() { return &rint; }
+
+//////////////////////////////////////////////////////
+// core.volatile
+
+// { dg-error "intrinsic function .volatileLoad. must be directly called" "" { target *-*-* } .+1 }
+ubyte function(ubyte*) test_volatileLoad8() { return &volatileLoad; }
+// { dg-error "intrinsic function .volatileLoad. must be directly called" "" { target *-*-* } .+1 }
+ushort function(ushort*) test_volatileLoad16() { return &volatileLoad; }
+// { dg-error "intrinsic function .volatileLoad. must be directly called" "" { target *-*-* } .+1 }
+uint function(uint*) test_volatileLoad32() { return &volatileLoad; }
+// { dg-error "intrinsic function .volatileLoad. must be directly called" "" { target *-*-* } .+1 }
+ulong function(ulong*) test_volatileLoad64() { return &volatileLoad; }
+// { dg-error "intrinsic function .volatileStore. must be directly called" "" { target *-*-* } .+1 }
+void function(ubyte*, ubyte) test_volatileStore8() { return &volatileStore; }
+// { dg-error "intrinsic function .volatileStore. must be directly called" "" { target *-*-* } .+1 }
+void function(ushort*, ushort) test_volatileStore16() { return &volatileStore; }
+// { dg-error "intrinsic function .volatileStore. must be directly called" "" { target *-*-* } .+1 }
+void function(uint*, uint) test_volatileStore32() { return &volatileStore; }
+// { dg-error "intrinsic function .volatileStore. must be directly called" "" { target *-*-* } .+1 }
+void function(ulong*, ulong) test_volatileStore64() { return &volatileStore; }
+
+//////////////////////////////////////////////////////
+// core.stdc.stdarg
+
+// { dg-error "intrinsic function .va_start. must be directly called" "" { target *-*-* } .+1 }
+auto test_va_start() { return &va_start!int; }
+// { dg-error "built-in function .__builtin_va_end. must be directly called" "" { target *-*-* } .+1 }
+auto test_va_end() { return &va_end; }
+// { dg-error "built-in function .__builtin_va_copy. must be directly called" "" { target *-*-* } .+1 }
+auto test_va_copy() { return &va_copy; }
diff --git a/gcc/testsuite/gdc.test/compilable/dtoh_StructDeclaration.d b/gcc/testsuite/gdc.test/compilable/dtoh_StructDeclaration.d
index 2e36c7d..f8448d9 100644
--- a/gcc/testsuite/gdc.test/compilable/dtoh_StructDeclaration.d
+++ b/gcc/testsuite/gdc.test/compilable/dtoh_StructDeclaration.d
@@ -224,6 +224,15 @@ struct Params final
ddocfiles(ddocfiles)
{}
};
+
+struct Loc final
+{
+ static int32_t showColumns;
+ void toChars(int32_t showColumns = Loc::showColumns);
+ Loc()
+ {
+ }
+};
---
*/
@@ -337,3 +346,9 @@ extern (C++) struct Params
bool obj = true;
Array ddocfiles;
}
+
+extern (C++) struct Loc
+{
+ __gshared int showColumns;
+ void toChars(int showColumns = Loc.showColumns) {}
+}
diff --git a/gcc/testsuite/gdc.test/compilable/dtoh_TemplateDeclaration.d b/gcc/testsuite/gdc.test/compilable/dtoh_TemplateDeclaration.d
index 1ed6318..6fefcce 100644
--- a/gcc/testsuite/gdc.test/compilable/dtoh_TemplateDeclaration.d
+++ b/gcc/testsuite/gdc.test/compilable/dtoh_TemplateDeclaration.d
@@ -39,8 +39,6 @@ struct _d_dynamicArray final
};
#endif
-typedef uint$?:32=32|64=64$_t size_t;
-
struct Outer final
{
int32_t a;
diff --git a/gcc/testsuite/gdc.test/compilable/dtoh_functions.d b/gcc/testsuite/gdc.test/compilable/dtoh_functions.d
index 38607f6..90223cc 100644
--- a/gcc/testsuite/gdc.test/compilable/dtoh_functions.d
+++ b/gcc/testsuite/gdc.test/compilable/dtoh_functions.d
@@ -45,10 +45,10 @@ struct S final
int32_t get(int32_t , int32_t );
static int32_t get();
static const int32_t staticVar;
- void useVars(int32_t pi = i, int32_t psv = staticVar);
+ void useVars(int32_t pi = i, int32_t psv = S::staticVar);
struct Nested final
{
- void useStaticVar(int32_t i = staticVar);
+ void useStaticVar(int32_t i = S::staticVar);
Nested()
{
}
diff --git a/gcc/testsuite/gdc.test/compilable/issue22682.d b/gcc/testsuite/gdc.test/compilable/issue22682.d
new file mode 100644
index 0000000..5301a51
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/issue22682.d
@@ -0,0 +1,8 @@
+module issue22682;
+
+void main()
+{
+ pragma(mangle, "put" ~ "s")
+ extern(C) static int libcPuts(const char*);
+ libcPuts("issue 22682");
+}
diff --git a/gcc/testsuite/gdc.test/compilable/issue24174.d b/gcc/testsuite/gdc.test/compilable/issue24174.d
new file mode 100644
index 0000000..8876a87
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/issue24174.d
@@ -0,0 +1,36 @@
+/* TEST_OUTPUT:
+---
+true
+false
+---
+*/
+
+bool func1() {
+ struct BtMatcher {
+ uint pc = 0;
+ }
+ BtMatcher matcher;
+ with (matcher) {
+ goto StartLoop;
+ StartLoop:
+ goto SecondLabel;
+ SecondLabel:
+ return true;
+ }
+}
+
+bool func2() {
+ try {
+ throw new Exception("a");
+ return true;
+ } catch (Exception e) {
+ goto StartA;
+ StartA:
+ goto StartB;
+ StartB:
+ return false;
+ }
+}
+
+pragma(msg, func1());
+pragma(msg, func2());
diff --git a/gcc/testsuite/gdc.test/compilable/obsolete_body.d b/gcc/testsuite/gdc.test/compilable/obsolete_body.d
new file mode 100644
index 0000000..71906e8
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/obsolete_body.d
@@ -0,0 +1,5 @@
+/* REQUIRED_ARGS: -w
+*/
+
+void test()
+in { } body { }
diff --git a/gcc/testsuite/gdc.test/compilable/shortened_methods.d b/gcc/testsuite/gdc.test/compilable/shortened_methods.d
index 71350af..785cb8e 100644
--- a/gcc/testsuite/gdc.test/compilable/shortened_methods.d
+++ b/gcc/testsuite/gdc.test/compilable/shortened_methods.d
@@ -27,7 +27,12 @@ string test() => "hello"; // works at any scope
static assert(test() == "hello"); // works normally
static assert(is(typeof(&test) == string function())); // same normal type
+struct S(T) {}
+
void func() {
int a;
int nested() => a; // and at nested scopes too
+
+ // Issue 24088 - https://issues.dlang.org/show_bug.cgi?id=24088
+ S!int f() => S!int();
}
diff --git a/gcc/testsuite/gdc.test/compilable/test23145.d b/gcc/testsuite/gdc.test/compilable/test23145.d
index 18eabfb..c6bb551 100644
--- a/gcc/testsuite/gdc.test/compilable/test23145.d
+++ b/gcc/testsuite/gdc.test/compilable/test23145.d
@@ -1,11 +1,4 @@
-/* REQUIRED_ARGS: -wo -wi
-TEST_OUTPUT:
----
-compilable/test23145.d(117): Warning: `scope` allocation of `c` requires that constructor be annotated with `scope`
-compilable/test23145.d(111): is the location of the constructor
-compilable/test23145.d(124): Warning: `scope` allocation of `c` requires that constructor be annotated with `scope`
-compilable/test23145.d(111): is the location of the constructor
----
+/* REQUIRED_ARGS: -wi
*/
// https://issues.dlang.org/show_bug.cgi?id=23145
@@ -28,14 +21,14 @@ class C
C foo(D d) @nogc @safe
{
scope e = new C(1); // ok
- scope c = new C(d); // deprecation
+ scope c = new C(d); // obsolete
return c.d.c;
}
C bax(D d) @safe
{
scope e = new C(1); // ok
- scope c = new C(d); // deprecation
+ scope c = new C(d); // obsolete
return c.d.c;
}
diff --git a/gcc/testsuite/gdc.test/compilable/test24066.d b/gcc/testsuite/gdc.test/compilable/test24066.d
new file mode 100644
index 0000000..4b8a0b3
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/test24066.d
@@ -0,0 +1,11 @@
+// https://issues.dlang.org/show_bug.cgi?id=24066
+
+/*
+TEST_OUTPUT:
+---
+false
+---
+*/
+
+class C;
+pragma(msg, __traits(isAbstractClass, C));
diff --git a/gcc/testsuite/gdc.test/compilable/test24107.d b/gcc/testsuite/gdc.test/compilable/test24107.d
new file mode 100644
index 0000000..d16c259
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/test24107.d
@@ -0,0 +1,17 @@
+// https://issues.dlang.org/show_bug.cgi?id=24107
+
+/*
+TEST_OUTPUT:
+---
+This should not output an error message: false
+---
+*/
+
+bool recurse ()
+{
+ return recurse();
+}
+
+pragma(msg,
+ "This should not output an error message: ",
+ __traits(compiles, {enum bool r = recurse();}));
diff --git a/gcc/testsuite/gdc.test/compilable/test24109.d b/gcc/testsuite/gdc.test/compilable/test24109.d
new file mode 100644
index 0000000..67d03b5
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/test24109.d
@@ -0,0 +1,17 @@
+// https://issues.dlang.org/show_bug.cgi?id=24109
+
+struct Outer
+{
+ void method1() {}
+
+ void method2()
+ {
+ class Inner
+ {
+ void innerMethod()
+ {
+ method1();
+ }
+ }
+ }
+}
diff --git a/gcc/testsuite/gdc.test/compilable/test24118.d b/gcc/testsuite/gdc.test/compilable/test24118.d
new file mode 100644
index 0000000..25376b7
--- /dev/null
+++ b/gcc/testsuite/gdc.test/compilable/test24118.d
@@ -0,0 +1,15 @@
+// https://issues.dlang.org/show_bug.cgi?id=24118
+
+void map(alias fun, T)(T[] arr)
+{
+ fun(arr);
+}
+
+
+void foo()
+{
+ if( __ctfe )
+ {
+ ["a", "b", "c"].map!( a => " " ~ a[0] );
+ }
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/testpull1810.d b/gcc/testsuite/gdc.test/compilable/testpull1810.d
index 830e9af..6af0fc6 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/testpull1810.d
+++ b/gcc/testsuite/gdc.test/compilable/testpull1810.d
@@ -1,6 +1,6 @@
// REQUIRED_ARGS: -c -w
/*
-TEST_OUTPUT:
+Warning removed in: https://github.com/dlang/dmd/pull/15568
---
fail_compilation/testpull1810.d(21): Warning: statement is not reachable
Error: warnings are treated as errors
diff --git a/gcc/testsuite/gdc.test/fail_compilation/warn12809.d b/gcc/testsuite/gdc.test/compilable/warn12809.d
index 8daf44c..7386f1e 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/warn12809.d
+++ b/gcc/testsuite/gdc.test/compilable/warn12809.d
@@ -1,7 +1,7 @@
// REQUIRED_ARGS: -o- -w
/*
-TEST_OUTPUT:
+Warning removed in: https://github.com/dlang/dmd/pull/15568
---
fail_compilation/warn12809.d(25): Warning: statement is not reachable
fail_compilation/warn12809.d(33): Warning: statement is not reachable
@@ -36,7 +36,7 @@ void test_unrachable3()
/********************************************/
/*
-TEST_OUTPUT:
+
---
fail_compilation/warn12809.d(108): Warning: statement is not reachable
fail_compilation/warn12809.d(115): Warning: statement is not reachable
@@ -71,3 +71,27 @@ void test3()
finally foo();
int x = 1;
}
+
+// https://issues.dlang.org/show_bug.cgi?id=14835
+bool isEven(int i)()
+{
+ static if (i % 2)
+ return true;
+ return false;
+}
+
+enum x = isEven!0;
+
+// https://issues.dlang.org/show_bug.cgi?id=10532
+alias Seq(T...) = T;
+void f()
+{
+ foreach (e; Seq!(10, 20))
+ {
+ if (e == 10)
+ continue;
+
+ // lots of code follows..
+ auto x = 1;
+ }
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/aa_init.d b/gcc/testsuite/gdc.test/fail_compilation/aa_init.d
new file mode 100644
index 0000000..b1473c1
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/aa_init.d
@@ -0,0 +1,16 @@
+/*
+REQUIRED_ARGS: -vcolumns
+TEST_OUTPUT:
+---
+fail_compilation/aa_init.d(13,18): Error: invalid associative array initializer `[]`, use `null` instead
+fail_compilation/aa_init.d(14,24): Error: missing key for value `4` in initializer
+fail_compilation/aa_init.d(15,9): Error: cannot implicitly convert expression `[]` of type `void[]` to `int[int]`
+---
+*/
+
+void main()
+{
+ int[int] a = [];
+ int[int] b = [2:3, 4];
+ a = [];
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/body.d b/gcc/testsuite/gdc.test/fail_compilation/body.d
deleted file mode 100644
index 7b718c2..0000000
--- a/gcc/testsuite/gdc.test/fail_compilation/body.d
+++ /dev/null
@@ -1,11 +0,0 @@
-/* REQUIRED_ARGS: -wo -w
-TEST_OUTPUT:
----
-fail_compilation/body.d(11): Warning: usage of identifer `body` as a keyword is obsolete. Use `do` instead.
-Error: warnings are treated as errors
- Use -wi if you wish to treat warnings only as informational.
----
-*/
-
-void test()
-in { } body { }
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ccast.d b/gcc/testsuite/gdc.test/fail_compilation/ccast.d
index f1ca6c0..3558e0c 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ccast.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ccast.d
@@ -1,9 +1,10 @@
/*
TEST_OUTPUT:
---
-fail_compilation/ccast.d(11): Error: C style cast illegal, use `cast(byte)i`
-fail_compilation/ccast.d(24): Error: C style cast illegal, use `cast(foo)5`
-fail_compilation/ccast.d(26): Error: C style cast illegal, use `cast(void*)5`
+fail_compilation/ccast.d(12): Error: C style cast illegal, use `cast(byte)i`
+fail_compilation/ccast.d(25): Error: C style cast illegal, use `cast(foo)5`
+fail_compilation/ccast.d(27): Error: C style cast illegal, use `cast(void*)5`
+fail_compilation/ccast.d(30): Error: C style cast illegal, use `cast(void*)5`
---
*/
@@ -25,4 +26,7 @@ void main()
(void*)5;
(void*)(5); // semantic implicit cast error
+
+ (void*)
+ 5;
}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/chkformat.d b/gcc/testsuite/gdc.test/fail_compilation/chkformat.d
index eb75f42..bd92466 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/chkformat.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/chkformat.d
@@ -24,6 +24,7 @@ fail_compilation/chkformat.d(122): Deprecation: argument `0LU` for format specif
fail_compilation/chkformat.d(122): C `long` is 4 bytes on your system|32=
fail_compilation/chkformat.d(122): Deprecation: argument `0LU` for format specification `"%lu"` must be `uint`, not `ulong`
fail_compilation/chkformat.d(122): C `long` is 4 bytes on your system$
+fail_compilation/chkformat.d(123): Deprecation: argument `p` for format specification `"%n"` must be `int*`, not `const(int)*`
fail_compilation/chkformat.d(201): Deprecation: argument `0L` for format specification `"%d"` must be `int*`, not `long`
fail_compilation/chkformat.d(202): Deprecation: more format specifiers than 1 arguments
fail_compilation/chkformat.d(203): Deprecation: argument `0L` for format specification `"%d"` must be `int*`, not `long`
@@ -86,6 +87,7 @@ void test19() { int u; printf("%ls\n", &u); }
//void test20() { int u; char[] s; sprintf(&s[0], "%d\n", &u); }
//void test21() { int u; fprintf(null, "%d\n", &u); }
void test20() { printf("%lu", ulong.init); }
+void test22() { int i; const(int)* p = &i; printf("%n", p); }
#line 200
diff --git a/gcc/testsuite/gdc.test/fail_compilation/dephexstrings.d b/gcc/testsuite/gdc.test/fail_compilation/dephexstrings.d
deleted file mode 100644
index 553a0c6..0000000
--- a/gcc/testsuite/gdc.test/fail_compilation/dephexstrings.d
+++ /dev/null
@@ -1,9 +0,0 @@
-// REQUIRED_ARGS: -de
-/*
-TEST_OUTPUT:
----
-fail_compilation/dephexstrings.d(9): Error: semicolon expected following auto declaration, not `"60"`
-fail_compilation/dephexstrings.d(9): Error: declaration expected, not `"60"`
----
-*/
-enum xstr = x"60";
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag10169.d b/gcc/testsuite/gdc.test/fail_compilation/diag10169.d
index 84d0ad4..e21a334 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag10169.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag10169.d
@@ -2,7 +2,8 @@
EXTRA_FILES: imports/a10169.d
TEST_OUTPUT:
---
-fail_compilation/diag10169.d(12): Error: no property `x` for `B(0)` of type `imports.a10169.B`
+fail_compilation/diag10169.d(13): Error: no property `x` for `B(0)` of type `imports.a10169.B`
+fail_compilation/imports/a10169.d(3): struct `B` defined here
---
*/
import imports.a10169;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag10783.d b/gcc/testsuite/gdc.test/fail_compilation/diag10783.d
index 80c7f5e..e69eddb 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag10783.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag10783.d
@@ -1,8 +1,9 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag10783.d(14): Error: no property `type` for `event` of type `diag10783.Event`
-fail_compilation/diag10783.d(14): Error: undefined identifier `En`
+fail_compilation/diag10783.d(15): Error: no property `type` for `event` of type `diag10783.Event`
+fail_compilation/diag10783.d(10): struct `Event` defined here
+fail_compilation/diag10783.d(15): Error: undefined identifier `En`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag12063.d b/gcc/testsuite/gdc.test/fail_compilation/diag12063.d
index 882a809..3e9535a 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag12063.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag12063.d
@@ -1,8 +1,13 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag12063.d(11): Error: no property `max` for type `Foo`, perhaps `import std.algorithm;` is needed?
-fail_compilation/diag12063.d(14): Error: incompatible types for `(Foo()) + (1)`: `Bar` and `int`
+fail_compilation/diag12063.d(19): Error: cannot check `diag12063.Bar.b` value for overflow
+fail_compilation/diag12063.d(16): Error: no property `max` for type `Foo`, perhaps `import std.algorithm;` is needed?
+fail_compilation/diag12063.d(19): Error: cannot generate value for `diag12063.Bar.b`
+fail_compilation/diag12063.d(19): Error: incompatible types for `(Foo()) + (1)`: `Bar` and `int`
+fail_compilation/diag12063.d(29): Error: cannot check `diag12063.b` value for overflow
+fail_compilation/diag12063.d(29): Error: incompatible types for `(S()) == (1)`: `S` and `int`
+fail_compilation/diag12063.d(38): Error: enum member `diag12063.d` initialization with `__anonymous.c+1` causes overflow for type `Q`
---
*/
@@ -11,5 +16,36 @@ struct Foo {}
enum Bar : Foo
{
a = Foo(),
- b
+ b // no max, can't +1
+}
+
+struct S {
+ S opBinary(string s: "+")(int) => this;
+ enum max = 1; // wrong type
+}
+
+enum {
+ a = S(),
+ b // can't do S() == 1
+}
+
+struct Q {
+ enum max = Q();
+}
+
+enum {
+ c = Q(),
+ d // overflow detected
+}
+
+struct R {
+ int i;
+ R opBinary(string s: "+")(int) => this;
+ enum max = R(1);
+}
+
+enum ER
+{
+ e = R(),
+ f // OK
}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag12829.d b/gcc/testsuite/gdc.test/fail_compilation/diag12829.d
index 1d37a1e..aaedd0f 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag12829.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag12829.d
@@ -1,11 +1,14 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag12829.d(12): Error: function `diag12829.test1` is `@nogc` yet allocates closure for `test1()` with the GC
-fail_compilation/diag12829.d(15): `diag12829.test1.__lambda2` closes over variable `x` at fail_compilation/diag12829.d(14)
-fail_compilation/diag12829.d(19): `diag12829.test1.bar` closes over variable `x` at fail_compilation/diag12829.d(14)
-fail_compilation/diag12829.d(26): Error: function `diag12829.test2` is `@nogc` yet allocates closure for `test2()` with the GC
-fail_compilation/diag12829.d(31): `diag12829.test2.S.foo` closes over variable `x` at fail_compilation/diag12829.d(28)
+fail_compilation/diag12829.d(15): Error: function `diag12829.test1` is `@nogc` yet allocates closure for `test1()` with the GC
+fail_compilation/diag12829.d(18): delegate `diag12829.test1.__lambda2` closes over variable `x`
+fail_compilation/diag12829.d(17): `x` declared here
+fail_compilation/diag12829.d(22): function `diag12829.test1.bar` closes over variable `x`
+fail_compilation/diag12829.d(17): `x` declared here
+fail_compilation/diag12829.d(29): Error: function `diag12829.test2` is `@nogc` yet allocates closure for `test2()` with the GC
+fail_compilation/diag12829.d(34): function `diag12829.test2.S.foo` closes over variable `x`
+fail_compilation/diag12829.d(31): `x` declared here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag13609a.d b/gcc/testsuite/gdc.test/fail_compilation/diag13609a.d
index 039129e..7d0e259 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag13609a.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag13609a.d
@@ -1,10 +1,15 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag13609a.d(11): Error: `}` expected following members in `class` declaration at fail_compilation/diag13609a.d(8)
+fail_compilation/diag13609a.d(16): Error: `}` expected following members in `struct` declaration
+fail_compilation/diag13609a.d(15): struct starts here
+fail_compilation/diag13609a.d(16): Error: `}` expected following members in `class` declaration
+fail_compilation/diag13609a.d(11): class `C` starts here
---
*/
class C
{
void foo() {}
+
+ struct {
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag14145.d b/gcc/testsuite/gdc.test/fail_compilation/diag14145.d
index fa7c611..fee7307 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag14145.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag14145.d
@@ -1,10 +1,11 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag14145.d(15): Error: no property `i` for `_` of type `diag14145.main.Capture!(i)`
-fail_compilation/diag14145.d(15): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
-fail_compilation/diag14145.d(34): Error: expression `*this.ptr` of type `shared(int)` is not implicitly convertible to return type `ref int`
-fail_compilation/diag14145.d(16): Error: template instance `diag14145.main.Capture!(i).Capture.opDispatch!"i"` error instantiating
+fail_compilation/diag14145.d(16): Error: no property `i` for `_` of type `diag14145.main.Capture!(i)`
+fail_compilation/diag14145.d(16): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/diag14145.d(26): struct `Capture` defined here
+fail_compilation/diag14145.d(35): Error: expression `*this.ptr` of type `shared(int)` is not implicitly convertible to return type `ref int`
+fail_compilation/diag14145.d(17): Error: template instance `diag14145.main.Capture!(i).Capture.opDispatch!"i"` error instantiating
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag15713.d b/gcc/testsuite/gdc.test/fail_compilation/diag15713.d
index e4cb2a7..0071344 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag15713.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag15713.d
@@ -1,10 +1,11 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag15713.d(19): Error: no property `widthSign` for `this` of type `diag15713.WrData.Data`
-fail_compilation/diag15713.d(39): Error: template instance `diag15713.conwritefImpl!("parse-int", "width", "\n", Data(null))` error instantiating
-fail_compilation/diag15713.d(44): instantiated from here: `conwritefImpl!("main", "\n", Data(null))`
-fail_compilation/diag15713.d(49): instantiated from here: `fdwritef!()`
+fail_compilation/diag15713.d(20): Error: no property `widthSign` for `this` of type `diag15713.WrData.Data`
+fail_compilation/diag15713.d(16): struct `Data` defined here
+fail_compilation/diag15713.d(40): Error: template instance `diag15713.conwritefImpl!("parse-int", "width", "\n", Data(null))` error instantiating
+fail_compilation/diag15713.d(45): instantiated from here: `conwritefImpl!("main", "\n", Data(null))`
+fail_compilation/diag15713.d(50): instantiated from here: `fdwritef!()`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag3913.d b/gcc/testsuite/gdc.test/fail_compilation/diag3913.d
index abf70b8..41c08b0 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag3913.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag3913.d
@@ -1,8 +1,10 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag3913.d(12): Error: no property `foobardoo` for type `Foo`
-fail_compilation/diag3913.d(13): Error: no property `secon` for type `Foo`. Did you mean `Foo.second` ?
+fail_compilation/diag3913.d(14): Error: no property `foobardoo` for type `Foo`
+fail_compilation/diag3913.d(13): enum `Foo` defined here
+fail_compilation/diag3913.d(15): Error: no property `secon` for type `Foo`. Did you mean `Foo.second` ?
+fail_compilation/diag3913.d(13): enum `Foo` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag5385.d b/gcc/testsuite/gdc.test/fail_compilation/diag5385.d
index 60455ec..368fdff 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag5385.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag5385.d
@@ -2,14 +2,22 @@
EXTRA_FILES: imports/fail5385.d
TEST_OUTPUT:
---
-fail_compilation/diag5385.d(20): Error: no property `privX` for type `imports.fail5385.C`
-fail_compilation/diag5385.d(21): Error: no property `packX` for type `imports.fail5385.C`
-fail_compilation/diag5385.d(22): Error: no property `privX2` for type `imports.fail5385.C`
-fail_compilation/diag5385.d(23): Error: no property `packX2` for type `imports.fail5385.C`
-fail_compilation/diag5385.d(24): Error: no property `privX` for type `imports.fail5385.S`
-fail_compilation/diag5385.d(25): Error: no property `packX` for type `imports.fail5385.S`
-fail_compilation/diag5385.d(26): Error: no property `privX2` for type `imports.fail5385.S`
-fail_compilation/diag5385.d(27): Error: no property `packX2` for type `imports.fail5385.S`
+fail_compilation/diag5385.d(28): Error: no property `privX` for type `imports.fail5385.C`
+fail_compilation/imports/fail5385.d(3): class `C` defined here
+fail_compilation/diag5385.d(29): Error: no property `packX` for type `imports.fail5385.C`
+fail_compilation/imports/fail5385.d(3): class `C` defined here
+fail_compilation/diag5385.d(30): Error: no property `privX2` for type `imports.fail5385.C`
+fail_compilation/imports/fail5385.d(3): class `C` defined here
+fail_compilation/diag5385.d(31): Error: no property `packX2` for type `imports.fail5385.C`
+fail_compilation/imports/fail5385.d(3): class `C` defined here
+fail_compilation/diag5385.d(32): Error: no property `privX` for type `imports.fail5385.S`
+fail_compilation/imports/fail5385.d(11): struct `S` defined here
+fail_compilation/diag5385.d(33): Error: no property `packX` for type `imports.fail5385.S`
+fail_compilation/imports/fail5385.d(11): struct `S` defined here
+fail_compilation/diag5385.d(34): Error: no property `privX2` for type `imports.fail5385.S`
+fail_compilation/imports/fail5385.d(11): struct `S` defined here
+fail_compilation/diag5385.d(35): Error: no property `packX2` for type `imports.fail5385.S`
+fail_compilation/imports/fail5385.d(11): struct `S` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag7477.d b/gcc/testsuite/gdc.test/fail_compilation/diag7477.d
index b82b33d..b14a3c5 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag7477.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag7477.d
@@ -1,8 +1,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag7477.d(13): Error: integral constant must be scalar type, not `Foo`
-fail_compilation/diag7477.d(20): Error: integral constant must be scalar type, not `string`
+fail_compilation/diag7477.d(13): Error: cannot generate 0 value of type `Foo` for `a`
+fail_compilation/diag7477.d(20): Error: cannot generate 0 value of type `string` for `a`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag8697.d b/gcc/testsuite/gdc.test/fail_compilation/diag8697.d
index a2abad5..bcc0be0 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag8697.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag8697.d
@@ -1,7 +1,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag8697.d(10): Error: no property `Invalid` for type `diag8697.Base`
+fail_compilation/diag8697.d(11): Error: no property `Invalid` for type `diag8697.Base`
+fail_compilation/diag8697.d(9): class `Base` defined here
---
*/
interface InterBase : InterRoot { }
diff --git a/gcc/testsuite/gdc.test/fail_compilation/diag8894.d b/gcc/testsuite/gdc.test/fail_compilation/diag8894.d
index 7cf3023..e8e0c74 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/diag8894.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/diag8894.d
@@ -1,10 +1,14 @@
/*
TEST_OUTPUT:
---
-fail_compilation/diag8894.d(16): Error: no property `x` for `f` of type `diag8894.Foo`
-fail_compilation/diag8894.d(17): Error: no property `y` for `f` of type `diag8894.Foo`
-fail_compilation/diag8894.d(18): Error: no property `x` for `f` of type `diag8894.Foo`
-fail_compilation/diag8894.d(19): Error: no property `x` for `f` of type `diag8894.Foo`
+fail_compilation/diag8894.d(20): Error: no property `x` for `f` of type `diag8894.Foo`
+fail_compilation/diag8894.d(15): struct `Foo` defined here
+fail_compilation/diag8894.d(21): Error: no property `y` for `f` of type `diag8894.Foo`
+fail_compilation/diag8894.d(15): struct `Foo` defined here
+fail_compilation/diag8894.d(22): Error: no property `x` for `f` of type `diag8894.Foo`
+fail_compilation/diag8894.d(15): struct `Foo` defined here
+fail_compilation/diag8894.d(23): Error: no property `x` for `f` of type `diag8894.Foo`
+fail_compilation/diag8894.d(15): struct `Foo` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/dip1000_deprecation.d b/gcc/testsuite/gdc.test/fail_compilation/dip1000_deprecation.d
index e591a14..77ab520 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/dip1000_deprecation.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/dip1000_deprecation.d
@@ -1,17 +1,14 @@
/*
-REQUIRED_ARGS: -de -wo
+REQUIRED_ARGS: -de
TEST_OUTPUT:
---
-fail_compilation/dip1000_deprecation.d(20): Deprecation: `@safe` function `main` calling `inferred`
-fail_compilation/dip1000_deprecation.d(28): which wouldn't be `@safe` because of:
-fail_compilation/dip1000_deprecation.d(28): scope variable `x0` may not be returned
-fail_compilation/dip1000_deprecation.d(22): Deprecation: `@safe` function `main` calling `inferredC`
-fail_compilation/dip1000_deprecation.d(39): which calls `dip1000_deprecation.inferred`
-fail_compilation/dip1000_deprecation.d(28): which wouldn't be `@safe` because of:
-fail_compilation/dip1000_deprecation.d(28): scope variable `x0` may not be returned
-fail_compilation/dip1000_deprecation.d(54): Warning: escaping reference to stack allocated value returned by `S(null)`
-fail_compilation/dip1000_deprecation.d(55): Warning: escaping reference to stack allocated value returned by `createS()`
-fail_compilation/dip1000_deprecation.d(58): Warning: returning `s.incorrectReturnRef()` escapes a reference to local variable `s`
+fail_compilation/dip1000_deprecation.d(17): Deprecation: `@safe` function `main` calling `inferred`
+fail_compilation/dip1000_deprecation.d(25): which wouldn't be `@safe` because of:
+fail_compilation/dip1000_deprecation.d(25): scope variable `x0` may not be returned
+fail_compilation/dip1000_deprecation.d(19): Deprecation: `@safe` function `main` calling `inferredC`
+fail_compilation/dip1000_deprecation.d(36): which calls `dip1000_deprecation.inferred`
+fail_compilation/dip1000_deprecation.d(25): which wouldn't be `@safe` because of:
+fail_compilation/dip1000_deprecation.d(25): scope variable `x0` may not be returned
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/dip22a.d b/gcc/testsuite/gdc.test/fail_compilation/dip22a.d
index 324d217..661ebc3 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/dip22a.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/dip22a.d
@@ -2,11 +2,13 @@
EXTRA_FILES: imports/dip22a.d
TEST_OUTPUT:
---
-fail_compilation/dip22a.d(16): Error: no property `bar` for `new Klass` of type `imports.dip22a.Klass`
-fail_compilation/dip22a.d(17): Error: no property `bar` for `Struct()` of type `imports.dip22a.Struct`
-fail_compilation/dip22a.d(18): Error: undefined identifier `bar` in module `imports.dip22a`
-fail_compilation/dip22a.d(19): Error: no property `bar` for `Template!int` of type `void`
-fail_compilation/dip22a.d(20): Error: no property `bar` for `12` of type `int`
+fail_compilation/dip22a.d(18): Error: no property `bar` for `new Klass` of type `imports.dip22a.Klass`
+fail_compilation/imports/dip22a.d(3): class `Klass` defined here
+fail_compilation/dip22a.d(19): Error: no property `bar` for `Struct()` of type `imports.dip22a.Struct`
+fail_compilation/imports/dip22a.d(8): struct `Struct` defined here
+fail_compilation/dip22a.d(20): Error: undefined identifier `bar` in module `imports.dip22a`
+fail_compilation/dip22a.d(21): Error: no property `bar` for `Template!int` of type `void`
+fail_compilation/dip22a.d(22): Error: no property `bar` for `12` of type `int`
---
*/
import imports.dip22a;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/enum_function.d b/gcc/testsuite/gdc.test/fail_compilation/enum_function.d
index b22f2ce..52b71d1 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/enum_function.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/enum_function.d
@@ -1,10 +1,11 @@
/*
+REQUIRED_ARGS: -de
TEST_OUTPUT:
---
-fail_compilation/enum_function.d(10): Error: function cannot have enum storage class
-fail_compilation/enum_function.d(11): Error: function cannot have enum storage class
-fail_compilation/enum_function.d(12): Error: function cannot have enum storage class
-fail_compilation/enum_function.d(13): Error: function cannot have enum storage class
+fail_compilation/enum_function.d(11): Deprecation: function cannot have enum storage class
+fail_compilation/enum_function.d(12): Deprecation: function cannot have enum storage class
+fail_compilation/enum_function.d(13): Deprecation: function cannot have enum storage class
+fail_compilation/enum_function.d(14): Deprecation: function cannot have enum storage class
---
*/
enum void f1() { return; }
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail10528.d b/gcc/testsuite/gdc.test/fail_compilation/fail10528.d
index 38c5a23..3b3f5bb 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail10528.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail10528.d
@@ -2,14 +2,18 @@
EXTRA_FILES: imports/a10528.d
TEST_OUTPUT:
---
-fail_compilation/fail10528.d(20): Error: undefined identifier `a`
-fail_compilation/fail10528.d(21): Error: undefined identifier `a` in module `a10528`
-fail_compilation/fail10528.d(23): Error: undefined identifier `b`
-fail_compilation/fail10528.d(24): Error: undefined identifier `b` in module `a10528`
-fail_compilation/fail10528.d(26): Error: no property `c` for type `a10528.S`
-fail_compilation/fail10528.d(27): Error: no property `c` for type `a10528.S`
-fail_compilation/fail10528.d(29): Error: no property `d` for type `a10528.C`
-fail_compilation/fail10528.d(30): Error: no property `d` for type `a10528.C`
+fail_compilation/fail10528.d(24): Error: undefined identifier `a`
+fail_compilation/fail10528.d(25): Error: undefined identifier `a` in module `a10528`
+fail_compilation/fail10528.d(27): Error: undefined identifier `b`
+fail_compilation/fail10528.d(28): Error: undefined identifier `b` in module `a10528`
+fail_compilation/fail10528.d(30): Error: no property `c` for type `a10528.S`
+fail_compilation/imports/a10528.d(4): struct `S` defined here
+fail_compilation/fail10528.d(31): Error: no property `c` for type `a10528.S`
+fail_compilation/imports/a10528.d(4): struct `S` defined here
+fail_compilation/fail10528.d(33): Error: no property `d` for type `a10528.C`
+fail_compilation/imports/a10528.d(5): class `C` defined here
+fail_compilation/fail10528.d(34): Error: no property `d` for type `a10528.C`
+fail_compilation/imports/a10528.d(5): class `C` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail10534.d b/gcc/testsuite/gdc.test/fail_compilation/fail10534.d
index fac37f4..b5bb67c 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail10534.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail10534.d
@@ -1,22 +1,22 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail10534.d(28): Error: `a` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(28): Error: `b` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(29): Error: `a` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(29): Error: `b` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(30): Error: `a` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(30): Error: `b` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(31): Error: `a` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(31): Error: `b` is not of arithmetic type, it is a `int delegate()`
-fail_compilation/fail10534.d(36): Error: `a` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(36): Error: `b` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(37): Error: `a` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(37): Error: `b` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(38): Error: `a` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(38): Error: `b` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(39): Error: `a` is not of arithmetic type, it is a `int function()`
-fail_compilation/fail10534.d(39): Error: `b` is not of arithmetic type, it is a `int function()`
+fail_compilation/fail10534.d(28): Error: illegal operator `+` for `a` of type `int delegate()`
+fail_compilation/fail10534.d(28): Error: illegal operator `+` for `b` of type `int delegate()`
+fail_compilation/fail10534.d(29): Error: illegal operator `-` for `a` of type `int delegate()`
+fail_compilation/fail10534.d(29): Error: illegal operator `-` for `b` of type `int delegate()`
+fail_compilation/fail10534.d(30): Error: illegal operator `/` for `a` of type `int delegate()`
+fail_compilation/fail10534.d(30): Error: illegal operator `/` for `b` of type `int delegate()`
+fail_compilation/fail10534.d(31): Error: illegal operator `*` for `a` of type `int delegate()`
+fail_compilation/fail10534.d(31): Error: illegal operator `*` for `b` of type `int delegate()`
+fail_compilation/fail10534.d(36): Error: illegal operator `+` for `a` of type `int function()`
+fail_compilation/fail10534.d(36): Error: illegal operator `+` for `b` of type `int function()`
+fail_compilation/fail10534.d(37): Error: illegal operator `-` for `a` of type `int function()`
+fail_compilation/fail10534.d(37): Error: illegal operator `-` for `b` of type `int function()`
+fail_compilation/fail10534.d(38): Error: illegal operator `/` for `a` of type `int function()`
+fail_compilation/fail10534.d(38): Error: illegal operator `/` for `b` of type `int function()`
+fail_compilation/fail10534.d(39): Error: illegal operator `*` for `a` of type `int function()`
+fail_compilation/fail10534.d(39): Error: illegal operator `*` for `b` of type `int function()`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail109.d b/gcc/testsuite/gdc.test/fail_compilation/fail109.d
index 87297db..003e65f 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail109.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail109.d
@@ -34,6 +34,7 @@ enum E1 : short
/* https://issues.dlang.org/show_bug.cgi?id=14950
TEST_OUTPUT:
---
+fail_compilation/fail109.d(50): Error: cannot check `fail109.B.end` value for overflow
fail_compilation/fail109.d(50): Error: comparison between different enumeration types `B` and `C`; If this behavior is intended consider using `std.conv.asOriginalType`
fail_compilation/fail109.d(50): Error: enum member `fail109.B.end` initialization with `B.start+1` causes overflow for type `C`
---
@@ -43,7 +44,6 @@ enum C
start,
end
}
-
enum B
{
start = C.end,
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail121.d b/gcc/testsuite/gdc.test/fail_compilation/fail121.d
index 8d5af74..110547b 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail121.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail121.d
@@ -3,8 +3,9 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail121.d(23): Error: no property `typeinfo` for `list[1]` of type `fail121.myobject`
-fail_compilation/fail121.d(23): Error: no property `typeinfo` for `i` of type `int`
+fail_compilation/fail121.d(24): Error: no property `typeinfo` for `list[1]` of type `fail121.myobject`
+fail_compilation/fail121.d(12): struct `myobject` defined here
+fail_compilation/fail121.d(24): Error: no property `typeinfo` for `i` of type `int`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail136.d b/gcc/testsuite/gdc.test/fail_compilation/fail136.d
index 16659b5..3bc8653 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail136.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail136.d
@@ -1,7 +1,7 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail136.d(10): Error: found `"EF BB BF"` when expecting `;` following statement
+fail_compilation\fail136.d(10): Error: `"\xef\xbb\xbf"` has no effect
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail17570.d b/gcc/testsuite/gdc.test/fail_compilation/fail17570.d
index bee61cf..9be7cd4 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail17570.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail17570.d
@@ -1,9 +1,10 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail17570.d(11): Error: cannot use function constraints for non-template functions. Use `static if` instead
-fail_compilation/fail17570.d(11): Error: declaration expected, not `if`
-fail_compilation/fail17570.d(14): Error: `}` expected following members in `struct` declaration at fail_compilation/fail17570.d(10)
+fail_compilation/fail17570.d(12): Error: cannot use function constraints for non-template functions. Use `static if` instead
+fail_compilation/fail17570.d(12): Error: declaration expected, not `if`
+fail_compilation/fail17570.d(15): Error: `}` expected following members in `struct` declaration
+fail_compilation/fail17570.d(11): struct `S` starts here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail17969.d b/gcc/testsuite/gdc.test/fail_compilation/fail17969.d
index e6b9556..29bc3f4 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail17969.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail17969.d
@@ -1,6 +1,7 @@
/* TEST_OUTPUT:
---
-fail_compilation/fail17969.d(9): Error: no property `sum` for type `fail17969.__lambda6!(int[]).__lambda6.MapResult2!((b) => b)`
+fail_compilation/fail17969.d(10): Error: no property `sum` for type `fail17969.__lambda6!(int[]).__lambda6.MapResult2!((b) => b)`
+fail_compilation/fail17969.d(16): struct `MapResult2` defined here
---
* https://issues.dlang.org/show_bug.cgi?id=17969
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail18219.d b/gcc/testsuite/gdc.test/fail_compilation/fail18219.d
index f3ec680..bf4b189 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail18219.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail18219.d
@@ -3,10 +3,11 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail18219.d(16): Error: no property `Foobar` for type `AST`, did you mean `b18219.Foobar`?
-fail_compilation/fail18219.d(17): Error: no property `Bar` for type `a18219.AST`
-fail_compilation/fail18219.d(18): Error: no property `fun` for type `AST`, did you mean `b18219.fun`?
-fail_compilation/fail18219.d(19): Error: no property `Foobar` for type `AST`, did you mean `b18219.Foobar`?
+fail_compilation/fail18219.d(17): Error: no property `Foobar` for type `AST`, did you mean `b18219.Foobar`?
+fail_compilation/fail18219.d(18): Error: no property `Bar` for type `a18219.AST`
+fail_compilation/imports/a18219.d(3): struct `AST` defined here
+fail_compilation/fail18219.d(19): Error: no property `fun` for type `AST`, did you mean `b18219.fun`?
+fail_compilation/fail18219.d(20): Error: no property `Foobar` for type `AST`, did you mean `b18219.Foobar`?
---
*/
import imports.a18219;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail18892.d b/gcc/testsuite/gdc.test/fail_compilation/fail18892.d
index 0fb56d3..381ef3c 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail18892.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail18892.d
@@ -1,8 +1,10 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail18892.d(20): Error: no property `foo` for `a` of type `fail18892.MT`
-fail_compilation/fail18892.d(21): Error: no property `foo` for `MT` of type `fail18892.MT`
+fail_compilation/fail18892.d(22): Error: no property `foo` for `a` of type `fail18892.MT`
+fail_compilation/fail18892.d(13): struct `MT` defined here
+fail_compilation/fail18892.d(23): Error: no property `foo` for `MT` of type `fail18892.MT`
+fail_compilation/fail18892.d(13): struct `MT` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail18970.d b/gcc/testsuite/gdc.test/fail_compilation/fail18970.d
index a8156fe..9b1ec1d 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail18970.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail18970.d
@@ -1,10 +1,12 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail18970.d(24): Error: no property `y` for `S()` of type `fail18970.S`
-fail_compilation/fail18970.d(24): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
-fail_compilation/fail18970.d(31): Error: no property `yyy` for `this` of type `fail18970.S2`
-fail_compilation/fail18970.d(31): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/fail18970.d(26): Error: no property `y` for `S()` of type `fail18970.S`
+fail_compilation/fail18970.d(26): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/fail18970.d(15): struct `S` defined here
+fail_compilation/fail18970.d(33): Error: no property `yyy` for `this` of type `fail18970.S2`
+fail_compilation/fail18970.d(33): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/fail18970.d(29): struct `S2` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail18979.d b/gcc/testsuite/gdc.test/fail_compilation/fail18979.d
index 04e36f6..9b7c59a 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail18979.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail18979.d
@@ -2,7 +2,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail18979.d(13): Error: no property `__ctor` for `Foo()` of type `imports.imp18979.Foo`
+fail_compilation/fail18979.d(14): Error: no property `__ctor` for `Foo()` of type `imports.imp18979.Foo`
+fail_compilation/imports/imp18979.d(3): struct `Foo` defined here
----
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail1900.d b/gcc/testsuite/gdc.test/fail_compilation/fail1900.d
index edc4630..7e5f056 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail1900.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail1900.d
@@ -3,9 +3,9 @@ EXTRA_FILES: imports/fail1900a.d imports/fail1900b.d
TEST_OUTPUT:
---
fail_compilation/fail1900.d(27): Error: template `fail1900.Mix1a!().Foo` matches more than one template declaration:
-fail_compilation/fail1900.d(14): `Foo(ubyte x)`
-and
-fail_compilation/fail1900.d(15): `Foo(byte x)`
+fail_compilation/fail1900.d(14): `Foo(ubyte x)`
+and:
+fail_compilation/fail1900.d(15): `Foo(byte x)`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail19076.d b/gcc/testsuite/gdc.test/fail_compilation/fail19076.d
index 2441d6f..05ae21b 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail19076.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail19076.d
@@ -1,8 +1,9 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail19076.d(11): Error: no property `V` for type `fail19076.I`
-fail_compilation/fail19076.d(11): Error: `(I).V` cannot be resolved
+fail_compilation/fail19076.d(12): Error: no property `V` for type `fail19076.I`
+fail_compilation/fail19076.d(11): interface `I` defined here
+fail_compilation/fail19076.d(12): Error: `(I).V` cannot be resolved
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail19103.d b/gcc/testsuite/gdc.test/fail_compilation/fail19103.d
index 40fafcd..c1abd0d 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail19103.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail19103.d
@@ -1,9 +1,11 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail19103.d(12): Error: no property `puts` for `new C` of type `fail19103.C`
-fail_compilation/fail19103.d(14): Error: no property `puts` for `s1` of type `fail19103.S1`
-fail_compilation/fail19103.d(16): Error: no property `puts` for type `S2`, did you mean `core.stdc.stdio.puts`?
+fail_compilation/fail19103.d(14): Error: no property `puts` for `new C` of type `fail19103.C`
+fail_compilation/fail19103.d(26): class `C` defined here
+fail_compilation/fail19103.d(16): Error: no property `puts` for `s1` of type `fail19103.S1`
+fail_compilation/fail19103.d(30): struct `S1` defined here
+fail_compilation/fail19103.d(18): Error: no property `puts` for type `S2`, did you mean `core.stdc.stdio.puts`?
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail196.d b/gcc/testsuite/gdc.test/fail_compilation/fail196.d
index cdad5c4..78c5963 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail196.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail196.d
@@ -1,26 +1,35 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail196.d(29): Error: delimited string must end in `)"`
-fail_compilation/fail196.d(29): Error: implicit string concatenation is error-prone and disallowed in D
-fail_compilation/fail196.d(29): Use the explicit syntax instead (concatenating literals is `@nogc`): "foo(xxx)" ~ ";\n assert(s == "
-fail_compilation/fail196.d(30): Error: semicolon needed to end declaration of `s`, instead of `foo`
-fail_compilation/fail196.d(29): `s` declared here
-fail_compilation/fail196.d(30): Error: found `");\n\n s = q"` when expecting `;` following statement `foo(xxx)` on line fail_compilation/fail196.d(30)
-fail_compilation/fail196.d(32): Error: found `";\n assert(s == "` when expecting `;` following statement `[foo[xxx]]` on line fail_compilation/fail196.d(32)
-fail_compilation/fail196.d(33): Error: found `");\n\n s = q"` when expecting `;` following statement `foo[xxx]` on line fail_compilation/fail196.d(33)
-fail_compilation/fail196.d(35): Error: found `{` when expecting `;` following statement `foo` on line fail_compilation/fail196.d(35)
-fail_compilation/fail196.d(35): Error: found `}` when expecting `;` following statement `xxx` on line fail_compilation/fail196.d(35)
-fail_compilation/fail196.d(36): Error: found `foo` when expecting `;` following statement `";\n assert(s == "` on line fail_compilation/fail196.d(35)
-fail_compilation/fail196.d(36): Error: found `}` when expecting `;` following statement `xxx` on line fail_compilation/fail196.d(36)
-fail_compilation/fail196.d(38): Error: found `<` when expecting `;` following statement `");\n\n s = q" < foo` on line fail_compilation/fail196.d(36)
-fail_compilation/fail196.d(39): Error: found `foo` when expecting `;` following statement `xxx >> ";\n assert(s == "` on line fail_compilation/fail196.d(38)
-fail_compilation/fail196.d(39): Error: found `<` instead of statement
-fail_compilation/fail196.d(45): Error: unterminated string constant starting at fail_compilation/fail196.d(45)
-fail_compilation/fail196.d(47): Error: matching `}` expected following compound statement, not `End of File`
-fail_compilation/fail196.d(36): unmatched `{`
-fail_compilation/fail196.d(47): Error: matching `}` expected following compound statement, not `End of File`
-fail_compilation/fail196.d(28): unmatched `{`
+fail_compilation/fail196.d(38): Error: delimited string must end in `)"`
+fail_compilation/fail196.d(38): Error: implicit string concatenation is error-prone and disallowed in D
+fail_compilation/fail196.d(38): Use the explicit syntax instead (concatenating literals is `@nogc`): "foo(xxx)" ~ ";\n assert(s == "
+fail_compilation/fail196.d(39): Error: semicolon needed to end declaration of `s`, instead of `foo`
+fail_compilation/fail196.d(38): `s` declared here
+fail_compilation/fail196.d(39): Error: found `");\n\n s = q"` when expecting `;` following expression
+fail_compilation/fail196.d(39): expression: `foo(xxx)`
+fail_compilation/fail196.d(41): Error: found `";\n assert(s == "` when expecting `;` following expression
+fail_compilation/fail196.d(41): expression: `[foo[xxx]]`
+fail_compilation/fail196.d(42): Error: found `");\n\n s = q"` when expecting `;` following expression
+fail_compilation/fail196.d(42): expression: `foo[xxx]`
+fail_compilation/fail196.d(44): Error: found `{` when expecting `;` following expression
+fail_compilation/fail196.d(44): expression: `foo`
+fail_compilation/fail196.d(44): Error: found `}` when expecting `;` following expression
+fail_compilation/fail196.d(44): expression: `xxx`
+fail_compilation/fail196.d(45): Error: found `foo` when expecting `;` following expression
+fail_compilation/fail196.d(44): expression: `";\n assert(s == "`
+fail_compilation/fail196.d(45): Error: found `}` when expecting `;` following expression
+fail_compilation/fail196.d(45): expression: `xxx`
+fail_compilation/fail196.d(47): Error: found `<` when expecting `;` following expression
+fail_compilation/fail196.d(45): expression: `");\n\n s = q" < foo`
+fail_compilation/fail196.d(48): Error: found `foo` when expecting `;` following expression
+fail_compilation/fail196.d(47): expression: `xxx >> ";\n assert(s == "`
+fail_compilation/fail196.d(48): Error: found `<` instead of statement
+fail_compilation/fail196.d(54): Error: unterminated string constant starting at fail_compilation/fail196.d(54)
+fail_compilation/fail196.d(56): Error: matching `}` expected following compound statement, not `End of File`
+fail_compilation/fail196.d(45): unmatched `{`
+fail_compilation/fail196.d(56): Error: matching `}` expected following compound statement, not `End of File`
+fail_compilation/fail196.d(37): unmatched `{`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail20637.d b/gcc/testsuite/gdc.test/fail_compilation/fail20637.d
index 77c69ea..dd0a5f5 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail20637.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail20637.d
@@ -2,7 +2,8 @@
EXTRA_FILES: imports/fail20637b.d
TEST_OUTPUT:
---
-fail_compilation/fail20637.d(12): Error: no property `foo` for type `imports.fail20637b.A`
+fail_compilation/fail20637.d(13): Error: no property `foo` for type `imports.fail20637b.A`
+fail_compilation/imports/fail20637b.d(3): class `A` defined here
---
*/
module fail20637;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail22054.d b/gcc/testsuite/gdc.test/fail_compilation/fail22054.d
index c172f08..8b525d8 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail22054.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail22054.d
@@ -3,10 +3,12 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail22054.d(21): Error: no property `what` for type `fail22054.exception`
-fail_compilation/fail22054.d(16): `class fail22054.exception` is opaque and has no members.
-fail_compilation/fail22054.d(22): Error: no property `what` for type `fail22054.exception2`
-fail_compilation/fail22054.d(17): `struct fail22054.exception2` is opaque and has no members.
+fail_compilation/fail22054.d(23): Error: no property `what` for type `fail22054.exception`
+fail_compilation/fail22054.d(18): `class fail22054.exception` is opaque and has no members.
+fail_compilation/fail22054.d(18): class `exception` defined here
+fail_compilation/fail22054.d(24): Error: no property `what` for type `fail22054.exception2`
+fail_compilation/fail22054.d(19): `struct fail22054.exception2` is opaque and has no members.
+fail_compilation/fail22054.d(19): struct `exception2` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail22529.d b/gcc/testsuite/gdc.test/fail_compilation/fail22529.d
index 3bec3c0f..62eac25 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail22529.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail22529.d
@@ -3,7 +3,7 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail22529.d(13): Error: found `return` when expecting `;` following statement
+fail_compilation/fail22529.d(13): Error: found `return` when expecting `;` following expression
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail23109.d b/gcc/testsuite/gdc.test/fail_compilation/fail23109.d
index 5c5c11b..ee56075 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail23109.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail23109.d
@@ -5,7 +5,9 @@ EXTRA_SOURCES: extra-files/test23109/object.d
TEST_OUTPUT:
---
Error: no property `getHash` for `typeid(const(Ensure[]))` of type `object.TypeInfo_Const`
+fail_compilation/extra-files/test23109/object.d(7): class `TypeInfo_Const` defined here
Error: no property `getHash` for `typeid(const(Ensure[1]))` of type `object.TypeInfo_Const`
+fail_compilation/extra-files/test23109/object.d(7): class `TypeInfo_Const` defined here
fail_compilation/imports/test23109a.d(10): Error: template instance `imports.test23109a.Array!(Ensure)` error instantiating
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail235.d b/gcc/testsuite/gdc.test/fail_compilation/fail235.d
index 47f302d..bec614c 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail235.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail235.d
@@ -1,7 +1,7 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail235.d(12): Error: expression `typeid(char)` is not a valid template value argument
+fail_compilation/fail235.d(12): Error: template instance `Tuple!(typeid(char))` expression `typeid(char)` is not a valid template value argument
---
*/
template Tuple(TPL...)
@@ -14,7 +14,7 @@ auto K = Tuple!(typeid(char));
/*
TEST_OUTPUT:
---
-fail_compilation/fail235.d(24): Error: expression `typeid(char)` is not a valid template value argument
+fail_compilation/fail235.d(24): Error: template instance `Alias!(typeid(char))` expression `typeid(char)` is not a valid template value argument
---
*/
template Alias(alias A)
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail61.d b/gcc/testsuite/gdc.test/fail_compilation/fail61.d
index a2f01d7..e7175b4 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail61.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail61.d
@@ -1,10 +1,13 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail61.d(22): Error: no property `B` for type `fail61.A.B`
-fail_compilation/fail61.d(23): Error: no property `B` for type `fail61.A.B`
-fail_compilation/fail61.d(32): Error: no property `A2` for type `fail61.B2`
-fail_compilation/fail61.d(41): Error: calling non-static function `foo` requires an instance of type `B3`
+fail_compilation/fail61.d(25): Error: no property `B` for type `fail61.A.B`
+fail_compilation/fail61.d(16): class `B` defined here
+fail_compilation/fail61.d(26): Error: no property `B` for type `fail61.A.B`
+fail_compilation/fail61.d(16): class `B` defined here
+fail_compilation/fail61.d(35): Error: no property `A2` for type `fail61.B2`
+fail_compilation/fail61.d(30): class `B2` defined here
+fail_compilation/fail61.d(44): Error: calling non-static function `foo` requires an instance of type `B3`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail7861.d b/gcc/testsuite/gdc.test/fail_compilation/fail7861.d
index c7018c9..a24eb95 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail7861.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail7861.d
@@ -1,7 +1,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail7861.d(17): Error: no property `nonexistent` for type `test.B`
+fail_compilation/fail7861.d(18): Error: no property `nonexistent` for type `test.B`
+fail_compilation/fail7861.d(14): struct `B` defined here
---
*/
module test;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail9.d b/gcc/testsuite/gdc.test/fail_compilation/fail9.d
index 0878995..080ac1e 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail9.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail9.d
@@ -1,7 +1,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fail9.d(23): Error: no property `Vector` for type `fail9.Vector!int`
+fail_compilation/fail9.d(24): Error: no property `Vector` for type `fail9.Vector!int`
+fail_compilation/fail9.d(13): class `Vector` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fail_scope.d b/gcc/testsuite/gdc.test/fail_compilation/fail_scope.d
index 8508b27..a9e5429 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fail_scope.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fail_scope.d
@@ -1,23 +1,21 @@
/*
-REQUIRED_ARGS: -wo
TEST_OUTPUT:
---
-fail_compilation/fail_scope.d(30): Deprecation: scope parameter `da` may not be returned
-fail_compilation/fail_scope.d(32): Deprecation: scope parameter `o` may not be returned
-fail_compilation/fail_scope.d(33): Deprecation: scope parameter `dg` may not be returned
-fail_compilation/fail_scope.d(40): Deprecation: scope parameter `p` may not be returned
-fail_compilation/fail_scope.d(45): Error: returning `cast(char[])string` escapes a reference to local variable `string`
-fail_compilation/fail_scope.d(63): Error: returning `s.bar()` escapes a reference to local variable `s`
-fail_compilation/fail_scope.d(74): Error: `fail_scope.foo8` called with argument types `(int)` matches both:
-fail_compilation/fail_scope.d(68): `fail_scope.foo8(ref int x)`
+fail_compilation/fail_scope.d(28): Deprecation: scope parameter `da` may not be returned
+fail_compilation/fail_scope.d(30): Deprecation: scope parameter `o` may not be returned
+fail_compilation/fail_scope.d(31): Deprecation: scope parameter `dg` may not be returned
+fail_compilation/fail_scope.d(38): Deprecation: scope parameter `p` may not be returned
+fail_compilation/fail_scope.d(43): Error: returning `cast(char[])string` escapes a reference to local variable `string`
+fail_compilation/fail_scope.d(61): Error: returning `s.bar()` escapes a reference to local variable `s`
+fail_compilation/fail_scope.d(72): Error: `fail_scope.foo8` called with argument types `(int)` matches both:
+fail_compilation/fail_scope.d(66): `fail_scope.foo8(ref int x)`
and:
-fail_compilation/fail_scope.d(69): `fail_scope.foo8(return ref int x)`
-fail_compilation/fail_scope.d(82): Error: returning `& string` escapes a reference to local variable `string`
-fail_compilation/fail_scope.d(92): Error: returning `cast(int[])a` escapes a reference to local variable `a`
-fail_compilation/fail_scope.d(100): Error: returning `cast(int[])a` escapes a reference to local variable `a`
-fail_compilation/fail_scope.d(108): Error: escaping reference to outer local variable `x`
-fail_compilation/fail_scope.d(127): Warning: returning `s.bar()` escapes a reference to local variable `s`
-fail_compilation/fail_scope.d(137): Error: returning `foo16226(i)` escapes a reference to local variable `i`
+fail_compilation/fail_scope.d(67): `fail_scope.foo8(return ref int x)`
+fail_compilation/fail_scope.d(80): Error: returning `& string` escapes a reference to local variable `string`
+fail_compilation/fail_scope.d(90): Error: returning `cast(int[])a` escapes a reference to local variable `a`
+fail_compilation/fail_scope.d(98): Error: returning `cast(int[])a` escapes a reference to local variable `a`
+fail_compilation/fail_scope.d(106): Error: escaping reference to outer local variable `x`
+fail_compilation/fail_scope.d(135): Error: returning `foo16226(i)` escapes a reference to local variable `i`
---
//fail_compilation/fail_scope.d(35): Error: scope variable `da` may not be returned
//fail_compilation/fail_scope.d(37): Error: scope variable `o` may not be returned
diff --git a/gcc/testsuite/gdc.test/fail_compilation/faildottypeinfo.d b/gcc/testsuite/gdc.test/fail_compilation/faildottypeinfo.d
index c44b289..9b62c26 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/faildottypeinfo.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/faildottypeinfo.d
@@ -1,8 +1,9 @@
/*
TEST_OUTPUT:
---
-fail_compilation/faildottypeinfo.d(11): Error: no property `typeinfo` for `0` of type `int`
-fail_compilation/faildottypeinfo.d(12): Error: no property `typeinfo` for type `object.Object`
+fail_compilation/faildottypeinfo.d(12): Error: no property `typeinfo` for `0` of type `int`
+fail_compilation/faildottypeinfo.d(13): Error: no property `typeinfo` for type `object.Object`
+$p:druntime/import/object.d$($n$): class `Object` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/fnconstraint.d b/gcc/testsuite/gdc.test/fail_compilation/fnconstraint.d
index 5862f7a..21603f7 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/fnconstraint.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/fnconstraint.d
@@ -1,11 +1,12 @@
/*
TEST_OUTPUT:
---
-fail_compilation/fnconstraint.d(13): Error: template constraint must follow parameter lists and attributes
-fail_compilation/fnconstraint.d(13): Error: declaration expected, not `if`
-fail_compilation/fnconstraint.d(22): Error: template constraint must follow parameter lists and attributes
-fail_compilation/fnconstraint.d(22): Error: declaration expected, not `if`
-fail_compilation/fnconstraint.d(26): Error: `}` expected following members in `struct` declaration at fail_compilation/fnconstraint.d(18)
+fail_compilation/fnconstraint.d(14): Error: template constraint must follow parameter lists and attributes
+fail_compilation/fnconstraint.d(14): Error: declaration expected, not `if`
+fail_compilation/fnconstraint.d(23): Error: template constraint must follow parameter lists and attributes
+fail_compilation/fnconstraint.d(23): Error: declaration expected, not `if`
+fail_compilation/fnconstraint.d(27): Error: `}` expected following members in `struct` declaration
+fail_compilation/fnconstraint.d(19): struct `S` starts here
---
*/
void foo()()
diff --git a/gcc/testsuite/gdc.test/fail_compilation/goto_skip.d b/gcc/testsuite/gdc.test/fail_compilation/goto_skip.d
new file mode 100644
index 0000000..21bce5d
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/goto_skip.d
@@ -0,0 +1,57 @@
+/*
+REQUIRED_ARGS: -verrors=context
+TEST_OUTPUT:
+---
+fail_compilation/goto_skip.d(28): Error: `goto` skips declaration of variable `goto_skip.skip.ch`
+ goto Lskip;
+ ^
+fail_compilation/goto_skip.d(29): declared here
+ char ch = '!';
+ ^
+fail_compilation/goto_skip.d(36): Error: `goto` skips declaration of `with` temporary
+ goto L1;
+ ^
+fail_compilation/goto_skip.d(38): declared here
+ with (S()) {
+ ^
+fail_compilation/goto_skip.d(46): Error: `goto` skips declaration of variable `goto_skip.test8.e`
+ goto L2;
+ ^
+fail_compilation/goto_skip.d(51): declared here
+ catch (Exception e) {
+ ^
+---
+*/
+char skip(bool b)
+{
+ if (b)
+ goto Lskip;
+ char ch = '!';
+Lskip:
+ return ch;
+}
+
+int f()
+{
+ goto L1;
+ struct S { int e = 5; }
+ with (S()) {
+L1:
+ return e;
+ }
+}
+
+void test8(int a)
+{
+ goto L2;
+
+ try {
+ a += 2;
+ }
+ catch (Exception e) {
+ a += 3;
+L2: ;
+ a += 100;
+ }
+ assert(a == 100);
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/hexstring.d b/gcc/testsuite/gdc.test/fail_compilation/hexstring.d
new file mode 100644
index 0000000..de83db9
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/hexstring.d
@@ -0,0 +1,18 @@
+/**
+TEST_OUTPUT:
+---
+fail_compilation\hexstring.d(16): Error: cannot implicitly convert expression `"123F"` of type `string` to `immutable(ubyte[])`
+fail_compilation\hexstring.d(17): Error: cannot implicitly convert expression `"\x12?"c` of type `string` to `immutable(ubyte[])`
+fail_compilation\hexstring.d(18): Error: cannot implicitly convert expression `"\x12?"` of type `string` to `immutable(ubyte[])`
+fail_compilation\hexstring.d(15): Error: cannot implicitly convert expression `"\x12?"` of type `string` to `ubyte[]`
+---
+*/
+immutable ubyte[] s0 = x"123F";
+static assert(s0[0] == 0x12);
+static assert(s0[1] == 0x3F);
+immutable byte[] s1 = x"123F";
+
+ubyte[] f1 = x"123F";
+immutable ubyte[] f2 = "123F";
+immutable ubyte[] f3 = x"123F"c;
+immutable ubyte[] f4 = cast(string) x"123F";
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice10713.d b/gcc/testsuite/gdc.test/fail_compilation/ice10713.d
index f368032..e59a594 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ice10713.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ice10713.d
@@ -1,7 +1,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/ice10713.d(10): Error: no property `nonExistingField` for type `ice10713.S`
+fail_compilation/ice10713.d(11): Error: no property `nonExistingField` for type `ice10713.S`
+fail_compilation/ice10713.d(9): struct `S` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice10938.d b/gcc/testsuite/gdc.test/fail_compilation/ice10938.d
index d21ee47..4d107c9 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ice10938.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ice10938.d
@@ -1,8 +1,9 @@
/*
TEST_OUTPUT:
---
-fail_compilation/ice10938.d(13): Error: no property `opts` for `this` of type `ice10938.C`
-fail_compilation/ice10938.d(13): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/ice10938.d(14): Error: no property `opts` for `this` of type `ice10938.C`
+fail_compilation/ice10938.d(14): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/ice10938.d(10): class `C` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice11518.d b/gcc/testsuite/gdc.test/fail_compilation/ice11518.d
index c8542f7..4e4f617 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ice11518.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ice11518.d
@@ -2,9 +2,9 @@
TEST_OUTPUT:
---
fail_compilation/ice11518.d(17): Error: class `ice11518.B` matches more than one template declaration:
-fail_compilation/ice11518.d(12): `B(T : A!T)`
-and
-fail_compilation/ice11518.d(13): `B(T : A!T)`
+fail_compilation/ice11518.d(12): `B(T : A!T)`
+and:
+fail_compilation/ice11518.d(13): `B(T : A!T)`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice11982.d b/gcc/testsuite/gdc.test/fail_compilation/ice11982.d
index 0886df6..f500700 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ice11982.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ice11982.d
@@ -1,20 +1,22 @@
/*
TEST_OUTPUT:
---
-fail_compilation/ice11982.d(20): Error: basic type expected, not `scope`
-fail_compilation/ice11982.d(20): Error: found `scope` when expecting `;` following statement `new _error_` on line fail_compilation/ice11982.d(20)
-fail_compilation/ice11982.d(20): Error: basic type expected, not `}`
-fail_compilation/ice11982.d(20): Error: missing `{ ... }` for function literal
-fail_compilation/ice11982.d(20): Error: C style cast illegal, use `cast(funk)function _error_()
+fail_compilation/ice11982.d(22): Error: basic type expected, not `scope`
+fail_compilation/ice11982.d(22): Error: found `scope` when expecting `;` following expression
+fail_compilation/ice11982.d(22): expression: `new _error_`
+fail_compilation/ice11982.d(22): Error: basic type expected, not `}`
+fail_compilation/ice11982.d(22): Error: missing `{ ... }` for function literal
+fail_compilation/ice11982.d(22): Error: C style cast illegal, use `cast(funk)function _error_()
{
}
`
-fail_compilation/ice11982.d(20): Error: found `}` when expecting `;` following statement `cast(funk)function _error_()
+fail_compilation/ice11982.d(22): Error: found `}` when expecting `;` following expression
+fail_compilation/ice11982.d(22): expression: `cast(funk)function _error_()
{
}
-` on line fail_compilation/ice11982.d(20)
-fail_compilation/ice11982.d(21): Error: matching `}` expected following compound statement, not `End of File`
-fail_compilation/ice11982.d(20): unmatched `{`
+`
+fail_compilation/ice11982.d(23): Error: matching `}` expected following compound statement, not `End of File`
+fail_compilation/ice11982.d(22): unmatched `{`
---
*/
void main() { new scope ( funk ) function }
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice6538.d b/gcc/testsuite/gdc.test/fail_compilation/ice6538.d
index af7c554..9355a0f 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ice6538.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ice6538.d
@@ -6,7 +6,7 @@
/*
TEST_OUTPUT:
---
-fail_compilation/ice6538.d(23): Error: expression `super` is not a valid template value argument
+fail_compilation/ice6538.d(23): Error: template instance `Sym!(super)` expression `super` is not a valid template value argument
fail_compilation/ice6538.d(28): Error: template `ice6538.D.foo` is not callable using argument types `!()()`
fail_compilation/ice6538.d(23): Candidate is: `foo()()`
---
diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice8100.d b/gcc/testsuite/gdc.test/fail_compilation/ice8100.d
index dc68cfc..b8b3155 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/ice8100.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/ice8100.d
@@ -1,9 +1,10 @@
/*
TEST_OUTPUT:
---
-fail_compilation/ice8100.d(10): Error: no property `Q` for type `ice8100.Bar!bool`
-fail_compilation/ice8100.d(11): Error: template instance `ice8100.Foo!(Bar!bool)` error instantiating
-fail_compilation/ice8100.d(12): instantiated from here: `Bar!bool`
+fail_compilation/ice8100.d(11): Error: no property `Q` for type `ice8100.Bar!bool`
+fail_compilation/ice8100.d(12): class `Bar` defined here
+fail_compilation/ice8100.d(12): Error: template instance `ice8100.Foo!(Bar!bool)` error instantiating
+fail_compilation/ice8100.d(13): instantiated from here: `Bar!bool`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/issue11070.d b/gcc/testsuite/gdc.test/fail_compilation/issue11070.d
new file mode 100644
index 0000000..07d69ee
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/issue11070.d
@@ -0,0 +1,17 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/issue11070.d(16): Error: undefined identifier `x`
+---
+*/
+
+int get() { return 1; }
+
+void test() {
+ import std.stdio : writeln;
+ switch (auto x = get()) {
+ default:
+ auto z = x;
+ }
+ x = 1;
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/issue12652.d b/gcc/testsuite/gdc.test/fail_compilation/issue12652.d
deleted file mode 100644
index 0ddd6b4..0000000
--- a/gcc/testsuite/gdc.test/fail_compilation/issue12652.d
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-TEST_OUTPUT:
-----
-fail_compilation/issue12652.d(18): Error: static initializations of associative arrays is not allowed.
-fail_compilation/issue12652.d(18): associative arrays must be initialized at runtime: https://dlang.org/spec/hash-map.html#runtime_initialization
----
-*/
-
-enum A
-{
- x,
- y,
- z
-}
-
-struct S
-{
- string[A] t = [A.x : "aaa", A.y : "bbb"];
-}
-
-void main ()
-{
- S s;
-}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/issue22682.d b/gcc/testsuite/gdc.test/fail_compilation/issue22682.d
new file mode 100644
index 0000000..80e8311
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/issue22682.d
@@ -0,0 +1,18 @@
+/* TEST_OUTPUT:
+---
+fail_compilation/issue22682.d(14): Error: `pragma(mangle)` must be attached to a declaration
+fail_compilation/issue22682.d(15): Error: `pragma(mangle)` takes a single argument that must be a string literal
+fail_compilation/issue22682.d(16): Error: `string` expected for pragma mangle argument, not `(0)` of type `int`
+fail_compilation/issue22682.d(16): Error: `pragma(mangle)` takes a single argument that must be a string literal
+fail_compilation/issue22682.d(17): Error: `pragma(mangle)` must be attached to a declaration
+---
+ */
+module issue22682;
+
+void main()
+{
+ pragma(mangle) {}
+ pragma(mangle) static int i0;
+ pragma(mangle, 0) static int i1;
+ pragma(mangle);
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/issue3396.d b/gcc/testsuite/gdc.test/fail_compilation/issue3396.d
new file mode 100644
index 0000000..7c6efd2
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/issue3396.d
@@ -0,0 +1,24 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/issue3396.d(17): Error: call to unimplemented abstract function `void M()`
+fail_compilation/issue3396.d(17): declared here: fail_compilation/issue3396.d(12)
+---
+*/
+module issue3396;
+
+abstract class A
+{
+ abstract void M();
+}
+
+class B:A
+{
+ override void M(){ super.M(); }
+}
+
+void test()
+{
+ auto b=new B();
+ b.M();
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/lexer1.d b/gcc/testsuite/gdc.test/fail_compilation/lexer1.d
index 0ad3f01..7fe2a53 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/lexer1.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/lexer1.d
@@ -1,8 +1,7 @@
/*
TEST_OUTPUT:
---
-fail_compilation/lexer1.d(30): Error: no identifier for declarator `x`
-fail_compilation/lexer1.d(30): Error: declaration expected, not `"01 02 03"w`
+fail_compilation/lexer1.d(30): Error: declaration expected, not `x"01 02 03"w`
fail_compilation/lexer1.d(31): Error: declaration expected, not `2147483649U`
fail_compilation/lexer1.d(32): Error: declaration expected, not `0.1`
fail_compilation/lexer1.d(33): Error: declaration expected, not `0.1f`
@@ -26,6 +25,7 @@ fail_compilation/lexer1.d(52): Error: escape octal sequence \400 is larger than
fail_compilation/lexer1.d(53): Error: html entity requires 2 code units, use a string instead of a character
---
*/
+
// https://dlang.dawg.eu/coverage/src/lexer.c.gcov.html
x"01 02 03"w;
0x80000001;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/lexer2.d b/gcc/testsuite/gdc.test/fail_compilation/lexer2.d
index f895e64..f8fae85 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/lexer2.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/lexer2.d
@@ -1,16 +1,16 @@
/*
TEST_OUTPUT:
---
-fail_compilation/lexer2.d(16): Error: semicolon expected following auto declaration, not `"123"`
-fail_compilation/lexer2.d(16): Error: declaration expected, not `"123"`
-fail_compilation/lexer2.d(17): Error: semicolon expected following auto declaration, not `"123G"`
-fail_compilation/lexer2.d(17): Error: declaration expected, not `"123G"`
+fail_compilation/lexer2.d(16): Error: odd number (3) of hex characters in hex string
+fail_compilation/lexer2.d(17): Error: non-hex character 'G' in hex string
fail_compilation/lexer2.d(18): Error: heredoc rest of line should be blank
fail_compilation/lexer2.d(20): Error: unterminated delimited string constant starting at fail_compilation/lexer2.d(20)
fail_compilation/lexer2.d(22): Error: semicolon expected following auto declaration, not `End of File`
---
*/
+
+
// https://dlang.dawg.eu/coverage/src/lexer.c.gcov.html
static s1 = x"123";
diff --git a/gcc/testsuite/gdc.test/fail_compilation/main.d b/gcc/testsuite/gdc.test/fail_compilation/main.d
new file mode 100644
index 0000000..42a8a43
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/main.d
@@ -0,0 +1,9 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/main.d(9): Error: only one entry point `main`$?:windows=, `WinMain` or `DllMain`$ is allowed
+fail_compilation/main.d(8): previously found `void main()` here
+---
+*/
+void main() {}
+void main(string[] args) {}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/match_func_ptr.d b/gcc/testsuite/gdc.test/fail_compilation/match_func_ptr.d
new file mode 100644
index 0000000..7f59183
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/match_func_ptr.d
@@ -0,0 +1,17 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/match_func_ptr.d(13): Error: cannot match delegate literal to function pointer type `void function()`
+fail_compilation/match_func_ptr.d(14): Error: cannot match function literal to delegate type `void delegate()`
+fail_compilation/match_func_ptr.d(15): Error: cannot infer parameter types from `int function()`
+fail_compilation/match_func_ptr.d(16): Error: cannot infer parameter types from `int delegate(int, int)`
+---
+*/
+
+void main()
+{
+ void function() f = delegate {};
+ void delegate() d = function {};
+ int function() f2 = i => 2;
+ int delegate(int, int) d2 = i => 2;
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/misc_parser_err_cov1.d b/gcc/testsuite/gdc.test/fail_compilation/misc_parser_err_cov1.d
index 57706b59..9de436b 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/misc_parser_err_cov1.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/misc_parser_err_cov1.d
@@ -23,7 +23,8 @@ fail_compilation/misc_parser_err_cov1.d(40): Error: semicolon expected following
fail_compilation/misc_parser_err_cov1.d(40): Error: identifier or `new` expected following `.`, not `+`
fail_compilation/misc_parser_err_cov1.d(41): Error: identifier or new keyword expected following `(...)`.
fail_compilation/misc_parser_err_cov1.d(41): Error: expression expected, not `;`
-fail_compilation/misc_parser_err_cov1.d(42): Error: found `}` when expecting `;` following statement `(__error) + 0` on line fail_compilation/misc_parser_err_cov1.d(41)
+fail_compilation/misc_parser_err_cov1.d(42): Error: found `}` when expecting `;` following expression
+fail_compilation/misc_parser_err_cov1.d(41): expression: `(__error) + 0`
fail_compilation/misc_parser_err_cov1.d(43): Error: matching `}` expected following compound statement, not `End of File`
fail_compilation/misc_parser_err_cov1.d(33): unmatched `{`
---
diff --git a/gcc/testsuite/gdc.test/fail_compilation/nogc3.d b/gcc/testsuite/gdc.test/fail_compilation/nogc3.d
index 3bd7167..b53903f 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/nogc3.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/nogc3.d
@@ -43,10 +43,12 @@ fail_compilation/nogc3.d(35): Error: `@nogc` function `nogc3.testCall` cannot ca
/*
TEST_OUTPUT:
---
-fail_compilation/nogc3.d(52): Error: function `nogc3.testClosure1` is `@nogc` yet allocates closure for `testClosure1()` with the GC
-fail_compilation/nogc3.d(55): `nogc3.testClosure1.bar` closes over variable `x` at fail_compilation/nogc3.d(54)
-fail_compilation/nogc3.d(64): Error: function `nogc3.testClosure3` is `@nogc` yet allocates closure for `testClosure3()` with the GC
-fail_compilation/nogc3.d(67): `nogc3.testClosure3.bar` closes over variable `x` at fail_compilation/nogc3.d(66)
+fail_compilation/nogc3.d(54): Error: function `nogc3.testClosure1` is `@nogc` yet allocates closure for `testClosure1()` with the GC
+fail_compilation/nogc3.d(57): function `nogc3.testClosure1.bar` closes over variable `x`
+fail_compilation/nogc3.d(56): `x` declared here
+fail_compilation/nogc3.d(66): Error: function `nogc3.testClosure3` is `@nogc` yet allocates closure for `testClosure3()` with the GC
+fail_compilation/nogc3.d(69): function `nogc3.testClosure3.bar` closes over variable `x`
+fail_compilation/nogc3.d(68): `x` declared here
---
*/
@nogc auto testClosure1()
@@ -73,10 +75,10 @@ fail_compilation/nogc3.d(67): `nogc3.testClosure3.bar` closes over variab
/*
TEST_OUTPUT:
---
-fail_compilation/nogc3.d(85): Error: array literal in `@nogc` function `nogc3.foo13702` may cause a GC allocation
-fail_compilation/nogc3.d(86): Error: array literal in `@nogc` function `nogc3.foo13702` may cause a GC allocation
-fail_compilation/nogc3.d(92): Error: array literal in `@nogc` function `nogc3.bar13702` may cause a GC allocation
-fail_compilation/nogc3.d(91): Error: array literal in `@nogc` function `nogc3.bar13702` may cause a GC allocation
+fail_compilation/nogc3.d(87): Error: array literal in `@nogc` function `nogc3.foo13702` may cause a GC allocation
+fail_compilation/nogc3.d(88): Error: array literal in `@nogc` function `nogc3.foo13702` may cause a GC allocation
+fail_compilation/nogc3.d(94): Error: array literal in `@nogc` function `nogc3.bar13702` may cause a GC allocation
+fail_compilation/nogc3.d(93): Error: array literal in `@nogc` function `nogc3.bar13702` may cause a GC allocation
---
*/
int[] foo13702(bool b) @nogc
diff --git a/gcc/testsuite/gdc.test/fail_compilation/noreturn_expr.d b/gcc/testsuite/gdc.test/fail_compilation/noreturn_expr.d
new file mode 100644
index 0000000..c72bade
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/noreturn_expr.d
@@ -0,0 +1,16 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/noreturn_expr.d(10): Error: type `noreturn` is not an expression
+---
+*/
+
+int v(e)()
+{
+ return e + 0;
+}
+
+int main()
+{
+ return v!(noreturn)();
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/noreturn_expr2.d b/gcc/testsuite/gdc.test/fail_compilation/noreturn_expr2.d
new file mode 100644
index 0000000..f5e0054
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/noreturn_expr2.d
@@ -0,0 +1,14 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/noreturn_expr2.d(8): Error: cannot cast `noreturn` to `int` at compile time
+---
+*/
+
+enum E {e1 = 1, e2 = 2, illegal = noreturn}
+
+void main()
+{
+ E e;
+ e = E.illegal;
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/operator_undefined.d b/gcc/testsuite/gdc.test/fail_compilation/operator_undefined.d
new file mode 100644
index 0000000..3065bdb
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/operator_undefined.d
@@ -0,0 +1,20 @@
+/*
+TEST_OUTPUT:
+---
+fail_compilation/operator_undefined.d(19): Error: operator `-` is not defined for `toJson(2)` of type `Json`
+---
+*/
+
+import std.stdio;
+
+struct Json
+{
+ //int opUnary(string op : "-")();
+}
+
+Json toJson(int);
+
+void main()
+{
+ auto x = -2.toJson;
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/parseStc.d b/gcc/testsuite/gdc.test/fail_compilation/parseStc.d
index d13006d..9a24741 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/parseStc.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/parseStc.d
@@ -3,12 +3,12 @@ TEST_OUTPUT:
---
fail_compilation/parseStc.d(12): Error: missing closing `)` after `if (x`
fail_compilation/parseStc.d(12): Error: use `{ }` for an empty statement, not `;`
-fail_compilation/parseStc.d(12): Error: found `)` when expecting `;` following statement `1` on line fail_compilation/parseStc.d(12)
+fail_compilation/parseStc.d(12): Error: found `)` when expecting `;` following expression
+fail_compilation/parseStc.d(12): expression: `1`
fail_compilation/parseStc.d(13): Error: redundant attribute `const`
---
*/
-void test1()
-{
+void test1() {
if (x; 1) {}
if (const const auto x = 1) {}
}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/reserved_version.d b/gcc/testsuite/gdc.test/fail_compilation/reserved_version.d
index f7a554c..ac00b4d 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/reserved_version.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/reserved_version.d
@@ -76,49 +76,53 @@ fail_compilation/reserved_version.d(175): Error: version identifier `SH` is rese
fail_compilation/reserved_version.d(176): Error: version identifier `Alpha` is reserved and cannot be set
fail_compilation/reserved_version.d(177): Error: version identifier `Alpha_SoftFloat` is reserved and cannot be set
fail_compilation/reserved_version.d(178): Error: version identifier `Alpha_HardFloat` is reserved and cannot be set
-fail_compilation/reserved_version.d(179): Error: version identifier `LittleEndian` is reserved and cannot be set
-fail_compilation/reserved_version.d(180): Error: version identifier `BigEndian` is reserved and cannot be set
-fail_compilation/reserved_version.d(181): Error: version identifier `ELFv1` is reserved and cannot be set
-fail_compilation/reserved_version.d(182): Error: version identifier `ELFv2` is reserved and cannot be set
-fail_compilation/reserved_version.d(183): Error: version identifier `CRuntime_Bionic` is reserved and cannot be set
-fail_compilation/reserved_version.d(184): Error: version identifier `CRuntime_DigitalMars` is reserved and cannot be set
-fail_compilation/reserved_version.d(185): Error: version identifier `CRuntime_Glibc` is reserved and cannot be set
-fail_compilation/reserved_version.d(186): Error: version identifier `CRuntime_Microsoft` is reserved and cannot be set
-fail_compilation/reserved_version.d(187): Error: version identifier `CRuntime_Musl` is reserved and cannot be set
-fail_compilation/reserved_version.d(188): Error: version identifier `CRuntime_Newlib` is reserved and cannot be set
-fail_compilation/reserved_version.d(189): Error: version identifier `CRuntime_UClibc` is reserved and cannot be set
-fail_compilation/reserved_version.d(190): Error: version identifier `CRuntime_WASI` is reserved and cannot be set
-fail_compilation/reserved_version.d(191): Error: version identifier `D_Coverage` is reserved and cannot be set
-fail_compilation/reserved_version.d(192): Error: version identifier `D_Ddoc` is reserved and cannot be set
-fail_compilation/reserved_version.d(193): Error: version identifier `D_InlineAsm_X86` is reserved and cannot be set
-fail_compilation/reserved_version.d(194): Error: version identifier `D_InlineAsm_X86_64` is reserved and cannot be set
-fail_compilation/reserved_version.d(195): Error: version identifier `D_LP64` is reserved and cannot be set
-fail_compilation/reserved_version.d(196): Error: version identifier `D_X32` is reserved and cannot be set
-fail_compilation/reserved_version.d(197): Error: version identifier `D_HardFloat` is reserved and cannot be set
-fail_compilation/reserved_version.d(198): Error: version identifier `D_SoftFloat` is reserved and cannot be set
-fail_compilation/reserved_version.d(199): Error: version identifier `D_PIC` is reserved and cannot be set
-fail_compilation/reserved_version.d(200): Error: version identifier `D_SIMD` is reserved and cannot be set
-fail_compilation/reserved_version.d(201): Error: version identifier `D_Version2` is reserved and cannot be set
-fail_compilation/reserved_version.d(202): Error: version identifier `D_NoBoundsChecks` is reserved and cannot be set
-fail_compilation/reserved_version.d(205): Error: version identifier `all` is reserved and cannot be set
-fail_compilation/reserved_version.d(206): Error: version identifier `none` is reserved and cannot be set
-fail_compilation/reserved_version.d(207): Error: version identifier `AsmJS` is reserved and cannot be set
-fail_compilation/reserved_version.d(208): Error: version identifier `Emscripten` is reserved and cannot be set
-fail_compilation/reserved_version.d(209): Error: version identifier `WebAssembly` is reserved and cannot be set
-fail_compilation/reserved_version.d(210): Error: version identifier `WASI` is reserved and cannot be set
-fail_compilation/reserved_version.d(211): Error: version identifier `CppRuntime_Clang` is reserved and cannot be set
-fail_compilation/reserved_version.d(212): Error: version identifier `CppRuntime_DigitalMars` is reserved and cannot be set
-fail_compilation/reserved_version.d(213): Error: version identifier `CppRuntime_Gcc` is reserved and cannot be set
-fail_compilation/reserved_version.d(214): Error: version identifier `CppRuntime_Microsoft` is reserved and cannot be set
-fail_compilation/reserved_version.d(215): Error: version identifier `CppRuntime_Sun` is reserved and cannot be set
-fail_compilation/reserved_version.d(216): Error: version identifier `D_PIE` is reserved and cannot be set
-fail_compilation/reserved_version.d(217): Error: version identifier `AVR` is reserved and cannot be set
-fail_compilation/reserved_version.d(218): Error: version identifier `D_PreConditions` is reserved and cannot be set
-fail_compilation/reserved_version.d(219): Error: version identifier `D_PostConditions` is reserved and cannot be set
-fail_compilation/reserved_version.d(220): Error: version identifier `D_ProfileGC` is reserved and cannot be set
-fail_compilation/reserved_version.d(221): Error: version identifier `D_Invariants` is reserved and cannot be set
-fail_compilation/reserved_version.d(222): Error: version identifier `D_Optimized` is reserved and cannot be set
-fail_compilation/reserved_version.d(223): Error: version identifier `VisionOS` is reserved and cannot be set
+fail_compilation/reserved_version.d(179): Error: version identifier `LoongArch32` is reserved and cannot be set
+fail_compilation/reserved_version.d(180): Error: version identifier `LoongArch64` is reserved and cannot be set
+fail_compilation/reserved_version.d(181): Error: version identifier `LoongArch_HardFloat` is reserved and cannot be set
+fail_compilation/reserved_version.d(182): Error: version identifier `LoongArch_SoftFloat` is reserved and cannot be set
+fail_compilation/reserved_version.d(183): Error: version identifier `LittleEndian` is reserved and cannot be set
+fail_compilation/reserved_version.d(184): Error: version identifier `BigEndian` is reserved and cannot be set
+fail_compilation/reserved_version.d(185): Error: version identifier `ELFv1` is reserved and cannot be set
+fail_compilation/reserved_version.d(186): Error: version identifier `ELFv2` is reserved and cannot be set
+fail_compilation/reserved_version.d(187): Error: version identifier `CRuntime_Bionic` is reserved and cannot be set
+fail_compilation/reserved_version.d(188): Error: version identifier `CRuntime_DigitalMars` is reserved and cannot be set
+fail_compilation/reserved_version.d(189): Error: version identifier `CRuntime_Glibc` is reserved and cannot be set
+fail_compilation/reserved_version.d(190): Error: version identifier `CRuntime_Microsoft` is reserved and cannot be set
+fail_compilation/reserved_version.d(191): Error: version identifier `CRuntime_Musl` is reserved and cannot be set
+fail_compilation/reserved_version.d(192): Error: version identifier `CRuntime_Newlib` is reserved and cannot be set
+fail_compilation/reserved_version.d(193): Error: version identifier `CRuntime_UClibc` is reserved and cannot be set
+fail_compilation/reserved_version.d(194): Error: version identifier `CRuntime_WASI` is reserved and cannot be set
+fail_compilation/reserved_version.d(195): Error: version identifier `D_Coverage` is reserved and cannot be set
+fail_compilation/reserved_version.d(196): Error: version identifier `D_Ddoc` is reserved and cannot be set
+fail_compilation/reserved_version.d(197): Error: version identifier `D_InlineAsm_X86` is reserved and cannot be set
+fail_compilation/reserved_version.d(198): Error: version identifier `D_InlineAsm_X86_64` is reserved and cannot be set
+fail_compilation/reserved_version.d(199): Error: version identifier `D_LP64` is reserved and cannot be set
+fail_compilation/reserved_version.d(200): Error: version identifier `D_X32` is reserved and cannot be set
+fail_compilation/reserved_version.d(201): Error: version identifier `D_HardFloat` is reserved and cannot be set
+fail_compilation/reserved_version.d(202): Error: version identifier `D_SoftFloat` is reserved and cannot be set
+fail_compilation/reserved_version.d(203): Error: version identifier `D_PIC` is reserved and cannot be set
+fail_compilation/reserved_version.d(204): Error: version identifier `D_SIMD` is reserved and cannot be set
+fail_compilation/reserved_version.d(205): Error: version identifier `D_Version2` is reserved and cannot be set
+fail_compilation/reserved_version.d(206): Error: version identifier `D_NoBoundsChecks` is reserved and cannot be set
+fail_compilation/reserved_version.d(209): Error: version identifier `all` is reserved and cannot be set
+fail_compilation/reserved_version.d(210): Error: version identifier `none` is reserved and cannot be set
+fail_compilation/reserved_version.d(211): Error: version identifier `AsmJS` is reserved and cannot be set
+fail_compilation/reserved_version.d(212): Error: version identifier `Emscripten` is reserved and cannot be set
+fail_compilation/reserved_version.d(213): Error: version identifier `WebAssembly` is reserved and cannot be set
+fail_compilation/reserved_version.d(214): Error: version identifier `WASI` is reserved and cannot be set
+fail_compilation/reserved_version.d(215): Error: version identifier `CppRuntime_Clang` is reserved and cannot be set
+fail_compilation/reserved_version.d(216): Error: version identifier `CppRuntime_DigitalMars` is reserved and cannot be set
+fail_compilation/reserved_version.d(217): Error: version identifier `CppRuntime_Gcc` is reserved and cannot be set
+fail_compilation/reserved_version.d(218): Error: version identifier `CppRuntime_Microsoft` is reserved and cannot be set
+fail_compilation/reserved_version.d(219): Error: version identifier `CppRuntime_Sun` is reserved and cannot be set
+fail_compilation/reserved_version.d(220): Error: version identifier `D_PIE` is reserved and cannot be set
+fail_compilation/reserved_version.d(221): Error: version identifier `AVR` is reserved and cannot be set
+fail_compilation/reserved_version.d(222): Error: version identifier `D_PreConditions` is reserved and cannot be set
+fail_compilation/reserved_version.d(223): Error: version identifier `D_PostConditions` is reserved and cannot be set
+fail_compilation/reserved_version.d(224): Error: version identifier `D_ProfileGC` is reserved and cannot be set
+fail_compilation/reserved_version.d(225): Error: version identifier `D_Invariants` is reserved and cannot be set
+fail_compilation/reserved_version.d(226): Error: version identifier `D_Optimized` is reserved and cannot be set
+fail_compilation/reserved_version.d(227): Error: version identifier `VisionOS` is reserved and cannot be set
---
*/
@@ -199,6 +203,10 @@ version = SH;
version = Alpha;
version = Alpha_SoftFloat;
version = Alpha_HardFloat;
+version = LoongArch32;
+version = LoongArch64;
+version = LoongArch_HardFloat;
+version = LoongArch_SoftFloat;
version = LittleEndian;
version = BigEndian;
version = ELFv1;
@@ -315,6 +323,10 @@ debug = WASI;
debug = Alpha;
debug = Alpha_SoftFloat;
debug = Alpha_HardFloat;
+debug = LoongArch32;
+debug = LoongArch64;
+debug = LoongArch_HardFloat;
+debug = LoongArch_SoftFloat;
debug = LittleEndian;
debug = BigEndian;
debug = ELFv1;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/reserved_version_switch.d b/gcc/testsuite/gdc.test/fail_compilation/reserved_version_switch.d
index f5f6b1c..44db404 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/reserved_version_switch.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/reserved_version_switch.d
@@ -70,6 +70,10 @@
// REQUIRED_ARGS: -version=Alpha
// REQUIRED_ARGS: -version=Alpha_SoftFloat
// REQUIRED_ARGS: -version=Alpha_HardFloat
+// REQUIRED_ARGS: -version=LoongArch32
+// REQUIRED_ARGS: -version=LoongArch64
+// REQUIRED_ARGS: -version=LoongArch_HardFloat
+// REQUIRED_ARGS: -version=LoongArch_SoftFloat
// REQUIRED_ARGS: -version=LittleEndian
// REQUIRED_ARGS: -version=BigEndian
// REQUIRED_ARGS: -version=ELFv1
@@ -175,6 +179,10 @@
// REQUIRED_ARGS: -debug=Alpha
// REQUIRED_ARGS: -debug=Alpha_SoftFloat
// REQUIRED_ARGS: -debug=Alpha_HardFloat
+// REQUIRED_ARGS: -debug=LoongArch32
+// REQUIRED_ARGS: -debug=LoongArch64
+// REQUIRED_ARGS: -debug=LoongArch_HardFloat
+// REQUIRED_ARGS: -debug=LoongArch_SoftFloat
// REQUIRED_ARGS: -debug=LittleEndian
// REQUIRED_ARGS: -debug=BigEndian
// REQUIRED_ARGS: -debug=ELFv1
@@ -286,6 +294,10 @@ Error: version identifier `WASI` is reserved and cannot be set
Error: version identifier `Alpha` is reserved and cannot be set
Error: version identifier `Alpha_SoftFloat` is reserved and cannot be set
Error: version identifier `Alpha_HardFloat` is reserved and cannot be set
+Error: version identifier `LoongArch32` is reserved and cannot be set
+Error: version identifier `LoongArch64` is reserved and cannot be set
+Error: version identifier `LoongArch_HardFloat` is reserved and cannot be set
+Error: version identifier `LoongArch_SoftFloat` is reserved and cannot be set
Error: version identifier `LittleEndian` is reserved and cannot be set
Error: version identifier `BigEndian` is reserved and cannot be set
Error: version identifier `ELFv1` is reserved and cannot be set
diff --git a/gcc/testsuite/gdc.test/fail_compilation/retscope.d b/gcc/testsuite/gdc.test/fail_compilation/retscope.d
index c08747f..ce983c0 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/retscope.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/retscope.d
@@ -55,7 +55,7 @@ void test2(scope int* p, int[] a ...) @safe
TEST_OUTPUT:
---
fail_compilation/retscope.d(75): Error: function `retscope.HTTP.Impl.onReceive` is `@nogc` yet allocates closure for `onReceive()` with the GC
-fail_compilation/retscope.d(77): `retscope.HTTP.Impl.onReceive.__lambda1` closes over variable `this` at fail_compilation/retscope.d(75)
+fail_compilation/retscope.d(77): delegate `retscope.HTTP.Impl.onReceive.__lambda1` closes over variable `this`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/skip.d b/gcc/testsuite/gdc.test/fail_compilation/skip.d
index 0f3a9ec..6207a10 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/skip.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/skip.d
@@ -2,8 +2,10 @@
* REQUIRED_ARGS: -de
* TEST_OUTPUT:
---
-fail_compilation/skip.d(21): Error: `switch` skips declaration of `with` temporary at fail_compilation/skip.d(26)
-fail_compilation/skip.d(43): Error: `switch` skips declaration of variable `skip.test14532.n` at fail_compilation/skip.d(45)
+fail_compilation/skip.d(23): Error: `switch` skips declaration of `with` temporary
+fail_compilation/skip.d(28): declared here
+fail_compilation/skip.d(45): Error: `switch` skips declaration of variable `skip.test14532.n`
+fail_compilation/skip.d(47): declared here
---
*/
// https://issues.dlang.org/show_bug.cgi?id=10524
diff --git a/gcc/testsuite/gdc.test/fail_compilation/switch_skip.d b/gcc/testsuite/gdc.test/fail_compilation/switch_skip.d
new file mode 100644
index 0000000..7141fb1
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/switch_skip.d
@@ -0,0 +1,48 @@
+/*
+REQUIRED_ARGS: -verrors=context
+TEST_OUTPUT:
+---
+fail_compilation/switch_skip.d(22): Error: `switch` skips declaration of variable `switch_skip.test3.j`
+ switch (i)
+ ^
+fail_compilation/switch_skip.d(26): declared here
+ int j;
+ ^
+fail_compilation/switch_skip.d(39): Error: `switch` skips declaration of variable `switch_skip.test.z`
+ final switch(n)
+ ^
+fail_compilation/switch_skip.d(41): declared here
+ int z = 5;
+ ^
+---
+*/
+
+void test3(int i)
+{
+ switch (i)
+ {
+ case 1:
+ {
+ int j;
+ case 2:
+ ++j;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=18858
+int test(int n)
+{
+ final switch(n)
+ {
+ int z = 5;
+ enum e = 6;
+
+ case 1:
+ int y = 2;
+ return y;
+ }
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/switches.d b/gcc/testsuite/gdc.test/fail_compilation/switches.d
index b53fb4c..06652c8 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/switches.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/switches.d
@@ -1,13 +1,11 @@
-/************************************************************/
-
/*
TEST_OUTPUT:
---
-fail_compilation/switches.d(105): Error: `case 2` not found
+fail_compilation/switches.d(14): Error: `case 2` not found
+fail_compilation/switches.d(25): Error: no `case` statement following `goto case;`
---
*/
-#line 100
void test1(int i)
{
switch (i)
@@ -19,16 +17,6 @@ void test1(int i)
}
}
-/************************************************************/
-
-/*
-TEST_OUTPUT:
----
-fail_compilation/switches.d(205): Error: no `case` statement following `goto case;`
----
-*/
-
-#line 200
void test2(int i)
{
switch (i)
@@ -39,55 +27,3 @@ void test2(int i)
break;
}
}
-
-/************************************************************/
-
-/*
-TEST_OUTPUT:
----
-fail_compilation/switches.d(302): Error: `switch` skips declaration of variable `switches.test3.j` at fail_compilation/switches.d(306)
----
-*/
-
-#line 300
-void test3(int i)
-{
- switch (i)
- {
- case 1:
- {
- int j;
- case 2:
- ++j;
- break;
- }
- default:
- break;
- }
-}
-
-
-/************************************************************/
-
-/*
-TEST_OUTPUT:
----
-fail_compilation/switches.d(404): Error: `switch` skips declaration of variable `switches.test.z` at fail_compilation/switches.d(406)
----
-*/
-
-#line 400
-// https://issues.dlang.org/show_bug.cgi?id=18858
-
-int test(int n)
-{
- final switch(n)
- {
- int z = 5;
- enum e = 6;
-
- case 1:
- int y = 2;
- return y;
- }
-}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test13536.d b/gcc/testsuite/gdc.test/fail_compilation/test13536.d
index 4a4bb26..eff807a 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test13536.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test13536.d
@@ -1,9 +1,8 @@
-/* REQUIRED_ARGS: -wo
+/*
TEST_OUTPUT:
---
-fail_compilation/test13536.d(23): Error: field `U.sysDg` cannot access pointers in `@safe` code that overlap other fields
-fail_compilation/test13536.d(23): Warning: address of variable `s` assigned to `u` with longer lifetime
-fail_compilation/test13536.d(24): Error: field `U.safeDg` cannot access pointers in `@safe` code that overlap other fields
+fail_compilation/test13536.d(22): Error: field `U.sysDg` cannot access pointers in `@safe` code that overlap other fields
+fail_compilation/test13536.d(23): Error: field `U.safeDg` cannot access pointers in `@safe` code that overlap other fields
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test15785.d b/gcc/testsuite/gdc.test/fail_compilation/test15785.d
index 594b5d3..fc61e3d 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test15785.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test15785.d
@@ -2,8 +2,9 @@
/*
TEST_OUTPUT:
---
-fail_compilation/test15785.d(16): Error: no property `foo` for `super` of type `imports.test15785.Base`
-fail_compilation/test15785.d(17): Error: undefined identifier `bar`
+fail_compilation/test15785.d(17): Error: no property `foo` for `super` of type `imports.test15785.Base`
+fail_compilation/imports/test15785.d(3): class `Base` defined here
+fail_compilation/test15785.d(18): Error: undefined identifier `bar`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test15897.d b/gcc/testsuite/gdc.test/fail_compilation/test15897.d
index db554cb..c911263 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test15897.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test15897.d
@@ -3,7 +3,8 @@
/*
TEST_OUTPUT:
---
-fail_compilation/test15897.d(19): Error: no property `create` for `cat` of type `imports.test15897.Cat`
+fail_compilation/test15897.d(20): Error: no property `create` for `cat` of type `imports.test15897.Cat`
+fail_compilation/imports/test15897.d(4): class `Cat` defined here
---
*/
module test15897;
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test16188.d b/gcc/testsuite/gdc.test/fail_compilation/test16188.d
index bdaae94..0bd052c 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test16188.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test16188.d
@@ -3,6 +3,7 @@
---
fail_compilation/test16188.d(101): Error: no property `name` for `Where()` of type `test16188.Where`
fail_compilation/test16188.d(101): potentially malformed `opDispatch`. Use an explicit instantiation to get a better error message
+fail_compilation/test16188.d(103): struct `Where` defined here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test16193.d b/gcc/testsuite/gdc.test/fail_compilation/test16193.d
index 6c80471..39399cf 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test16193.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test16193.d
@@ -2,8 +2,9 @@
REQUIRED_ARGS: -preview=dip1000
TEST_OUTPUT:
---
-fail_compilation/test16193.d(38): Error: function `test16193.abc` is `@nogc` yet allocates closure for `abc()` with the GC
-fail_compilation/test16193.d(40): `test16193.abc.__foreachbody2` closes over variable `x` at fail_compilation/test16193.d(39)
+fail_compilation/test16193.d(39): Error: function `test16193.abc` is `@nogc` yet allocates closure for `abc()` with the GC
+fail_compilation/test16193.d(41): delegate `test16193.abc.__foreachbody2` closes over variable `x`
+fail_compilation/test16193.d(40): `x` declared here
---
*/
//fail_compilation/test16193.d(22): To enforce `@safe`, the compiler allocates a closure unless `opApply()` uses `scope`
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test16365.d b/gcc/testsuite/gdc.test/fail_compilation/test16365.d
index 4d49365..5bfa5f8 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test16365.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test16365.d
@@ -1,10 +1,9 @@
-/* REQUIRED_ARGS: -wo
+/*
TEST_OUTPUT:
---
-fail_compilation/test16365.d(21): Error: `this` reference necessary to take address of member `f1` in `@safe` function `main`
-fail_compilation/test16365.d(23): Error: cannot implicitly convert expression `&f2` of type `void delegate() pure nothrow @nogc @safe` to `void function() @safe`
-fail_compilation/test16365.d(27): Warning: address of variable `s` assigned to `dg` with longer lifetime
-fail_compilation/test16365.d(28): Error: `dg.funcptr` cannot be used in `@safe` code
+fail_compilation/test16365.d(20): Error: `this` reference necessary to take address of member `f1` in `@safe` function `main`
+fail_compilation/test16365.d(22): Error: cannot implicitly convert expression `&f2` of type `void delegate() pure nothrow @nogc @safe` to `void function() @safe`
+fail_compilation/test16365.d(27): Error: `dg.funcptr` cannot be used in `@safe` code
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test17380spec.d b/gcc/testsuite/gdc.test/fail_compilation/test17380spec.d
index f523337..8ab8739 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test17380spec.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test17380spec.d
@@ -1,9 +1,10 @@
/* REQUIRED_ARGS: -verrors=spec
TEST_OUTPUT:
---
-(spec:1) fail_compilation/test17380spec.d(14): Error: cannot resolve identifier `ThisTypeDoesNotExistAndCrashesTheCompiler`
-(spec:1) fail_compilation/test17380spec.d(14): Error: no property `ThisTypeDoesNotExistAndCrashesTheCompiler` for `this.opCast()` of type `test17380spec.Uint128`
-fail_compilation/test17380spec.d(14): Error: undefined identifier `ThisTypeDoesNotExistAndCrashesTheCompiler`
+(spec:1) fail_compilation/test17380spec.d(15): Error: cannot resolve identifier `ThisTypeDoesNotExistAndCrashesTheCompiler`
+(spec:1) fail_compilation/test17380spec.d(15): Error: no property `ThisTypeDoesNotExistAndCrashesTheCompiler` for `this.opCast()` of type `test17380spec.Uint128`
+(spec:1) fail_compilation/test17380spec.d(20): struct `Uint128` defined here
+fail_compilation/test17380spec.d(15): Error: undefined identifier `ThisTypeDoesNotExistAndCrashesTheCompiler`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test20522.d b/gcc/testsuite/gdc.test/fail_compilation/test20522.d
new file mode 100644
index 0000000..1d21c17
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test20522.d
@@ -0,0 +1,23 @@
+/*
+REQUIRED_ARGS: -w
+TEST_OUTPUT:
+---
+fail_compilation/test20522.d(19): Error: undefined identifier `non_existent`
+---
+*/
+
+// https://issues.dlang.org/show_bug.cgi?id=20522
+struct File
+{
+ ~this() {}
+}
+
+void main()
+{
+ {
+ auto test = File(); // <- Essential
+ non_existent;
+ }
+ // Warning: statement is not reachable
+ string[] is_this_unreachable_question_mark;
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test20655.d b/gcc/testsuite/gdc.test/fail_compilation/test20655.d
new file mode 100644
index 0000000..c3bb70a
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test20655.d
@@ -0,0 +1,32 @@
+/*
+REQUIRED_ARGS: -de
+TEST_OUTPUT:
+---
+fail_compilation/test20655.d(29): Deprecation: `@safe` function `g` calling `f1`
+fail_compilation/test20655.d(24): which wouldn't be `@safe` because of:
+fail_compilation/test20655.d(24): field `U.s` cannot access pointers in `@safe` code that overlap other fields
+fail_compilation/test20655.d(30): Deprecation: `@safe` function `g` calling `f2`
+fail_compilation/test20655.d(25): which wouldn't be `@safe` because of:
+fail_compilation/test20655.d(25): field `U.s` cannot access pointers in `@safe` code that overlap other fields
+fail_compilation/test20655.d(31): Deprecation: `@safe` function `g` calling `f3`
+fail_compilation/test20655.d(28): which wouldn't be `@safe` because of:
+fail_compilation/test20655.d(28): field `U.s` cannot access pointers in `@safe` code that overlap other fields
+---
+*/
+
+union U
+{
+ string s;
+ int x;
+}
+U u;
+
+auto f1() { auto s = u.s; } /* Should be inferred as @system. */
+void f2()() { auto s = u.s; } /* ditto */
+void g() @safe
+{
+ void f3() { auto s = u.s; } /* ditto */
+ f1(); /* Should be rejected with error "cannot call @system function". */
+ f2(); /* ditto */
+ f3(); /* ditto */
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test21353.d b/gcc/testsuite/gdc.test/fail_compilation/test21353.d
index 55c84f9..14fa26d 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test21353.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test21353.d
@@ -2,11 +2,14 @@
EXTRA_FILES: imports/imp21353.d
TEST_OUTPUT:
---
-fail_compilation/test21353.d(19): Error: no property `A` for type `imports.imp21353.B`
-fail_compilation/test21353.d(20): Error: no property `A` for type `imports.imp21353.B`
-fail_compilation/test21353.d(21): Error: no property `A` for type `imports.imp21353.B`
-fail_compilation/test21353.d(23): Error: undefined identifier `P` in module `imports.imp21353`
-fail_compilation/test21353.d(24): Error: undefined identifier `P` in module `imports.imp21353`
+fail_compilation/test21353.d(22): Error: no property `A` for type `imports.imp21353.B`
+fail_compilation/imports/imp21353.d(5): struct `B` defined here
+fail_compilation/test21353.d(23): Error: no property `A` for type `imports.imp21353.B`
+fail_compilation/imports/imp21353.d(5): struct `B` defined here
+fail_compilation/test21353.d(24): Error: no property `A` for type `imports.imp21353.B`
+fail_compilation/imports/imp21353.d(5): struct `B` defined here
+fail_compilation/test21353.d(26): Error: undefined identifier `P` in module `imports.imp21353`
+fail_compilation/test21353.d(27): Error: undefined identifier `P` in module `imports.imp21353`
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test21912.d b/gcc/testsuite/gdc.test/fail_compilation/test21912.d
index 9b07eba..f8bcb40 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test21912.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test21912.d
@@ -2,14 +2,18 @@
PERMUTE_ARGS: -preview=dip1000
TEST_OUTPUT:
---
-fail_compilation/test21912.d(24): Error: function `test21912.escapeParam` is `@nogc` yet allocates closure for `escapeParam()` with the GC
-fail_compilation/test21912.d(26): `test21912.escapeParam.__lambda2` closes over variable `i` at fail_compilation/test21912.d(24)
-fail_compilation/test21912.d(29): Error: function `test21912.escapeAssign` is `@nogc` yet allocates closure for `escapeAssign()` with the GC
-fail_compilation/test21912.d(31): `test21912.escapeAssign.__lambda3` closes over variable `i` at fail_compilation/test21912.d(29)
-fail_compilation/test21912.d(40): Error: function `test21912.escapeAssignRef` is `@nogc` yet allocates closure for `escapeAssignRef()` with the GC
-fail_compilation/test21912.d(42): `test21912.escapeAssignRef.__lambda3` closes over variable `i` at fail_compilation/test21912.d(40)
-fail_compilation/test21912.d(51): Error: function `test21912.escapeParamInferred` is `@nogc` yet allocates closure for `escapeParamInferred()` with the GC
-fail_compilation/test21912.d(53): `test21912.escapeParamInferred.__lambda2` closes over variable `i` at fail_compilation/test21912.d(51)
+fail_compilation/test21912.d(28): Error: function `test21912.escapeParam` is `@nogc` yet allocates closure for `escapeParam()` with the GC
+fail_compilation/test21912.d(30): delegate `test21912.escapeParam.__lambda2` closes over variable `i`
+fail_compilation/test21912.d(28): `i` declared here
+fail_compilation/test21912.d(33): Error: function `test21912.escapeAssign` is `@nogc` yet allocates closure for `escapeAssign()` with the GC
+fail_compilation/test21912.d(35): delegate `test21912.escapeAssign.__lambda3` closes over variable `i`
+fail_compilation/test21912.d(33): `i` declared here
+fail_compilation/test21912.d(44): Error: function `test21912.escapeAssignRef` is `@nogc` yet allocates closure for `escapeAssignRef()` with the GC
+fail_compilation/test21912.d(46): delegate `test21912.escapeAssignRef.__lambda3` closes over variable `i`
+fail_compilation/test21912.d(44): `i` declared here
+fail_compilation/test21912.d(55): Error: function `test21912.escapeParamInferred` is `@nogc` yet allocates closure for `escapeParamInferred()` with the GC
+fail_compilation/test21912.d(57): delegate `test21912.escapeParamInferred.__lambda2` closes over variable `i`
+fail_compilation/test21912.d(55): `i` declared here
---
*/
@nogc:
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test22329.d b/gcc/testsuite/gdc.test/fail_compilation/test22329.d
index 237f9c7..25c83f5 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test22329.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test22329.d
@@ -4,8 +4,9 @@
TEST_OUTPUT:
---
fail_compilation/imports/imp22329.d(3): Error: no property `values` for type `test22329.Foo`
+fail_compilation/test22329.d(13): struct `Foo` defined here
fail_compilation/imports/imp22329.d(3): Error: incompatible types for `(arg) + (1)`: `Foo` and `int`
-fail_compilation/test22329.d(20): Error: template instance `imp22329.func!(Foo)` error instantiating
+fail_compilation/test22329.d(21): Error: template instance `imp22329.func!(Foo)` error instantiating
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test23112.d b/gcc/testsuite/gdc.test/fail_compilation/test23112.d
index 325d89b..9bbab80 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/test23112.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/test23112.d
@@ -2,7 +2,8 @@
TEST_OUTPUT:
---
fail_compilation/test23112.d(106): Error: function `test23112.bar` is `@nogc` yet allocates closure for `bar()` with the GC
-fail_compilation/test23112.d(108): `test23112.bar.f` closes over variable `a` at fail_compilation/test23112.d(106)
+fail_compilation/test23112.d(108): function `test23112.bar.f` closes over variable `a`
+fail_compilation/test23112.d(106): `a` declared here
---
*/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test24015.d b/gcc/testsuite/gdc.test/fail_compilation/test24015.d
new file mode 100644
index 0000000..c9bc42e
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test24015.d
@@ -0,0 +1,20 @@
+/* REQUIRED_ARGS: -preview=dip1000
+ * TEST_OUTPUT:
+---
+fail_compilation/test24015.d(19): Error: scope variable `v` assigned to non-scope parameter `...` calling `jer`
+---
+*/
+
+// https://issues.dlang.org/show_bug.cgi?id=24105
+
+@safe:
+
+extern (C) void ben(int i, scope ...);
+
+extern (C) void jer(int i, ...);
+
+void bar(scope const char* v)
+{
+ ben(3, v);
+ jer(3, v);
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test24036.d b/gcc/testsuite/gdc.test/fail_compilation/test24036.d
new file mode 100644
index 0000000..da529f9
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test24036.d
@@ -0,0 +1,21 @@
+/*
+https://issues.dlang.org/show_bug.cgi?id=24036
+Issue 24036 - assert message in CTFE becomes `['m', 'e', 's', 's', 'a', 'g', 'e'][0..7]` if produced using std.format.format
+
+TEST_OUTPUT:
+---
+fail_compilation/test24036.d(19): Error: message
+fail_compilation/test24036.d(21): called from here: `(*function () pure nothrow @safe => 42)()`
+---
+*/
+
+auto format()
+{
+ return ['m', 'e', 's', 's', 'a', 'g', 'e'][0 .. 7];
+}
+
+immutable ctfeThing = ()
+{
+ assert(0, format());
+ return 42;
+}();
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test24065.d b/gcc/testsuite/gdc.test/fail_compilation/test24065.d
new file mode 100644
index 0000000..9e4ebbf
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test24065.d
@@ -0,0 +1,18 @@
+// https://issues.dlang.org/show_bug.cgi?id=24065
+
+/*
+TEST_OUTPUT:
+---
+fail_compilation/test24065.d(12): Error: string expected as argument of __traits `getTargetInfo` instead of `int`
+fail_compilation/test24065.d(15): Error: string expected as argument of __traits `getTargetInfo` instead of `foo`
+fail_compilation/test24065.d(18): Error: string expected as argument of __traits `getTargetInfo` instead of `e`
+---
+*/
+
+auto s1 = __traits(getTargetInfo, int);
+
+void foo() {}
+auto s2 = __traits(getTargetInfo, foo);
+
+enum e;
+auto s3 = __traits(getTargetInfo, e);
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test24084.d b/gcc/testsuite/gdc.test/fail_compilation/test24084.d
new file mode 100644
index 0000000..51de15e
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test24084.d
@@ -0,0 +1,28 @@
+/* REQUIRED_ARGS: -nothrow
+ * TEST_OUTPUT:
+---
+fail_compilation/test24084.d(110): Error: cannot use `throw` statements with -nothrow
+fail_compilation/test24084.d(112): Error: cannot use try-catch statements with -nothrow
+---
+ */
+
+// https://issues.dlang.org/show_bug.cgi?id=24084
+
+#line 100
+
+struct S
+{
+ int x;
+ ~this() { }
+}
+
+void xyzzy()
+{
+ S s;
+ throw new Exception("xx");
+
+ try
+ {
+ int y;
+ } catch (Exception) { }
+}
diff --git a/gcc/testsuite/gdc.test/fail_compilation/test24110.d b/gcc/testsuite/gdc.test/fail_compilation/test24110.d
new file mode 100644
index 0000000..acf7788
--- /dev/null
+++ b/gcc/testsuite/gdc.test/fail_compilation/test24110.d
@@ -0,0 +1,12 @@
+// https://issues.dlang.org/show_bug.cgi?id=24110
+
+/*
+TEST_OUTPUT:
+---
+fail_compilation/test24110.d(12): Error: static assert: `__traits(compiles, __error)` is false
+---
+*/
+
+struct S { int x; }
+alias T = shared S;
+static assert(__traits(compiles, (T[] a, T[] b) => a < b));
diff --git a/gcc/testsuite/gdc.test/fail_compilation/testOpApply.d b/gcc/testsuite/gdc.test/fail_compilation/testOpApply.d
index 9203685..8d6c736 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/testOpApply.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/testOpApply.d
@@ -48,9 +48,9 @@ void testSameAttr() @system
TEST_OUTPUT:
---
fail_compilation/testOpApply.d(217): Error: `sa.opApply` matches more than one declaration:
-`fail_compilation/testOpApply.d(203)`: `int(int delegate(int) dg)`
+fail_compilation/testOpApply.d(203): `int(int delegate(int) dg)`
and:
-`fail_compilation/testOpApply.d(208)`: `int(int delegate(string) dg)`
+fail_compilation/testOpApply.d(208): `int(int delegate(string) dg)`
fail_compilation/testOpApply.d(217): Error: cannot uniquely infer `foreach` argument types
---
+/
@@ -79,9 +79,9 @@ void testDifferentTypes()
TEST_OUTPUT:
---
fail_compilation/testOpApply.d(317): Error: `sa.opApply` matches more than one declaration:
-`fail_compilation/testOpApply.d(303)`: `int(int delegate(int) dg)`
+fail_compilation/testOpApply.d(303): `int(int delegate(int) dg)`
and:
-`fail_compilation/testOpApply.d(308)`: `int(int delegate(long) dg)`
+fail_compilation/testOpApply.d(308): `int(int delegate(long) dg)`
fail_compilation/testOpApply.d(317): Error: cannot uniquely infer `foreach` argument types
---
+/
@@ -112,9 +112,9 @@ See https://issues.dlang.org/show_bug.cgi?id=21683
TEST_OUTPUT:
---
fail_compilation/testOpApply.d(420): Error: `sa.opApply` matches more than one declaration:
-`fail_compilation/testOpApply.d(404)`: `int(int delegate(int) dg)`
+fail_compilation/testOpApply.d(404): `int(int delegate(int) dg)`
and:
-`fail_compilation/testOpApply.d(410)`: `int(int delegate(ref int) dg)`
+fail_compilation/testOpApply.d(410): `int(int delegate(ref int) dg)`
fail_compilation/testOpApply.d(420): Error: cannot uniquely infer `foreach` argument types
---
+/
@@ -146,9 +146,9 @@ void testDifferentQualifiers()
TEST_OUTPUT:
---
fail_compilation/testOpApply.d(504): Error: `sa.opApply` matches more than one declaration:
-`fail_compilation/testOpApply.d(404)`: `int(int delegate(int) dg)`
+fail_compilation/testOpApply.d(404): `int(int delegate(int) dg)`
and:
-`fail_compilation/testOpApply.d(410)`: `int(int delegate(ref int) dg)`
+fail_compilation/testOpApply.d(410): `int(int delegate(ref int) dg)`
fail_compilation/testOpApply.d(504): Error: cannot uniquely infer `foreach` argument types
---
+/
diff --git a/gcc/testsuite/gdc.test/fail_compilation/testsemi.d b/gcc/testsuite/gdc.test/fail_compilation/testsemi.d
index fc9bc65..77601a5 100644
--- a/gcc/testsuite/gdc.test/fail_compilation/testsemi.d
+++ b/gcc/testsuite/gdc.test/fail_compilation/testsemi.d
@@ -8,7 +8,8 @@ fail_compilation/testsemi.d(117): Error: found `int` when expecting `;` followin
fail_compilation/testsemi.d(117): Error: no identifier for declarator `x`
fail_compilation/testsemi.d(123): Error: found `int` when expecting `;` following mixin
fail_compilation/testsemi.d(129): Error: found `int` when expecting `;` following `import` Expression
-fail_compilation/testsemi.d(131): Error: `}` expected following members in `class` declaration at fail_compilation/testsemi.d(112)
+fail_compilation/testsemi.d(131): Error: `}` expected following members in `class` declaration
+fail_compilation/testsemi.d(112): class `C` starts here
---
*/
diff --git a/gcc/testsuite/gdc.test/runnable/issue11070.d b/gcc/testsuite/gdc.test/runnable/issue11070.d
new file mode 100644
index 0000000..073607d
--- /dev/null
+++ b/gcc/testsuite/gdc.test/runnable/issue11070.d
@@ -0,0 +1,19 @@
+/* RUN_OUTPUT:
+---
+inside switch: 1
+---
+*/
+
+int get() { return 1; }
+
+void test() {
+ import core.stdc.stdio : printf;
+ switch (auto x = get()) {
+ default:
+ printf("inside switch: %d\n", x);
+ }
+}
+
+void main() {
+ test();
+}
diff --git a/gcc/testsuite/gdc.test/runnable/staticaa.d b/gcc/testsuite/gdc.test/runnable/staticaa.d
new file mode 100644
index 0000000..17a2ecb
--- /dev/null
+++ b/gcc/testsuite/gdc.test/runnable/staticaa.d
@@ -0,0 +1,153 @@
+// https://issues.dlang.org/show_bug.cgi?id=23103
+// Issue 23103 - static initialization of associative arrays is not implemented
+
+nothrow @safe:
+
+/////////////////////////////////////////////
+
+int[int] globalAA = [1: 10, 2: 20];
+
+void testSimple()
+{
+ assert(globalAA[1] == 10);
+ assert(globalAA[2] == 20);
+ assert(!(30 in globalAA));
+
+ foreach (i; 0 .. 1000)
+ {
+ globalAA[i] = i * 10;
+ assert(globalAA[i] == i * 10);
+ }
+}
+
+/////////////////////////////////////////////
+
+struct Composit
+{
+ string[string][] aa;
+}
+
+auto getAA() { return ["a": "A"]; }
+
+immutable Composit compositAA = Composit([getAA(), ["b": "B"]]);
+
+void testComposit() pure
+{
+ assert(compositAA.aa[0]["a"] == "A");
+ assert(compositAA.aa[1]["b"] == "B");
+}
+
+/////////////////////////////////////////////
+
+struct Destructing
+{
+ int v;
+ static int destructorsCalled = 0;
+
+ ~this() nothrow
+ {
+ // FIXME: the lowering to newaa calls the destructor at CTFE, so we can't modify globals in it
+ if (!__ctfe)
+ destructorsCalled++;
+ }
+}
+
+struct Key
+{
+ int v;
+ bool opEquals(ref const Key o) const { return v == o.v; }
+ size_t toHash() const { return v; }
+}
+
+Destructing[Key] dAa = [Key(1): Destructing(10), Key(2): Destructing(20)];
+
+void testDestructor()
+{
+ assert(dAa[Key(1)].v == 10);
+ assert(dAa[Key(2)].v == 20);
+ assert(Destructing.destructorsCalled == 0);
+ dAa[Key(1)] = Destructing(100);
+ assert(dAa[Key(1)].v == 100);
+ assert(Destructing.destructorsCalled == 1);
+}
+
+/////////////////////////////////////////////
+
+enum A
+{
+ x, y, z
+}
+
+struct S
+{
+ string[A] t = [A.x : "A.x", A.y : "A.y"];
+}
+
+void testStructInit()
+{
+ S s;
+ assert(s.t[A.x] == "A.x");
+ assert(s.t[A.y] == "A.y");
+}
+
+struct S2
+{
+ string[A] t = [A.x : "A.x", A.y : "A.y"];
+}
+
+bool testStructInitCTFE()
+{
+ S2 s2;
+ assert(s2.t[A.x] == "A.x");
+ assert(s2.t[A.y] == "A.y");
+ return true;
+}
+static assert(testStructInitCTFE());
+
+/////////////////////////////////////////////
+
+class C
+{
+ string[int] t = [0 : "zero"];
+}
+
+void testClassInit()
+{
+ C c = new C();
+ assert(c.t[0] == "zero");
+}
+
+class C2
+{
+ string[int] t = [0 : "zero"];
+}
+
+bool testClassInitCTFE()
+{
+ C2 c2 = new C2();
+ assert(c2.t[0] == "zero");
+ return true;
+}
+static assert(testClassInitCTFE());
+
+/////////////////////////////////////////////
+
+immutable(string)[immutable(int)] immutableAA = [1: "one", 2: "two"];
+
+void testImmutable()
+{
+ assert(immutableAA[1] == "one");
+ assert(immutableAA[2] == "two");
+}
+
+/////////////////////////////////////////////
+
+void main()
+{
+ testSimple();
+ testComposit();
+ testDestructor();
+ testStructInit();
+ testClassInit();
+ testImmutable();
+}
diff --git a/gcc/testsuite/gdc.test/runnable/template9.d b/gcc/testsuite/gdc.test/runnable/template9.d
index fa70b81..7a55b2d 100644
--- a/gcc/testsuite/gdc.test/runnable/template9.d
+++ b/gcc/testsuite/gdc.test/runnable/template9.d
@@ -5036,6 +5036,13 @@ void test15653()
/******************************************/
+template foo23733(T, alias T a) {}
+
+int n23733;
+alias _ = foo23733!(int, n23733);
+
+/******************************************/
+
int main()
{
test1();
diff --git a/gcc/testsuite/gdc.test/runnable/test24078.d b/gcc/testsuite/gdc.test/runnable/test24078.d
new file mode 100644
index 0000000..99d7440
--- /dev/null
+++ b/gcc/testsuite/gdc.test/runnable/test24078.d
@@ -0,0 +1,6 @@
+//https://issues.dlang.org/show_bug.cgi?id=24078
+
+void main()
+{
+ assert(["c"] ~ "a" ~ "b" == ["c", "a", "b"]);
+}
diff --git a/gcc/testsuite/gdc.test/runnable/test24139.d b/gcc/testsuite/gdc.test/runnable/test24139.d
new file mode 100644
index 0000000..af6215d
--- /dev/null
+++ b/gcc/testsuite/gdc.test/runnable/test24139.d
@@ -0,0 +1,25 @@
+// https://issues.dlang.org/show_bug.cgi?id=24139
+
+struct S1
+{
+ int x;
+ extern(C++) ~this() { assert(&this == s1); }
+}
+
+extern(C++) struct S2
+{
+ int x;
+ ~this() { assert(&this == s2); }
+}
+
+S1* s1;
+S2* s2;
+
+void main()
+{
+ s1 = new S1;
+ s2 = new S2;
+
+ typeid(S1).destroy(s1);
+ typeid(S2).destroy(s2);
+}
diff --git a/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90 b/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90
index de20a14..92dc507 100644
--- a/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90
+++ b/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90
@@ -68,4 +68,4 @@ contains
end function func_foo_a
end program simple_leak
-! { dg-final { scan-tree-dump-times "\>_final" 4 "original" } }
+! { dg-final { scan-tree-dump-times "\>_final" 6 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/block_17.f90 b/gcc/testsuite/gfortran.dg/block_17.f90
new file mode 100644
index 0000000..6ab3106
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/block_17.f90
@@ -0,0 +1,9 @@
+subroutine foo()
+ block
+ end block
+end
+
+subroutine bar()
+ my_name: block
+ end block my_name
+end
diff --git a/gcc/testsuite/gfortran.dg/coarray/alloc_comp_6.f90 b/gcc/testsuite/gfortran.dg/coarray/alloc_comp_6.f90
new file mode 100644
index 0000000..e8a74db
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/coarray/alloc_comp_6.f90
@@ -0,0 +1,29 @@
+! { dg-do run }
+
+program alloc_comp_6
+
+ implicit none
+
+ type :: foo
+ real :: x
+ integer, allocatable :: y(:)
+ end type
+
+ call check()
+
+contains
+
+ subroutine check()
+ block
+ type(foo), allocatable :: example[:] ! needs to be a coarray
+
+ allocate(example[*])
+ allocate(example%y(10))
+ example%x = 3.4
+ example%y = 4
+
+ deallocate(example)
+ end block ! example%y shall not be accessed here by the finalizer,
+ ! because example is already deallocated
+ end subroutine check
+end program alloc_comp_6
diff --git a/gcc/testsuite/gfortran.dg/coarray/alloc_comp_7.f90 b/gcc/testsuite/gfortran.dg/coarray/alloc_comp_7.f90
new file mode 100644
index 0000000..5ebd31f
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/coarray/alloc_comp_7.f90
@@ -0,0 +1,49 @@
+! { dg-do run }
+
+module alloc_comp_module_7
+
+ public :: check
+
+ type :: foo
+ real :: x
+ integer, allocatable :: y(:)
+ contains
+ final :: foo_final
+ end type
+
+contains
+
+ subroutine foo_final(f)
+ type(foo), intent(inout) :: f
+
+ if (allocated(f%y)) then
+ f%y = -1
+ end if
+ end subroutine foo_final
+
+ subroutine check()
+ block
+ type(foo), allocatable :: example[:] ! needs to be a coarray
+
+ allocate(example[*])
+ allocate(example%y(10))
+ example%x = 3.4
+ example%y = 4
+
+ deallocate(example%y)
+ deallocate(example)
+ end block ! example%y shall not be accessed here by the finalizer,
+ ! because example is already deallocated
+ end subroutine check
+end module alloc_comp_module_7
+
+program alloc_comp_7
+
+ use alloc_comp_module_7, only: check
+
+ implicit none
+
+ call check()
+
+end program alloc_comp_7
+
diff --git a/gcc/testsuite/gfortran.dg/derived_function_interface_1.f90 b/gcc/testsuite/gfortran.dg/derived_function_interface_1.f90
index 24a0095..5438ad4 100644
--- a/gcc/testsuite/gfortran.dg/derived_function_interface_1.f90
+++ b/gcc/testsuite/gfortran.dg/derived_function_interface_1.f90
@@ -38,7 +38,7 @@ end function ext_fun
contains
- type(foo) function fun() ! { dg-error "already has an explicit interface" }
+ type(foo) function fun() ! { dg-error "has an explicit interface" }
end function fun ! { dg-error "Expecting END PROGRAM" }
end
diff --git a/gcc/testsuite/gfortran.dg/finalize_38.f90 b/gcc/testsuite/gfortran.dg/finalize_38.f90
index f4b00a1..8533489 100644
--- a/gcc/testsuite/gfortran.dg/finalize_38.f90
+++ b/gcc/testsuite/gfortran.dg/finalize_38.f90
@@ -4,6 +4,8 @@
! With -std=gnu, no finalization of array or structure constructors should occur.
! See finalize_38a.f90 for the result with f2008.
! Tests fix for PR64290 as well.
+! Extended to test that nonfinalizable types with allocatable finalizable components
+! are finalized before deallocation (PR111674).
!
module testmode
implicit none
@@ -20,6 +22,10 @@ module testmode
final :: destructor3, destructor4
end type complicated
+ type :: notfinalizable
+ type(simple), allocatable :: aa
+ end type
+
integer :: check_scalar
integer :: check_array(4)
real :: check_real
@@ -114,6 +120,7 @@ program test_final
type(simple), allocatable :: MyType, MyType2
type(simple), allocatable :: MyTypeArray(:)
type(simple) :: ThyType = simple(21), ThyType2 = simple(22)
+ type(notfinalizable) :: MyNf
class(simple), allocatable :: MyClass
class(simple), allocatable :: MyClassArray(:)
@@ -214,6 +221,15 @@ program test_final
deallocate (MyClassArray)
call test(2, 0, [10, 20], 170, rarray = [10.0,20.0])
+!******************
+! Test for PR111674
+!******************
+ final_count = 0
+ MyNf = notfinalizable (simple (42)) ! Allocatable component not finalized
+ if (final_count .ne. 0) stop 171
+ MyNf = notfinalizable (simple (84)) ! Component finalized before deallocation
+ call test(1, 42, [0,0], 180)
+
! Clean up for valgrind testing
if (allocated (MyType)) deallocate (MyType)
if (allocated (MyType2)) deallocate (MyType2)
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-10.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-10.f90
new file mode 100644
index 0000000..e50db53
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-10.f90
@@ -0,0 +1,75 @@
+! { dg-additional-options "-Wall -fdump-tree-gimple" }
+
+module m
+use iso_c_binding
+integer, parameter :: omp_allocator_handle_kind = c_intptr_t
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_null_allocator = 0
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_default_mem_alloc = 1
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_large_cap_mem_alloc = 2
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_const_mem_alloc = 3
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_high_bw_mem_alloc = 4
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_low_lat_mem_alloc = 5
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_cgroup_mem_alloc = 6
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_pteam_mem_alloc = 7
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_thread_mem_alloc = 8
+end
+
+
+! { dg-final { scan-tree-dump-times "__builtin_GOMP_alloc" 3 "gimple" } }
+! { dg-final { scan-tree-dump-times "__builtin_GOMP_free" 3 "gimple" } }
+
+subroutine f
+ use m
+ implicit none
+ integer :: n
+ block
+ integer :: A(n) ! { dg-warning "Unused variable 'a' declared" }
+ end block
+end
+
+subroutine f2
+ use m
+ implicit none
+ integer :: n ! { dg-note "'n' was declared here" }
+ block
+ integer :: A(n) ! { dg-warning "'n' is used uninitialized" }
+ !$omp allocate(A)
+ ! by matching 'A' above, TREE_USE is set. Hence:
+ ! { dg-final { scan-tree-dump-times "a = __builtin_GOMP_alloc \\(., D\.\[0-9\]+, 0B\\);" 1 "gimple" } }
+ ! { dg-final { scan-tree-dump-times "__builtin_GOMP_free \\(a, 0B\\);" 1 "gimple" } }
+ end block
+end
+
+subroutine h1()
+ use m
+ implicit none
+ integer(omp_allocator_handle_kind) my_handle ! { dg-note "'my_handle' was declared here" }
+ integer :: B1(3)
+ !$omp allocate(B1) allocator(my_handle) ! { dg-warning "31:'my_handle' is used uninitialized" }
+ B1(1) = 5
+ ! { dg-final { scan-tree-dump-times "b1.\[0-9\]+ = __builtin_GOMP_alloc \\(4, 12, D\.\[0-9\]+\\);" 1 "gimple" } }
+ ! { dg-final { scan-tree-dump-times "__builtin_GOMP_free \\(b1.\[0-9\]+, 0B\\);" 1 "gimple" } }
+end
+
+subroutine h2()
+ use m
+ implicit none
+ integer(omp_allocator_handle_kind) my_handle ! { dg-note "'my_handle' was declared here" }
+ block
+ integer :: B2(3)
+ !$omp allocate(B2) allocator(my_handle) ! { dg-warning "33:'my_handle' is used uninitialized" }
+ ! Similar above; B2 is unused - but in gfortran, the match in 'allocate(B2)' already
+ ! causes TREE_USED = 1
+ ! { dg-final { scan-tree-dump-times "b2.\[0-9\]+ = __builtin_GOMP_alloc \\(4, 12, D\.\[0-9\]+\\);" 1 "gimple" } }
+ ! { dg-final { scan-tree-dump-times "__builtin_GOMP_free \\(b2.\[0-9\]+, 0B\\);" 1 "gimple" } }
+ end block
+end
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-11.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-11.f90
new file mode 100644
index 0000000..8a8d939
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-11.f90
@@ -0,0 +1,33 @@
+module m
+use iso_c_binding
+integer, parameter :: omp_allocator_handle_kind = c_intptr_t
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_null_allocator = 0
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_default_mem_alloc = 1
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_large_cap_mem_alloc = 2
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_const_mem_alloc = 3
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_high_bw_mem_alloc = 4
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_low_lat_mem_alloc = 5
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_cgroup_mem_alloc = 6
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_pteam_mem_alloc = 7
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_thread_mem_alloc = 8
+end
+
+subroutine f ()
+ use m
+ implicit none
+ integer :: i
+ !$omp parallel firstprivate(i) allocate(allocator(omp_low_latency_mem_alloc): i)
+ ! { dg-error "Symbol 'omp_low_latency_mem_alloc' at .1. has no IMPLICIT type; did you mean 'omp_low_lat_mem_alloc'\\\?" "" { target *-*-* } .-1 }
+ ! { dg-error "Expected integer expression of the 'omp_allocator_handle_kind' kind at .1." "" { target *-*-* } .-2 }
+ i = 4
+ !$omp end parallel
+end
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-12.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-12.f90
new file mode 100644
index 0000000..183c294
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-12.f90
@@ -0,0 +1,24 @@
+module m
+ implicit none
+contains
+subroutine f ()
+ !$omp declare target
+ integer :: var ! { dg-error "'allocate' directive for 'var' inside a target region must specify an 'allocator' clause" }
+ !$omp allocate(var)
+ var = 5
+end
+
+subroutine h ()
+ !$omp target
+ !$omp parallel
+ !$omp single
+ block
+ integer :: var2(5) ! { dg-error "'allocate' directive for 'var2' inside a target region must specify an 'allocator' clause" }
+ !$omp allocate(var2)
+ var2(1) = 7
+ end block
+ !$omp end single
+ !$omp end parallel
+ !$omp end target
+end
+end module
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-13.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-13.f90
new file mode 100644
index 0000000..bf8a5a2
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-13.f90
@@ -0,0 +1,25 @@
+module m
+ implicit none
+ !$omp requires dynamic_allocators
+contains
+subroutine f ()
+ !$omp declare target
+ integer :: var
+ !$omp allocate(var)
+ var = 5
+end
+
+subroutine h ()
+ !$omp target
+ !$omp parallel
+ !$omp single
+ block
+ integer :: var2(5)
+ !$omp allocate(var2)
+ var2(1) = 7
+ end block
+ !$omp end single
+ !$omp end parallel
+ !$omp end target
+end
+end module
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-13a.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-13a.f90
new file mode 100644
index 0000000..4b297cd
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-13a.f90
@@ -0,0 +1,34 @@
+! { dg-do compile { target lto } }
+! { dg-additional-options "-flto" }
+
+! Same as allocate-13.f90 but compiled with -flto.
+
+! This was failing before as the statement list,
+! used for placing the GOMP_alloc/GOMP_free leaked
+! through to LTO.
+
+module m
+ implicit none
+ !$omp requires dynamic_allocators
+contains
+subroutine f ()
+ !$omp declare target
+ integer :: var
+ !$omp allocate(var)
+ var = 5
+end
+
+subroutine h ()
+ !$omp target
+ !$omp parallel
+ !$omp single
+ block
+ integer :: var2(5)
+ !$omp allocate(var2)
+ var2(1) = 7
+ end block
+ !$omp end single
+ !$omp end parallel
+ !$omp end target
+end
+end module
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-14.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-14.f90
new file mode 100644
index 0000000..8ff9c25
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-14.f90
@@ -0,0 +1,95 @@
+! { dg-additional-options "-fcoarray=single -fcray-pointer" }
+
+module m
+use iso_c_binding
+integer, parameter :: omp_allocator_handle_kind = c_intptr_t
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_null_allocator = 0
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_default_mem_alloc = 1
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_large_cap_mem_alloc = 2
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_const_mem_alloc = 3
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_high_bw_mem_alloc = 4
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_low_lat_mem_alloc = 5
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_cgroup_mem_alloc = 6
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_pteam_mem_alloc = 7
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_thread_mem_alloc = 8
+end
+
+subroutine coarrays(x)
+ use m
+ implicit none
+
+ integer :: x[*]
+ integer, allocatable :: y[:], z(:)[:]
+
+ !$omp allocate(x) ! { dg-error "Unexpected dummy argument 'x' as argument at .1. to declarative !.OMP ALLOCATE" }
+
+ !$omp allocators allocate(y) ! { dg-error "28:Unexpected coarray 'y' in 'allocate' at .1." }
+ allocate(y[*])
+
+ !$omp allocate(z) ! { dg-error "17:Unexpected coarray 'z' in 'allocate' at .1." }
+ allocate(z(5)[*])
+ x = 5
+end
+
+
+integer function f() result(res)
+ !$omp allocate(f) ! { dg-error "Argument 'f' at .1. to declarative !.OMP ALLOCATE directive must be a variable" }
+ !$omp allocate(res) ! { dg-error "Unexpected function-result variable 'res' at .1. in declarative !.OMP ALLOCATE" }
+ res = 5
+end
+
+integer function g() result(res)
+ allocatable :: res
+ !$omp allocators allocate(g) ! { dg-error "Expected variable list at .1." }
+
+ !$omp allocators allocate (res)
+ allocate(res, source=5)
+ deallocate(res)
+
+ !$omp allocate (res)
+ allocate(res, source=5)
+end
+
+
+subroutine cray_ptr()
+ real pointee(10)
+ pointer (ipt, pointee)
+ !$omp allocate(pointee) ! { dg-error "Sorry, Cray pointers and pointees such as 'pointee' are not supported with !.OMP ALLOCATE at .1." }
+ !$omp allocate(ipt) ! { dg-error "Sorry, Cray pointers and pointees such as 'ipt' are not supported with !.OMP ALLOCATE at .1." }
+end
+
+subroutine equiv
+ integer :: A
+ real :: B(2)
+ equivalence(A,B)
+ !$omp allocate (A) ! { dg-error "Sorry, EQUIVALENCE object 'a' not supported with !.OMP ALLOCATE at .1." }
+ !$omp allocate (B) ! { dg-error "Sorry, EQUIVALENCE object 'b' not supported with !.OMP ALLOCATE at .1." }
+end
+
+subroutine common
+ use m
+ integer :: a,b,c(5)
+ common /my/ a,b,c
+ !$omp allocate(b) allocator(omp_cgroup_mem_alloc) ! { dg-error "'b' at .1. is part of the common block '/my/' and may only be specificed implicitly via the named common block" }
+end
+
+subroutine c_and_func_ptrs
+ use iso_c_binding
+ implicit none
+ procedure(), pointer :: p
+ type(c_ptr) :: cptr
+ type(c_ptr) :: cfunptr
+
+ !$omp allocate(cptr) ! OK
+ !$omp allocate(cfunptr) ! OK? A normal derived-type var?
+ !$omp allocate(p) ! { dg-error "Argument 'p' at .1. to declarative !.OMP ALLOCATE directive must be a variable" }
+end
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-15.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-15.f90
new file mode 100644
index 0000000..a0690a5
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-15.f90
@@ -0,0 +1,38 @@
+module m
+use iso_c_binding
+integer, parameter :: omp_allocator_handle_kind = c_intptr_t
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_null_allocator = 0
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_default_mem_alloc = 1
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_large_cap_mem_alloc = 2
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_const_mem_alloc = 3
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_high_bw_mem_alloc = 4
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_low_lat_mem_alloc = 5
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_cgroup_mem_alloc = 6
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_pteam_mem_alloc = 7
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_thread_mem_alloc = 8
+end
+
+subroutine common
+ use m
+ integer :: a,b,c(5)
+ common /my/ a,b,c ! { dg-error "Sorry, !.OMP allocate for COMMON block variable 'my' at .1. not supported" }
+ !$omp allocate(/my/) allocator(omp_cgroup_mem_alloc)
+end
+
+integer function allocators() result(res)
+ use m
+ integer, save :: a(5) = [1,2,3,4,5] ! { dg-error "Sorry, !.OMP allocate for variable 'a' at .1. with SAVE attribute not yet implemented" }
+ !$omp allocate(a) allocator(omp_high_bw_mem_alloc)
+ res = a(4)
+end
+
+
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-4.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-4.f90
index a2dcf10..b93a37c 100644
--- a/gcc/testsuite/gfortran.dg/gomp/allocate-4.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-4.f90
@@ -33,13 +33,13 @@ integer(kind=omp_allocator_handle_kind), intent(in) :: my_alloc
!stack variables:
integer :: a,b,c(n),d(5),e(2)
-!$omp allocate(a) ! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" }
+!$omp allocate(a)
!$omp allocate ( b , c ) align ( 32) allocator (my_alloc)
!$omp allocate (d) align( 128 )
!$omp allocate( e ) allocator( omp_high_bw_mem_alloc )
!saved vars
-integer, save :: k,l,m(5),r(2)
+integer, save :: k,l,m(5),r(2) ! { dg-error "Sorry, !.OMP allocate for variable 'k' at .1. with SAVE attribute not yet implemented" }
!$omp allocate(k) align(16) , allocator (omp_large_cap_mem_alloc)
!$omp allocate ( l ) allocator (omp_large_cap_mem_alloc) , align ( 32)
!$omp allocate (m) align( 128 ),allocator( omp_high_bw_mem_alloc )
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-7.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-7.f90
index b856204..ab85e32 100644
--- a/gcc/testsuite/gfortran.dg/gomp/allocate-7.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-7.f90
@@ -47,7 +47,6 @@ integer, pointer :: ptr
integer, parameter :: prm=5
!$omp allocate(prm) align(64) ! { dg-error "Argument 'prm' at .1. to declarative !.OMP ALLOCATE directive must be a variable" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate(used) allocator(omp_pteam_mem_alloc) ! { dg-error "Argument 'used' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" }
!$omp allocate(n) allocator(omp_pteam_mem_alloc) ! { dg-error "Unexpected dummy argument 'n' as argument at .1. to declarative !.OMP ALLOCATE" }
@@ -59,7 +58,6 @@ contains
subroutine inner
!$omp allocate(a) allocator(omp_pteam_mem_alloc) ! { dg-error "Argument 'a' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
end
end
@@ -74,7 +72,6 @@ common /com4/ y,z
allocatable :: q
pointer :: b
!$omp allocate (c, d) allocator (omp_pteam_mem_alloc)
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (/com4/) allocator (omp_pteam_mem_alloc)
!$omp allocate (c) allocator (omp_pteam_mem_alloc) ! { dg-error "Duplicated variable 'c' in !.OMP ALLOCATE" }
!$omp allocate (/com4/) allocator (omp_pteam_mem_alloc) ! { dg-error "Duplicated common block '/com4/' in !.OMP ALLOCATE" }
@@ -86,7 +83,6 @@ end
subroutine four(n)
integer :: qq, rr, ss, tt, uu, vv,n
!$omp allocate (qq) align(3+n) ! { dg-error "ALIGN requires a scalar positive constant integer alignment expression at .1. that is a power of two" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (rr) align([4]) ! { dg-error "ALIGN requires a scalar positive constant integer alignment expression at .1. that is a power of two" }
!$omp allocate (ss) align([4]) ! { dg-error "ALIGN requires a scalar positive constant integer alignment expression at .1. that is a power of two" }
!$omp allocate (tt) align(32.0) ! { dg-error "ALIGN requires a scalar positive constant integer alignment expression at .1. that is a power of two" }
@@ -99,7 +95,6 @@ subroutine five(n,my_alloc)
integer :: qq, rr, ss, tt, uu, vv,n
integer(omp_allocator_handle_kind) :: my_alloc
!$omp allocate (qq) allocator(3.0) ! { dg-error "Expected integer expression of the 'omp_allocator_handle_kind' kind" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (rr) allocator(3_2) ! { dg-error "Expected integer expression of the 'omp_allocator_handle_kind' kind" }
!$omp allocate (ss) allocator([omp_pteam_mem_alloc]) ! { dg-error "Expected integer expression of the 'omp_allocator_handle_kind' kind" }
!$omp allocate (tt) allocator(my_alloc) ! OK
@@ -113,7 +108,6 @@ subroutine five_SaveAll(n,my_alloc)
integer :: qq, rr, ss, tt, uu, vv,n
integer(omp_allocator_handle_kind) :: my_alloc
!$omp allocate (qq) allocator(3.0) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'qq' at .2. has the SAVE attribute" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (rr) allocator(3_2) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'rr' at .2. has the SAVE attribute" }
!$omp allocate (ss) allocator([omp_pteam_mem_alloc]) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'ss' at .2. has the SAVE attribute" }
!$omp allocate (tt) allocator(my_alloc) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'tt' at .2. has the SAVE attribute" }
@@ -127,7 +121,6 @@ subroutine five_Save(n,my_alloc)
integer, save :: qq, rr, ss, tt, uu, vv
integer(omp_allocator_handle_kind) :: my_alloc
!$omp allocate (qq) allocator(3.0) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'qq' at .2. has the SAVE attribute" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (rr) allocator(3_2) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'rr' at .2. has the SAVE attribute" }
!$omp allocate (ss) allocator([omp_pteam_mem_alloc]) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'ss' at .2. has the SAVE attribute" }
!$omp allocate (tt) allocator(my_alloc) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'tt' at .2. has the SAVE attribute" }
@@ -139,7 +132,6 @@ module five_Module
integer, save :: qq, rr, ss, tt, uu, vv,n
integer(omp_allocator_handle_kind) :: my_alloc
!$omp allocate (qq) allocator(3.0) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'qq' at .2. has the SAVE attribute" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (rr) allocator(3_2) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'rr' at .2. has the SAVE attribute" }
!$omp allocate (ss) allocator([omp_pteam_mem_alloc]) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'ss' at .2. has the SAVE attribute" }
!$omp allocate (tt) allocator(my_alloc) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'tt' at .2. has the SAVE attribute" }
@@ -151,7 +143,6 @@ program five_program
integer, save :: qq, rr, ss, tt, uu, vv,n
integer(omp_allocator_handle_kind) :: my_alloc
!$omp allocate (qq) allocator(3.0) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'qq' at .2. has the SAVE attribute" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (rr) allocator(3_2) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'rr' at .2. has the SAVE attribute" }
!$omp allocate (ss) allocator([omp_pteam_mem_alloc]) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'ss' at .2. has the SAVE attribute" }
!$omp allocate (tt) allocator(my_alloc) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'tt' at .2. has the SAVE attribute" }
@@ -170,7 +161,6 @@ subroutine six(n,my_alloc)
integer(omp_allocator_handle_kind) :: my_alloc
!$omp allocate (/com6qq/) allocator(3.0) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item '/com6qq/' at .2. has the SAVE attribute" }
-! { dg-error "Sorry, declarative !.OMP ALLOCATE at .1. not yet supported" "" { target *-*-* } .-1 }
!$omp allocate (/com6rr/) allocator(3_2) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item '/com6rr/' at .2. has the SAVE attribute" }
!$omp allocate (/com6ss/) allocator([omp_pteam_mem_alloc]) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item '/com6ss/' at .2. has the SAVE attribute" }
!$omp allocate (/com6tt/) allocator(my_alloc) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item '/com6tt/' at .2. has the SAVE attribute" }
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-8.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-8.f90
new file mode 100644
index 0000000..bb4d07d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-8.f90
@@ -0,0 +1,29 @@
+! { dg-additional-options "-fdump-tree-original" }
+
+module m
+ use iso_c_binding
+ !use omp_lib, only: omp_allocator_handle_kind
+ implicit none
+ integer, parameter :: omp_allocator_handle_kind = c_intptr_t
+ integer :: a = 0, b = 42, c = 0
+
+contains
+ integer(omp_allocator_handle_kind) function get_alloc()
+ allocatable :: get_alloc
+ get_alloc = 2_omp_allocator_handle_kind
+ end
+ subroutine foo ()
+ !$omp scope private (a) firstprivate (b) reduction (+: c) allocate ( get_alloc() : a , b , c)
+ if (b /= 42) &
+ error stop
+ a = 36
+ b = 15
+ c = c + 1
+ !$omp end scope
+ end
+end
+
+! { dg-final { scan-tree-dump "omp scope private\\(a\\) firstprivate\\(b\\) reduction\\(\\+:c\\) allocate\\(allocator\\(D\\.\[0-9\]+\\):a\\) allocate\\(allocator\\(D\\.\[0-9\]+\\):b\\) allocate\\(allocator\\(D\\.\[0-9\]+\\):c\\)" "original" } }
+
+! { dg-final { scan-tree-dump-times "D\\.\[0-9\]+ = get_alloc \\(\\);\[\n\r\]+ *D\\.\[0-9\]+ = \\*D\\.\[0-9\]+;\[\n\r\]+ *__builtin_free \\(\\(void \\*\\) D\\.\[0-9\]+\\);" 1 "original" } }
+
diff --git a/gcc/testsuite/gfortran.dg/gomp/allocate-9.f90 b/gcc/testsuite/gfortran.dg/gomp/allocate-9.f90
new file mode 100644
index 0000000..4d95536
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/allocate-9.f90
@@ -0,0 +1,112 @@
+module m
+use iso_c_binding
+integer, parameter :: omp_allocator_handle_kind = c_intptr_t
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_null_allocator = 0
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_default_mem_alloc = 1
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_large_cap_mem_alloc = 2
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_const_mem_alloc = 3
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_high_bw_mem_alloc = 4
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_low_lat_mem_alloc = 5
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_cgroup_mem_alloc = 6
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_pteam_mem_alloc = 7
+ integer (kind=omp_allocator_handle_kind), &
+ parameter :: omp_thread_mem_alloc = 8
+end
+
+
+module m2
+ use m
+ implicit none
+ integer :: A(5) = [1,2,3,4,5], A2, A3, A4, A5
+ integer :: B, C, D
+
+! If the following fails because of added predefined allocators, please update
+! - c/c-parser.c's c_parser_omp_allocate
+! - fortran/openmp.cc's is_predefined_allocator
+! - libgomp/env.c's parse_allocator
+! - libgomp/libgomp.texi (document the new values - multiple locations)
+! + ensure that the memory-spaces are also up to date.
+
+!$omp allocate(A) align(32) allocator(9_omp_allocator_handle_kind) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'a' at .2. has the SAVE attribute" }
+
+! typo in allocator name:
+!$omp allocate(A2) allocator(omp_low_latency_mem_alloc) ! { dg-error "Symbol 'omp_low_latency_mem_alloc' at .1. has no IMPLICIT type; did you mean 'omp_low_lat_mem_alloc'\\?" }
+! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'a2' at .2. has the SAVE attribute" "" { target *-*-* } .-1 }
+
+! align be const multiple of 2
+!$omp allocate(A3) align(31) allocator(omp_default_mem_alloc) ! { dg-error "ALIGN requires a scalar positive constant integer alignment expression at .1. that is a power of two" }
+
+! allocator missing (required as A is static)
+!$omp allocate(A4) align(32) ! { dg-error "An ALLOCATOR clause is required as the list item 'a4' at .1. has the SAVE attribute" }
+
+! "expression in the clause must be a constant expression that evaluates to one of the
+! predefined memory allocator values -> omp_low_lat_mem_alloc"
+!$omp allocate(B) allocator(omp_high_bw_mem_alloc+1_omp_allocator_handle_kind) align(32) ! OK: omp_low_lat_mem_alloc
+
+!$omp allocate(C) allocator(2_omp_allocator_handle_kind) ! OK: omp_large_cap_mem_alloc
+
+!$omp allocate(A5) align(32) allocator(omp_null_allocator) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'a5' at .2. has the SAVE attribute" }
+
+!$omp allocate(C) align(32) allocator(omp_large_cap_mem_alloc) ! { dg-error "Duplicated variable 'c' in !.OMP ALLOCATE at .1." }
+
+contains
+
+integer function f()
+ !$omp allocate(D) align(32) allocator(omp_large_cap_mem_alloc) ! { dg-error "Argument 'd' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" }
+ f = A(1)
+end
+
+integer function g()
+ integer :: a2, b2
+ !$omp allocate(a2)
+ !$omp allocate(a2) ! { dg-error "Duplicated variable 'a2' in !.OMP ALLOCATE at .1." }
+ a2=1; b2=2
+ block
+ integer :: c2
+ !$omp allocate(c2, b2) ! { dg-error "Argument 'b2' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" }
+ c2 = 3
+ g = c2+a2+b2
+ end block
+end
+
+integer function h(q)
+ integer :: q
+ !$omp allocate(q) ! { dg-error "Unexpected dummy argument 'q' as argument at .1. to declarative !.OMP ALLOCATE" }
+ h = q
+end
+
+integer function k ()
+ integer, save :: var3 = 8
+ !$omp allocate(var3) allocator(-1_omp_allocator_handle_kind) ! { dg-error "Predefined allocator required in ALLOCATOR clause at .1. as the list item 'var3' at .2. has the SAVE attribute" }
+ k = var3
+end
+end module
+
+
+subroutine foo
+ integer :: a, b
+ integer :: c, d,h
+ !$omp allocate(a,b)
+ b = 1; d = 5
+contains
+subroutine internal
+ integer :: e,f
+ !$omp allocate(c,d)
+ ! { dg-error "Argument 'c' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" "" { target *-*-* } .-1 }
+ ! { dg-error "Argument 'd' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" "" { target *-*-* } .-2 }
+ !$omp allocate(e)
+ a = 1; c = 2; e = 4
+ block
+ !$omp allocate(f) ! { dg-error "Argument 'f' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" }
+ !$omp allocate(h) ! { dg-error "Argument 'h' at .1. to declarative !.OMP ALLOCATE shall be in the same scope as the variable declaration" }
+ end block
+end
+end
diff --git a/gcc/testsuite/gfortran.dg/gomp/strictly-structured-block-5.f90 b/gcc/testsuite/gfortran.dg/gomp/strictly-structured-block-5.f90
new file mode 100644
index 0000000..79cb920
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/strictly-structured-block-5.f90
@@ -0,0 +1,77 @@
+subroutine f()
+ !$omp parallel
+ block
+ end block
+
+ !$omp parallel
+ block
+ inner: block
+ block
+ end block
+ end block inner
+ end block
+end
+
+subroutine f2()
+ !$omp parallel
+ my_name : block
+ end block my_name
+
+ !$omp parallel
+ another_block : block
+ inner: block
+ block
+ end block
+ end block inner
+ end block another_block
+end
+
+subroutine f3()
+ !$omp parallel
+ my_name : block
+ end block my_name2 ! { dg-error "Expected label 'my_name' for END BLOCK statement" }
+ end block my_name ! avoid follow up errors
+end subroutine
+
+subroutine f4
+ integer :: n
+ n = 5
+ !$omp parallel
+ my: block
+ integer :: A(n)
+ A(1) = 1
+ end block my
+end
+
+subroutine f4a
+ intrinsic :: sin
+ !$omp parallel
+ block
+ procedure(), pointer :: proc
+ procedure(sin) :: my_sin
+ proc => sin
+ end block
+end subroutine
+
+subroutine f5(x)
+ !$omp parallel
+ block
+ intent(in) :: x ! { dg-error "INTENT is not allowed inside of BLOCK" }
+ optional :: x ! { dg-error "OPTIONAL is not allowed inside of BLOCK" }
+ value :: x ! { dg-error "VALUE is not allowed inside of BLOCK" }
+ end block
+end
+
+subroutine f6()
+ !$omp parallel
+ myblock: block
+ cycle myblock ! { dg-error "CYCLE statement at .1. is not applicable to non-loop construct 'myblock'" }
+ end block myblock
+
+ !$omp parallel
+ myblock2: block
+ exit myblock2 ! OK.
+ ! jumps to the end of the block but stays in the structured block
+ end block myblock2
+ !$omp end parallel
+end
diff --git a/gcc/testsuite/gfortran.dg/implied_do_io_8.f90 b/gcc/testsuite/gfortran.dg/implied_do_io_8.f90
new file mode 100644
index 0000000..c66a0f6
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/implied_do_io_8.f90
@@ -0,0 +1,18 @@
+! { dg-do run }
+! { dg-additional-options "-fcheck=bounds" }
+! PR fortran/111837 - out of bounds access with front-end optimization
+
+program implied_do_bug
+ implicit none
+ integer :: i,j,k
+ real :: arr(1,1,1)
+ integer :: ni(1)
+ ni(1) = 1
+ arr = 1
+ write(*,*) (((arr(i,j,k), i=1,ni(k)), k=1,1), j=1,1)
+ write(*,*) (((arr(i,j,k), i=1,ni(k)), j=1,1), k=1,1)
+ write(*,*) (((arr(k,i,j), i=1,ni(k)), k=1,1), j=1,1)
+ write(*,*) (((arr(k,i,j), i=1,ni(k)), j=1,1), k=1,1)
+ write(*,*) (((arr(j,k,i), i=1,ni(k)), k=1,1), j=1,1)
+ write(*,*) (((arr(j,k,i), i=1,ni(k)), j=1,1), k=1,1)
+end
diff --git a/gcc/testsuite/gfortran.dg/pr104351.f90 b/gcc/testsuite/gfortran.dg/pr104351.f90
new file mode 100644
index 0000000..86b47e0
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr104351.f90
@@ -0,0 +1,14 @@
+! { dg-do compile }
+! PR fortran/104351
+! Contributed by G.Steinmetz
+
+program p
+ implicit none
+ type t
+ end type
+ type(t) :: f
+contains
+ real function f() result(z) ! { dg-error "has an explicit interface" }
+ z = 0.0 ! { dg-error "assignment" }
+ end function f ! { dg-error "Expecting END PROGRAM" }
+end
diff --git a/gcc/testsuite/gfortran.dg/pr67740.f90 b/gcc/testsuite/gfortran.dg/pr67740.f90
new file mode 100644
index 0000000..bf70ff2
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr67740.f90
@@ -0,0 +1,32 @@
+! { dg-do compile }
+! { dg-options "-fdump-tree-original" }
+!
+! Check the fix for the testcase in comment 4, where the hidden string length
+! component of the array pointer component was not set.
+!
+! Contributed by Sebastien Bardeau <bardeau@iram.fr>
+!
+program test2
+ implicit none
+ character(len=10), allocatable, target :: s(:)
+ character(len=:), pointer :: sptr(:)
+ type :: pointer_typec0_t
+ character(len=:), pointer :: data0
+ character(len=:), pointer :: data1(:)
+ end type pointer_typec0_t
+ type(pointer_typec0_t) :: co
+ !
+ allocate(s(3))
+ s(1) = '1234567890'
+ s(2) = 'qwertyuio '
+ s(3) = 'asdfghjk '
+ !
+ sptr => s
+ co%data0 => s(1)
+ co%data1 => s
+ !
+ if (any (sptr .ne. s)) stop 1
+ if (co%data0 .ne. s(1)) stop 2
+ if (any (co%data1 .ne. s)) stop 3 ! Hidden string length was not set
+end program test2
+! { dg-final { scan-tree-dump-times "co._data1_length = 10;" 1 "original" } } \ No newline at end of file
diff --git a/gcc/testsuite/gfortran.dg/pr95690.f90 b/gcc/testsuite/gfortran.dg/pr95690.f90
index 47a5df9..1432937 100644
--- a/gcc/testsuite/gfortran.dg/pr95690.f90
+++ b/gcc/testsuite/gfortran.dg/pr95690.f90
@@ -2,8 +2,8 @@
module m
contains
subroutine s
- print *, (erfc) ! { dg-error "not a floating constant" "" { target i?86-*-* x86_64-*-* sparc*-*-* cris-*-* } }
- end ! { dg-error "not a floating constant" "" { target { ! "i?86-*-* x86_64-*-* sparc*-*-* cris-*-*" } } }
+ print *, (erfc) ! { dg-error "not a floating constant" "" { target i?86-*-* x86_64-*-* sparc*-*-* cris-*-* hppa*-*-* } }
+ end ! { dg-error "not a floating constant" "" { target { ! "i?86-*-* x86_64-*-* sparc*-*-* cris-*-* hppa*-*-*" } } }
function erfc()
end
end
diff --git a/gcc/testsuite/gfortran.dg/vect/pr60510.f b/gcc/testsuite/gfortran.dg/vect/pr60510.f
index ecd50dd..6cae82a 100644
--- a/gcc/testsuite/gfortran.dg/vect/pr60510.f
+++ b/gcc/testsuite/gfortran.dg/vect/pr60510.f
@@ -17,6 +17,7 @@
program test
real*8 x(1024),y(1024),a
+ a = 0.0
do i=1,1024
x(i) = i
y(i) = i+1
diff --git a/gcc/testsuite/gm2/coroutines/pim/run/pass/coroutines-pim-run-pass.exp b/gcc/testsuite/gm2/coroutines/pim/run/pass/coroutines-pim-run-pass.exp
index 92ed8d8..3c1d9a1 100644
--- a/gcc/testsuite/gm2/coroutines/pim/run/pass/coroutines-pim-run-pass.exp
+++ b/gcc/testsuite/gm2/coroutines/pim/run/pass/coroutines-pim-run-pass.exp
@@ -30,8 +30,8 @@ set gm2src ${srcdir}/../gm2
gm2_init_cor ""
-# We should be able to compile, link or run in 15 seconds.
-gm2_push_timeout 15
+# We should be able to compile, link or run in 20 seconds.
+gm2_push_timeout 20
foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
diff --git a/gcc/testsuite/gm2/extensions/run/pass/packedrecord3.mod b/gcc/testsuite/gm2/extensions/run/pass/packedrecord3.mod
new file mode 100644
index 0000000..627f9b6
--- /dev/null
+++ b/gcc/testsuite/gm2/extensions/run/pass/packedrecord3.mod
@@ -0,0 +1,49 @@
+MODULE packedrecord3 ; (*!m2iso+gm2*)
+
+FROM libc IMPORT printf, exit ;
+
+TYPE
+ subrange = [0..63] <* bytealignment (0) *> ;
+
+ packedrec = RECORD
+ <* bytealignment (0) *>
+ bool: BOOLEAN ;
+ col : (white, black) ;
+ sub : subrange ;
+ END ;
+
+
+VAR
+ global: subrange ;
+ pr : packedrec ;
+
+
+PROCEDURE test (s: subrange; level: CARDINAL) ;
+BEGIN
+ IF s # global
+ THEN
+ printf ("failed to pass %d into test\n", ORD (s)) ;
+ exit (1)
+ END ;
+ IF level > 0
+ THEN
+ test (s, level-1)
+ END
+END test ;
+
+
+BEGIN
+ IF SIZE (pr) # 1
+ THEN
+ printf ("test failed as SIZE (pr) should be 1 not %d\n", SIZE (pr)) ;
+ exit (1)
+ END ;
+ FOR global := MIN (subrange) TO MAX (subrange) DO
+ test (global, 2)
+ END ;
+ FOR global := MIN (subrange) TO MAX (subrange) DO
+ pr.bool := FALSE ;
+ pr.sub := global ;
+ test (pr.sub, 2)
+ END
+END packedrecord3.
diff --git a/gcc/testsuite/gm2/iso/run/pass/iso-run-pass.exp b/gcc/testsuite/gm2/iso/run/pass/iso-run-pass.exp
index 09d04ee..1a64f7e 100644
--- a/gcc/testsuite/gm2/iso/run/pass/iso-run-pass.exp
+++ b/gcc/testsuite/gm2/iso/run/pass/iso-run-pass.exp
@@ -23,10 +23,14 @@ if $tracelevel then {
# load support procs
load_lib gm2-torture.exp
+load_lib timeout-dg.exp
gm2_init_iso "${srcdir}/gm2/iso/run/pass" -fsoft-check-all
gm2_link_obj fileio.o
+# We should be able to compile, link or run in 60 seconds.
+gm2_push_timeout 60
+
foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
# If we're only testing specific files and this isn't one of them, skip it.
if ![runtest_file_p $runtests $testcase] then {
@@ -38,3 +42,5 @@ foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
gm2-torture-execute $testcase "" "pass"
}
}
+
+gm2_pop_timeout
diff --git a/gcc/testsuite/gm2/iso/run/pass/m2date.mod b/gcc/testsuite/gm2/iso/run/pass/m2date.mod
index 1d8b595..f7e0c3c 100644
--- a/gcc/testsuite/gm2/iso/run/pass/m2date.mod
+++ b/gcc/testsuite/gm2/iso/run/pass/m2date.mod
@@ -13,7 +13,8 @@ TYPE
CONST
Debugging = FALSE ;
- DayName = DayArray { "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun" } ;
+ (* 1st January 1970 was a Thursday. *)
+ DayName = DayArray { "Thu", "Fri", "Sat", "Sun", "Mon", "Tue", "Wed" } ;
MonthName = MonthArray { "Dec", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov" } ;
diff --git a/gcc/testsuite/gm2/pimlib/base/run/pass/pimlib-base-run-pass.exp b/gcc/testsuite/gm2/pimlib/base/run/pass/pimlib-base-run-pass.exp
index 79640c2..d1e8e42 100644
--- a/gcc/testsuite/gm2/pimlib/base/run/pass/pimlib-base-run-pass.exp
+++ b/gcc/testsuite/gm2/pimlib/base/run/pass/pimlib-base-run-pass.exp
@@ -24,13 +24,14 @@ if $tracelevel then {
# load support procs
load_lib gm2-torture.exp
+load_lib timeout-dg.exp
set gm2src ${srcdir}/../m2
gm2_init_pim ""
-# We should be able to compile, link or run in 20 seconds.
-gm2_push_timeout 20
+# We should be able to compile, link or run in 60 seconds.
+gm2_push_timeout 60
foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
# If we're only testing specific files and this isn't one of them, skip it.
diff --git a/gcc/testsuite/gm2/projects/iso/run/pass/halma/projects-iso-run-pass-halma.exp b/gcc/testsuite/gm2/projects/iso/run/pass/halma/projects-iso-run-pass-halma.exp
index faec3f9..2212756 100644
--- a/gcc/testsuite/gm2/projects/iso/run/pass/halma/projects-iso-run-pass-halma.exp
+++ b/gcc/testsuite/gm2/projects/iso/run/pass/halma/projects-iso-run-pass-halma.exp
@@ -29,6 +29,11 @@ set gm2src ${srcdir}/../m2
gm2_init_iso ""
+# We should be able to compile, link or run in 45 seconds even on a
+# really slow host/target.
+
+gm2_push_timeout 45
+
foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
# If we're only testing specific files and this isn't one of them, skip it.
if ![runtest_file_p $runtests $testcase] then {
@@ -37,3 +42,5 @@ foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
gm2-torture-execute $testcase "" "pass"
}
+
+gm2_pop_timeout
diff --git a/gcc/testsuite/gm2/switches/whole-program/pass/run/switches-whole-program-pass-run.exp b/gcc/testsuite/gm2/switches/whole-program/pass/run/switches-whole-program-pass-run.exp
index aa46c35..5013c50 100644
--- a/gcc/testsuite/gm2/switches/whole-program/pass/run/switches-whole-program-pass-run.exp
+++ b/gcc/testsuite/gm2/switches/whole-program/pass/run/switches-whole-program-pass-run.exp
@@ -23,16 +23,16 @@ if $tracelevel then {
# load support procs
load_lib gm2-torture.exp
+load_lib timeout-dg.exp
gm2_init_pim "${srcdir}/gm2/switches/whole-program/run/pass" -fm2-whole-program
-gm2_push_timeout 60
+gm2_push_timeout 120
foreach testcase [lsort [glob -nocomplain $srcdir/$subdir/*.mod]] {
# If we're only testing specific files and this isn't one of them, skip it.
if ![runtest_file_p $runtests $testcase] then {
continue
}
- gm2_target_compile $srcdir/$subdir/mystrlib.mod mystrlib.o object "-g -O3 -I$srcdir/$subdir/"
gm2-torture $testcase
}
diff --git a/gcc/testsuite/lib/gdc-utils.exp b/gcc/testsuite/lib/gdc-utils.exp
index 25574cb8..cbd25e9 100644
--- a/gcc/testsuite/lib/gdc-utils.exp
+++ b/gcc/testsuite/lib/gdc-utils.exp
@@ -163,6 +163,9 @@ proc gdc-convert-args { args } {
upvar 2 compilable_do_what compilable_do_what
set compilable_do_what "compile"
+ } elseif [string match "-nothrow" $arg] {
+ lappend out "-fno-exceptions"
+
} elseif [string match "-vgc" $arg] {
lappend out "-ftransition=nogc"
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index f3043b2..f0b692a 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -2021,7 +2021,7 @@ proc riscv_get_arch { } {
proc add_options_for_riscv_d { flags } {
if { [lsearch $flags -march=*] >= 0 } {
# If there are multiple -march flags, we have to adjust all of them.
- return [regsub -all -- {((?^|[[:space:]])-march=rv[[:digit:]]*[a-ce-rt-wy]*)d*} $flags \\1d ]
+ return [regsub -all -- {((?:^|[[:space:]])-march=rv[[:digit:]]*[a-ce-rt-wy]*)d*} $flags \\1d ]
}
if { [check_effective_target_riscv_d] } {
return "$flags"
@@ -2032,7 +2032,7 @@ proc add_options_for_riscv_d { flags } {
proc add_options_for_riscv_v { flags } {
if { [lsearch $flags -march=*] >= 0 } {
# If there are multiple -march flags, we have to adjust all of them.
- return [regsub -all -- {((?^|[[:space:]])-march=rv[[:digit:]]*[a-rt-uwy]*)v*} $flags \\1v ]
+ return [regsub -all -- {((?:^|[[:space:]])-march=rv[[:digit:]]*[a-rt-uwy]*)v*} $flags \\1v ]
}
if { [check_effective_target_riscv_v] } {
return "$flags"
@@ -2043,8 +2043,8 @@ proc add_options_for_riscv_v { flags } {
proc add_options_for_riscv_zfh { flags } {
if { [lsearch $flags -march=*] >= 0 } {
# If there are multiple -march flags, we have to adjust all of them.
- set flags [regsub -all -- {(?^|[[:space:]])-march=[[:alnum:]_.]*} $flags &_zfh ]
- return [regsub -all -- {((?^|[[:space:]])-march=[[:alnum:]_.]*_zfh[[:alnum:]_.]*)_zfh} $flags \\1 ]
+ set flags [regsub -all -- {(?:^|[[:space:]])-march=[[:alnum:]_.]*} $flags &_zfh ]
+ return [regsub -all -- {((?:^|[[:space:]])-march=[[:alnum:]_.]*_zfh[[:alnum:]_.]*)_zfh} $flags \\1 ]
}
if { [check_effective_target_riscv_zfh] } {
return "$flags"
@@ -3970,7 +3970,8 @@ proc check_effective_target_vect_int { } {
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget riscv*-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v])
}}]
}
@@ -4098,7 +4099,9 @@ proc check_effective_target_vect_intfloat_cvt { } {
&& [et-is-effective-target mips_msa])
|| [istarget amdgcn-*-*]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe2]) }}]
+ && [check_effective_target_s390_vxe2])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports signed double->int conversion
@@ -4117,7 +4120,9 @@ proc check_effective_target_vect_doubleint_cvt { } {
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports signed int->double conversion
@@ -4136,7 +4141,9 @@ proc check_effective_target_vect_intdouble_cvt { } {
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
#Return 1 if we're supporting __int128 for target, 0 otherwise.
@@ -4167,7 +4174,9 @@ proc check_effective_target_vect_uintfloat_cvt { } {
&& [et-is-effective-target mips_msa])
|| [istarget amdgcn-*-*]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe2]) }}]
+ && [check_effective_target_s390_vxe2])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
@@ -4184,7 +4193,9 @@ proc check_effective_target_vect_floatint_cvt { } {
&& [et-is-effective-target mips_msa])
|| [istarget amdgcn-*-*]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe2]) }}]
+ && [check_effective_target_s390_vxe2])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports unsigned float->int conversion
@@ -4199,7 +4210,18 @@ proc check_effective_target_vect_floatuint_cvt { } {
&& [et-is-effective-target mips_msa])
|| [istarget amdgcn-*-*]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe2]) }}]
+ && [check_effective_target_s390_vxe2])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
+}
+
+# Return 1 if the target supports vector integer char -> long long extend optab
+#
+
+proc check_effective_target_vect_ext_char_longlong { } {
+ return [check_cached_effective_target_indexed vect_ext_char_longlong {
+ expr { ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if peeling for alignment might be profitable on the target
@@ -7275,7 +7297,9 @@ proc check_effective_target_vect_shift { } {
|| [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports hardware vector shift by register operation.
@@ -7285,6 +7309,8 @@ proc check_effective_target_vect_var_shift { } {
expr {(([istarget i?86-*-*] || [istarget x86_64-*-*])
&& [check_avx2_available])
|| [istarget aarch64*-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v])
}}]
}
@@ -7299,7 +7325,9 @@ proc check_effective_target_whole_vector_shift { } {
&& [et-is-effective-target mips_loongson_mmi])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] } {
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) } {
set answer 1
} else {
set answer 0
@@ -7327,7 +7355,9 @@ proc check_effective_target_vect_bool_cmp { } {
return [check_cached_effective_target_indexed vect_bool_cmp {
expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget aarch64*-*-*]
- || [is-effective-target arm_neon] }}]
+ || [is-effective-target arm_neon]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports addition of char vectors for at least
@@ -7348,6 +7378,8 @@ proc check_effective_target_vect_char_add { } {
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v])
}}]
}
@@ -7362,7 +7394,9 @@ proc check_effective_target_vect_shift_char { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports hardware vectors of long, 0 otherwise.
@@ -7381,7 +7415,9 @@ proc check_effective_target_vect_long { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] } {
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) } {
set answer 1
} else {
set answer 0
@@ -7409,7 +7445,9 @@ proc check_effective_target_vect_float { } {
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vxe])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports hardware vectors of float without
@@ -7438,7 +7476,9 @@ proc check_effective_target_vect_double { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*]} }]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v])} }]
}
# Return 1 if the target supports conditional addition, subtraction,
@@ -7446,7 +7486,8 @@ proc check_effective_target_vect_double { } {
# via the cond_ optabs. Return 0 otherwise.
proc check_effective_target_vect_double_cond_arith { } {
- return [check_effective_target_aarch64_sve]
+ return [expr { [check_effective_target_aarch64_sve]
+ || [check_effective_target_riscv_v] }]
}
# Return 1 if the target supports hardware vectors of long long, 0 otherwise.
@@ -7463,7 +7504,9 @@ proc check_effective_target_vect_long_long { } {
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*]
&& [check_effective_target_has_arch_pwr8])
- || [istarget aarch64*-*-*] }}]
+ || [istarget aarch64*-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v])}}]
}
@@ -7516,7 +7559,9 @@ proc check_effective_target_vect_perm { } {
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if, for some VF:
@@ -7609,7 +7654,9 @@ proc check_effective_target_vect_perm_byte { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
@@ -7638,7 +7685,9 @@ proc check_effective_target_vect_perm_short { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
@@ -7827,7 +7876,9 @@ proc check_effective_target_vect_sdot_qi { } {
|| [istarget aarch64*-*-*]
|| [istarget arm*-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options supports a vector
@@ -7842,7 +7893,9 @@ proc check_effective_target_vect_udot_qi { } {
|| [istarget arm*-*-*]
|| [istarget ia64-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options supports a vector
@@ -7869,7 +7922,9 @@ proc check_effective_target_vect_sdot_hi { } {
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options supports a vector
@@ -7881,7 +7936,9 @@ proc check_effective_target_vect_udot_hi { } {
return [check_cached_effective_target_indexed vect_udot_hi {
expr { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options supports a vector
@@ -7896,15 +7953,19 @@ proc check_effective_target_vect_usad_char { } {
|| ([istarget aarch64*-*-*]
&& ![check_effective_target_aarch64_sve])
|| ([istarget powerpc*-*-*]
- && [check_p9vector_hw_available])}}]
+ && [check_p9vector_hw_available])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options supports both signed
# and unsigned average operations on vectors of bytes.
proc check_effective_target_vect_avg_qi {} {
- return [expr { [istarget aarch64*-*-*]
- && ![check_effective_target_aarch64_sve1_only] }]
+ return [expr { ([istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve1_only])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }]
}
# Return 1 if the target plus current options supports both signed
@@ -7920,8 +7981,10 @@ proc check_effective_target_vect_mulhrs_hi {} {
# by power-of-2 operations on vectors of 4-byte integers.
proc check_effective_target_vect_sdiv_pow2_si {} {
- return [expr { [istarget aarch64*-*-*]
- && [check_effective_target_aarch64_sve] }]
+ return [expr { ([istarget aarch64*-*-*]
+ && [check_effective_target_aarch64_sve])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }]
}
# Return 1 if the target plus current options supports a vector
@@ -7941,7 +8004,9 @@ proc check_effective_target_vect_pack_trunc { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn*-*-*] }}]
+ || [istarget amdgcn*-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options supports a vector
@@ -7961,7 +8026,9 @@ proc check_effective_target_vect_unpack { } {
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn*-*-*] }}]
+ || [istarget amdgcn*-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target plus current options does not guarantee
@@ -8595,7 +8662,8 @@ proc check_effective_target_vect_load_lanes { } {
proc check_effective_target_vect_masked_load { } {
return [expr { [check_avx_available]
|| [check_effective_target_aarch64_sve]
- || [istarget amdgcn*-*-*] } ]
+ || [istarget amdgcn*-*-*]
+ || [check_effective_target_riscv_v] } ]
}
# Return 1 if the target supports vector masked stores.
@@ -8610,14 +8678,16 @@ proc check_effective_target_vect_masked_store { } {
# Return 1 if the target supports vector gather loads via internal functions.
proc check_effective_target_vect_gather_load_ifn { } {
- return [expr { [check_effective_target_aarch64_sve] }]
+ return [expr { [check_effective_target_aarch64_sve]
+ || [check_effective_target_riscv_v] }]
}
# Return 1 if the target supports vector scatter stores.
proc check_effective_target_vect_scatter_store { } {
return [expr { [check_effective_target_aarch64_sve]
- || [istarget amdgcn*-*-*] }]
+ || [istarget amdgcn*-*-*]
+ || [check_effective_target_riscv_v] }]
}
# Return 1 if the target supports vector conditional operations, 0 otherwise.
@@ -8634,7 +8704,9 @@ proc check_effective_target_vect_condition { } {
&& [check_effective_target_arm_neon_ok])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector conditional operations where
@@ -8651,7 +8723,9 @@ proc check_effective_target_vect_cond_mixed { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector char multiplication, 0 otherwise.
@@ -8667,7 +8741,9 @@ proc check_effective_target_vect_char_mult { } {
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector short multiplication, 0 otherwise.
@@ -8685,7 +8761,8 @@ proc check_effective_target_vect_short_mult { } {
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
- || [istarget riscv*-*-*] }}]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector int multiplication, 0 otherwise.
@@ -8702,7 +8779,8 @@ proc check_effective_target_vect_int_mult { } {
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
- || [istarget riscv*-*-*] }}]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports 64 bit hardware vector
@@ -8719,7 +8797,9 @@ proc check_effective_target_vect_long_mult { } {
|| ([istarget sparc*-*-*] && [check_effective_target_ilp32])
|| [istarget aarch64*-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
+ && [et-is-effective-target mips_msa])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) } {
set answer 1
} else {
set answer 0
@@ -8735,7 +8815,11 @@ proc check_effective_target_vect_int_mod { } {
return [check_cached_effective_target_indexed vect_int_mod {
expr { ([istarget powerpc*-*-*]
&& [check_effective_target_has_arch_pwr10])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector even/odd elements extraction, 0 otherwise.
@@ -8751,7 +8835,9 @@ proc check_effective_target_vect_extract_even_odd { } {
&& ([et-is-effective-target mips_msa]
|| [et-is-effective-target mpaired_single]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector interleaving, 0 otherwise.
@@ -8767,7 +8853,9 @@ proc check_effective_target_vect_interleave { } {
&& ([et-is-effective-target mpaired_single]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
foreach N {2 3 4 5 6 7 8} {
@@ -8825,7 +8913,7 @@ proc available_vector_sizes { } {
lappend result 4096 2048 1024 512 256 128 64 32 16 8 4 2
} elseif { [istarget riscv*-*-*] } {
if { [check_effective_target_riscv_v] } {
- lappend result 0 32
+ lappend result 0 32 64 128 256 512 1024
}
lappend result 128
} else {
@@ -8847,6 +8935,12 @@ proc check_effective_target_vect_variable_length { } {
return [expr { [lindex [available_vector_sizes] 0] == 0 }]
}
+# Return 1 if the target supports vectors of 1024 bits.
+
+proc check_effective_target_vect1024 { } {
+ return [expr { [lsearch -exact [available_vector_sizes] 1024] >= 0 }]
+}
+
# Return 1 if the target supports vectors of 512 bits.
proc check_effective_target_vect512 { } {
@@ -8884,7 +8978,9 @@ proc check_effective_target_vect_call_copysignf { } {
expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget powerpc*-*-*]
|| [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports hardware square root instructions.
@@ -8921,7 +9017,9 @@ proc check_effective_target_vect_call_sqrtf { } {
|| ([istarget powerpc*-*-*] && [check_vsx_hw_available])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || ([istarget riscv*-*-*]
+ && [check_effective_target_riscv_v]) }}]
}
# Return 1 if the target supports vector lrint calls.
@@ -9032,14 +9130,16 @@ proc check_effective_target_vect_call_roundf { } {
proc check_effective_target_vect_logical_reduc { } {
return [expr { [check_effective_target_aarch64_sve]
- || [istarget amdgcn-*-*] }]
+ || [istarget amdgcn-*-*]
+ || [check_effective_target_riscv_v] }]
}
# Return 1 if the target supports the fold_extract_last optab.
proc check_effective_target_vect_fold_extract_last { } {
return [expr { [check_effective_target_aarch64_sve]
- || [istarget amdgcn*-*-*] }]
+ || [istarget amdgcn*-*-*]
+ || [check_effective_target_riscv_v] }]
}
# Return 1 if the target supports section-anchors
@@ -9932,7 +10032,8 @@ proc check_effective_target_vect_sizes_32B_16B { } {
proc check_effective_target_vect_sizes_16B_8B { } {
if { [check_avx_available]
|| [is-effective-target arm_neon]
- || [istarget aarch64*-*-*] } {
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_riscv_v] } {
return 1;
} else {
return 0;
@@ -10150,6 +10251,16 @@ proc check_effective_target_sm4 { } {
} "-msm4" ]
}
+proc check_effective_target_apxf { } {
+ return [check_no_compiler_messages apxf object {
+ void
+ foo ()
+ {
+ __asm__ volatile ("add\t%%r16, %%r31" ::);
+ }
+ } "-mapxf" ]
+}
+
# Return 1 if sse instructions can be compiled.
proc check_effective_target_sse { } {
return [check_no_compiler_messages sse object {
@@ -11323,6 +11434,13 @@ proc check_vect_support_and_set_flags { } {
lappend DEFAULT_VECTCFLAGS "--param" "riscv-vector-abi"
set dg-do-what-default compile
}
+ } elseif [istarget loongarch*-*-*] {
+ lappend DEFAULT_VECTCFLAGS "-mdouble-float" "-mlasx"
+ if [check_effective_target_loongarch_asx_hw] {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
} else {
return 0
}
@@ -11874,7 +11992,8 @@ proc check_effective_target_builtin_eh_return { } {
# Return 1 if the target supports max reduction for vectors.
proc check_effective_target_vect_max_reduc { } {
- if { [istarget aarch64*-*-*] || [is-effective-target arm_neon] } {
+ if { [istarget aarch64*-*-*] || [is-effective-target arm_neon]
+ || [check_effective_target_riscv_v] } {
return 1
}
return 0
@@ -12805,6 +12924,72 @@ proc check_effective_target_const_volatile_readonly_section { } {
return 1
}
+# Return 1 if the CORE-V MAC extension is available.
+proc check_effective_target_cv_mac { } {
+ if { !([istarget riscv*-*-*]) } {
+ return 0
+ }
+ return [check_no_compiler_messages cv_mac object {
+ void foo (void)
+ {
+ asm ("cv.mac t0, t1, t2");
+ }
+ } "-march=rv32i_xcvmac" ]
+}
+
+# Return 1 if the CORE-V ALU extension is available.
+proc check_effective_target_cv_alu { } {
+ if { !([istarget riscv*-*-*]) } {
+ return 0
+ }
+ return [check_no_compiler_messages cv_alu object {
+ void foo (void)
+ {
+ asm ("cv.addn t0, t1, t2, 0");
+ }
+ } "-march=rv32i_xcvalu" ]
+}
+
+proc check_effective_target_loongarch_sx { } {
+ return [check_no_compiler_messages loongarch_lsx assembly {
+ #if !defined(__loongarch_sx)
+ #error "LSX not defined"
+ #endif
+ }]
+}
+
+proc check_effective_target_loongarch_sx_hw { } {
+ return [check_runtime loongarch_sx_hw {
+ #include <lsxintrin.h>
+ int main (void)
+ {
+ __m128i a, b, c;
+ c = __lsx_vand_v (a, b);
+ return 0;
+ }
+ } "-mlsx"]
+}
+
+proc check_effective_target_loongarch_asx { } {
+ return [check_no_compiler_messages loongarch_asx assembly {
+ #if !defined(__loongarch_asx)
+ #error "LASX not defined"
+ #endif
+ }]
+}
+
+proc check_effective_target_loongarch_asx_hw { } {
+ return [check_runtime loongarch_asx_hw {
+ #include <lasxintrin.h>
+ int main (void)
+ {
+ __m256i a, b, c;
+ c = __lasx_xvand_v (a, b);
+ return 0;
+ }
+ } "-mlasx"]
+}
+
# Appends necessary Python flags to extra-tool-flags if Python.h is supported.
# Otherwise, modifies dg-do-what.
proc dg-require-python-h { args } {
diff --git a/gcc/text-art/styled-string.cc b/gcc/text-art/styled-string.cc
index a0cc187..1a66a15 100644
--- a/gcc/text-art/styled-string.cc
+++ b/gcc/text-art/styled-string.cc
@@ -563,10 +563,7 @@ styled_string::from_fmt_va (style_manager &sm,
const char *fmt,
va_list *args)
{
- text_info text;
- text.err_no = errno;
- text.args_ptr = args;
- text.format_spec = fmt;
+ text_info text (fmt, args, errno);
pretty_printer pp;
pp_show_color (&pp) = true;
pp.url_format = URL_FORMAT_DEFAULT;
diff --git a/gcc/text-art/table.cc b/gcc/text-art/table.cc
index 2f857a0..2ea0da3 100644
--- a/gcc/text-art/table.cc
+++ b/gcc/text-art/table.cc
@@ -150,6 +150,26 @@ table::set_cell_span (rect_t span,
}
}
+/* If SPAN is unoccupied, set it to CONTENT.
+ Otherwise, discard CONTENT. */
+
+void
+table::maybe_set_cell_span (rect_t span,
+ table_cell_content &&content,
+ enum x_align x_align,
+ enum y_align y_align)
+{
+ gcc_assert (span.m_size.w > 0);
+ gcc_assert (span.m_size.h > 0);
+ for (int y = span.get_min_y (); y < span.get_next_y (); y++)
+ for (int x = span.get_min_x (); x < span.get_next_x (); x++)
+ {
+ if (m_occupancy.get (coord_t (x, y)) != -1)
+ return;
+ }
+ set_cell_span (span, std::move (content), x_align, y_align);
+}
+
canvas
table::to_canvas (const theme &theme, const style_manager &sm) const
{
@@ -189,6 +209,21 @@ table::debug () const
canvas.debug (false);
}
+/* Move OTHER's content into this table, starting at OFFSET.  */
+
+void
+table::add_other_table (table &&other,
+ table::coord_t offset)
+{
+ for (auto &&placement : other.m_placements)
+ {
+ set_cell_span (placement.m_rect + offset,
+ std::move (placement.m_content),
+ placement.m_x_align,
+ placement.m_y_align);
+ }
+}
+
const table::cell_placement *
table::get_placement_at (coord_t coord) const
{
diff --git a/gcc/text-art/table.h b/gcc/text-art/table.h
index 17eda91..5d5d4bd 100644
--- a/gcc/text-art/table.h
+++ b/gcc/text-art/table.h
@@ -115,6 +115,7 @@ class table
const table_cell_content &get_content () const { return m_content; }
private:
+ friend class table;
friend class table_cell_sizes;
rect_t m_rect;
table_cell_content m_content;
@@ -130,11 +131,18 @@ class table
const size_t &get_size () const { return m_size; }
+ int add_rows (unsigned num)
+ {
+ int topmost_new_row = m_size.h;
+ m_size.h += num;
+ for (unsigned i = 0; i < num; i++)
+ m_occupancy.add_row (-1);
+ return topmost_new_row;
+ }
+
int add_row ()
{
- m_size.h++;
- m_occupancy.add_row (-1);
- return m_size.h - 1; // return the table_y of the newly-added row
+ return add_rows (1);
}
void set_cell (coord_t coord,
@@ -147,6 +155,11 @@ class table
enum x_align x_align = x_align::CENTER,
enum y_align y_align = y_align::CENTER);
+ void maybe_set_cell_span (rect_t span,
+ table_cell_content &&content,
+ enum x_align x_align = x_align::CENTER,
+ enum y_align y_align = y_align::CENTER);
+
canvas to_canvas (const theme &theme, const style_manager &sm) const;
void paint_to_canvas(canvas &canvas,
@@ -156,6 +169,8 @@ class table
void debug () const;
+ void add_other_table (table &&other, table::coord_t offset);
+
/* Self-test support. */
const cell_placement *get_placement_at (coord_t coord) const;
diff --git a/gcc/text-art/types.h b/gcc/text-art/types.h
index ea4ff4b..d5394a9 100644
--- a/gcc/text-art/types.h
+++ b/gcc/text-art/types.h
@@ -129,6 +129,13 @@ struct rect
size<CoordinateSystem> m_size;
};
+template <typename CoordinateSystem>
+rect<CoordinateSystem> operator+ (rect<CoordinateSystem> r,
+ coord<CoordinateSystem> offset)
+{
+ return rect<CoordinateSystem> (r.m_top_left + offset, r.m_size);
+}
+
template <typename ElementType, typename SizeType, typename CoordType>
class array2
{
diff --git a/gcc/timevar.def b/gcc/timevar.def
index 9523598..d21b08c 100644
--- a/gcc/timevar.def
+++ b/gcc/timevar.def
@@ -160,6 +160,7 @@ DEFTIMEVAR (TV_TREE_TAIL_MERGE , "tree tail merge")
DEFTIMEVAR (TV_TREE_VRP , "tree VRP")
DEFTIMEVAR (TV_TREE_VRP_THREADER , "tree VRP threader")
DEFTIMEVAR (TV_TREE_EARLY_VRP , "tree Early VRP")
+DEFTIMEVAR (TV_TREE_FAST_VRP , "tree Fast VRP")
DEFTIMEVAR (TV_TREE_COPY_PROP , "tree copy propagation")
DEFTIMEVAR (TV_FIND_REFERENCED_VARS , "tree find ref. vars")
DEFTIMEVAR (TV_TREE_PTA , "tree PTA")
diff --git a/gcc/toplev.cc b/gcc/toplev.cc
index db62e3e..8af9bf5 100644
--- a/gcc/toplev.cc
+++ b/gcc/toplev.cc
@@ -1023,11 +1023,11 @@ general_init (const char *argv0, bool init_signals)
override it later. */
tree_diagnostics_defaults (global_dc);
- global_dc->show_caret
+ global_dc->m_source_printing.enabled
= global_options_init.x_flag_diagnostics_show_caret;
- global_dc->show_labels_p
+ global_dc->m_source_printing.show_labels_p
= global_options_init.x_flag_diagnostics_show_labels;
- global_dc->show_line_numbers_p
+ global_dc->m_source_printing.show_line_numbers_p
= global_options_init.x_flag_diagnostics_show_line_numbers;
global_dc->show_cwe
= global_options_init.x_flag_diagnostics_show_cwe;
@@ -1039,7 +1039,7 @@ general_init (const char *argv0, bool init_signals)
= global_options_init.x_flag_diagnostics_show_path_depths;
global_dc->show_option_requested
= global_options_init.x_flag_diagnostics_show_option;
- global_dc->min_margin_width
+ global_dc->m_source_printing.min_margin_width
= global_options_init.x_diagnostics_minimum_margin_width;
global_dc->show_column
= global_options_init.x_flag_show_column;
@@ -1082,8 +1082,8 @@ general_init (const char *argv0, bool init_signals)
input_location = UNKNOWN_LOCATION;
line_table = ggc_alloc<line_maps> ();
linemap_init (line_table, BUILTINS_LOCATION);
- line_table->reallocator = realloc_for_line_map;
- line_table->round_alloc_size = ggc_round_alloc_size;
+ line_table->m_reallocator = realloc_for_line_map;
+ line_table->m_round_alloc_size = ggc_round_alloc_size;
line_table->default_range_bits = 5;
init_ttree ();
diff --git a/gcc/tree-affine.cc b/gcc/tree-affine.cc
index ecab467..eff38f0 100644
--- a/gcc/tree-affine.cc
+++ b/gcc/tree-affine.cc
@@ -805,6 +805,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
continue;
}
exp = XNEW (class name_expansion);
+ ::new (static_cast<void *> (exp)) name_expansion ();
exp->in_progress = 1;
if (!*cache)
*cache = new hash_map<tree, name_expansion *>;
@@ -860,6 +861,7 @@ tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
+ (*value)->~name_expansion ();
free (*value);
return true;
}
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index ffab751..ffeb20b 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -8160,11 +8160,14 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
bb = create_empty_bb (entry_pred[0]);
if (current_loops)
add_bb_to_loop (bb, loop);
+ profile_count count = profile_count::zero ();
for (i = 0; i < num_entry_edges; i++)
{
e = make_edge (entry_pred[i], bb, entry_flag[i]);
e->probability = entry_prob[i];
+ count += e->count ();
}
+ bb->count = count;
for (i = 0; i < num_exit_edges; i++)
{
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 91551fd..77417db 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -1091,17 +1091,11 @@ struct GTY(()) tree_base {
struct {
/* The number of HOST_WIDE_INTs if the INTEGER_CST is accessed in
its native precision. */
- unsigned char unextended;
+ unsigned short unextended;
/* The number of HOST_WIDE_INTs if the INTEGER_CST is extended to
wider precisions based on its TYPE_SIGN. */
- unsigned char extended;
-
- /* The number of HOST_WIDE_INTs if the INTEGER_CST is accessed in
- offset_int precision, with smaller integers being extended
- according to their TYPE_SIGN. This is equal to one of the two
- fields above but is cached for speed. */
- unsigned char offset;
+ unsigned short extended;
} int_length;
/* VEC length. This field is only used with TREE_VEC. */
diff --git a/gcc/tree-dfa.cc b/gcc/tree-dfa.cc
index ad8cfed..5355af2 100644
--- a/gcc/tree-dfa.cc
+++ b/gcc/tree-dfa.cc
@@ -372,9 +372,9 @@ get_or_create_ssa_default_def (struct function *fn, tree var)
true, the storage order of the reference is reversed. */
tree
-get_ref_base_and_extent (tree exp, poly_int64_pod *poffset,
- poly_int64_pod *psize,
- poly_int64_pod *pmax_size,
+get_ref_base_and_extent (tree exp, poly_int64 *poffset,
+ poly_int64 *psize,
+ poly_int64 *pmax_size,
bool *preverse)
{
poly_offset_int bitsize = -1;
@@ -531,10 +531,7 @@ get_ref_base_and_extent (tree exp, poly_int64_pod *poffset,
value_range vr;
range_query *query;
- if (cfun)
- query = get_range_query (cfun);
- else
- query = get_global_range_query ();
+ query = get_range_query (cfun);
if (TREE_CODE (index) == SSA_NAME
&& (low_bound = array_ref_low_bound (exp),
@@ -765,7 +762,7 @@ get_ref_base_and_extent_hwi (tree exp, HOST_WIDE_INT *poffset,
its argument or a constant if the argument is known to be constant. */
tree
-get_addr_base_and_unit_offset_1 (tree exp, poly_int64_pod *poffset,
+get_addr_base_and_unit_offset_1 (tree exp, poly_int64 *poffset,
tree (*valueize) (tree))
{
poly_int64 byte_offset = 0;
@@ -907,7 +904,7 @@ done:
is not BITS_PER_UNIT-aligned. */
tree
-get_addr_base_and_unit_offset (tree exp, poly_int64_pod *poffset)
+get_addr_base_and_unit_offset (tree exp, poly_int64 *poffset)
{
return get_addr_base_and_unit_offset_1 (exp, poffset, NULL);
}
diff --git a/gcc/tree-dfa.h b/gcc/tree-dfa.h
index 074a4da..e27baa8 100644
--- a/gcc/tree-dfa.h
+++ b/gcc/tree-dfa.h
@@ -30,13 +30,13 @@ extern void debug_dfa_stats (void);
extern tree ssa_default_def (struct function *, tree);
extern void set_ssa_default_def (struct function *, tree, tree);
extern tree get_or_create_ssa_default_def (struct function *, tree);
-extern tree get_ref_base_and_extent (tree, poly_int64_pod *, poly_int64_pod *,
- poly_int64_pod *, bool *);
+extern tree get_ref_base_and_extent (tree, poly_int64 *, poly_int64 *,
+ poly_int64 *, bool *);
extern tree get_ref_base_and_extent_hwi (tree, HOST_WIDE_INT *,
HOST_WIDE_INT *, bool *);
-extern tree get_addr_base_and_unit_offset_1 (tree, poly_int64_pod *,
+extern tree get_addr_base_and_unit_offset_1 (tree, poly_int64 *,
tree (*) (tree));
-extern tree get_addr_base_and_unit_offset (tree, poly_int64_pod *);
+extern tree get_addr_base_and_unit_offset (tree, poly_int64 *);
extern bool stmt_references_abnormal_ssa_name (gimple *);
extern void replace_abnormal_ssa_names (gimple *);
extern void dump_enumerated_decls (FILE *, dump_flags_t);
diff --git a/gcc/tree-diagnostic-path.cc b/gcc/tree-diagnostic-path.cc
index 84148da..97ee0f0 100644
--- a/gcc/tree-diagnostic-path.cc
+++ b/gcc/tree-diagnostic-path.cc
@@ -200,13 +200,13 @@ struct event_range
/* Emit a span indicating the filename (and line/column) if the
line has changed relative to the last call to
diagnostic_show_locus. */
- if (dc->show_caret)
+ if (dc->m_source_printing.enabled)
{
expanded_location exploc
= linemap_client_expand_location_to_spelling_point
(initial_loc, LOCATION_ASPECT_CARET);
if (exploc.file != LOCATION_FILE (dc->last_location))
- dc->start_span (dc, exploc);
+ dc->m_text_callbacks.start_span (dc, exploc);
}
/* If we have an UNKNOWN_LOCATION (or BUILTINS_LOCATION) as the
diff --git a/gcc/tree-diagnostic.cc b/gcc/tree-diagnostic.cc
index d2f6637..a600f0e 100644
--- a/gcc/tree-diagnostic.cc
+++ b/gcc/tree-diagnostic.cc
@@ -261,7 +261,7 @@ default_tree_printer (pretty_printer *pp, text_info *text, const char *spec,
switch (*spec)
{
case 'E':
- t = va_arg (*text->args_ptr, tree);
+ t = va_arg (*text->m_args_ptr, tree);
if (TREE_CODE (t) == IDENTIFIER_NODE)
{
pp_identifier (pp, IDENTIFIER_POINTER (t));
@@ -270,14 +270,14 @@ default_tree_printer (pretty_printer *pp, text_info *text, const char *spec,
break;
case 'D':
- t = va_arg (*text->args_ptr, tree);
+ t = va_arg (*text->m_args_ptr, tree);
if (VAR_P (t) && DECL_HAS_DEBUG_EXPR_P (t))
t = DECL_DEBUG_EXPR (t);
break;
case 'F':
case 'T':
- t = va_arg (*text->args_ptr, tree);
+ t = va_arg (*text->m_args_ptr, tree);
break;
default:
diff --git a/gcc/tree-if-conv.cc b/gcc/tree-if-conv.cc
index a8c9159..c381d14 100644
--- a/gcc/tree-if-conv.cc
+++ b/gcc/tree-if-conv.cc
@@ -80,7 +80,6 @@ along with GCC; see the file COPYING3. If not see
<L18>:;
*/
-#define INCLUDE_ALGORITHM
#include "config.h"
#include "system.h"
#include "coretypes.h"
@@ -1107,17 +1106,21 @@ if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
case GIMPLE_CALL:
{
+ /* There are some IFN_s that are used to replace builtins but have the
+ same semantics. Even if MASK_CALL cannot handle them vectorable_call
+ will insert the proper selection, so do not block conversion. */
+ int flags = gimple_call_flags (stmt);
+ if ((flags & ECF_CONST)
+ && !(flags & ECF_LOOPING_CONST_OR_PURE)
+ && gimple_call_combined_fn (stmt) != CFN_LAST)
+ return true;
+
tree fndecl = gimple_call_fndecl (stmt);
if (fndecl)
{
/* We can vectorize some builtins and functions with SIMD
"inbranch" clones. */
- int flags = gimple_call_flags (stmt);
struct cgraph_node *node = cgraph_node::get (fndecl);
- if ((flags & ECF_CONST)
- && !(flags & ECF_LOOPING_CONST_OR_PURE)
- && fndecl_built_in_p (fndecl))
- return true;
if (node && node->simd_clones != NULL)
/* Ensure that at least one clone can be "inbranch". */
for (struct cgraph_node *n = node->simd_clones; n != NULL;
@@ -1129,6 +1132,7 @@ if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
return true;
}
}
+
return false;
}
@@ -1927,11 +1931,32 @@ gen_simplified_condition (tree cond, scalar_cond_masked_set_type &cond_set)
return cond;
}
+/* Structure used to track meta-data on PHI arguments used to generate
+   most efficient comparison sequence to flatten a PHI node.  */
+
+typedef struct ifcvt_arg_entry
+{
+ /* The PHI node argument value. */
+ tree arg;
+
+ /* The number of compares required to reach this PHI node from start of the
+ BB being if-converted. */
+ unsigned num_compares;
+
+ /* The number of times this PHI node argument appears in the current PHI
+ node. */
+ unsigned occurs;
+
+ /* The indices at which this PHI arg occurs inside the PHI node. */
+ vec <int> *indexes;
+} ifcvt_arg_entry_t;
+
/* Produce condition for all occurrences of ARG in PHI node. Set *INVERT
as to whether the condition is inverted. */
static tree
-gen_phi_arg_condition (gphi *phi, vec<int> *occur, gimple_stmt_iterator *gsi,
+gen_phi_arg_condition (gphi *phi, ifcvt_arg_entry_t &arg,
+ gimple_stmt_iterator *gsi,
scalar_cond_masked_set_type &cond_set, bool *invert)
{
int len;
@@ -1941,11 +1966,11 @@ gen_phi_arg_condition (gphi *phi, vec<int> *occur, gimple_stmt_iterator *gsi,
edge e;
*invert = false;
- len = occur->length ();
+ len = arg.indexes->length ();
gcc_assert (len > 0);
for (i = 0; i < len; i++)
{
- e = gimple_phi_arg_edge (phi, (*occur)[i]);
+ e = gimple_phi_arg_edge (phi, (*arg.indexes)[i]);
c = bb_predicate (e->src);
if (is_true_predicate (c))
{
@@ -2010,22 +2035,21 @@ gen_phi_arg_condition (gphi *phi, vec<int> *occur, gimple_stmt_iterator *gsi,
static tree
gen_phi_nest_statement (gphi *phi, gimple_stmt_iterator *gsi,
scalar_cond_masked_set_type &cond_set, tree type,
- hash_map<tree_operand_hash, auto_vec<int>> &phi_arg_map,
- gimple **res_stmt, tree lhs0, vec<tree> &args,
- unsigned idx)
+ gimple **res_stmt, tree lhs0,
+ vec<struct ifcvt_arg_entry> &args, unsigned idx)
{
if (idx == args.length ())
- return args[idx - 1];
+ return args[idx - 1].arg;
- vec<int> *indexes = phi_arg_map.get (args[idx - 1]);
bool invert;
- tree cond = gen_phi_arg_condition (phi, indexes, gsi, cond_set, &invert);
- tree arg1 = gen_phi_nest_statement (phi, gsi, cond_set, type, phi_arg_map,
- res_stmt, lhs0, args, idx + 1);
+ tree cond = gen_phi_arg_condition (phi, args[idx - 1], gsi, cond_set,
+ &invert);
+ tree arg1 = gen_phi_nest_statement (phi, gsi, cond_set, type, res_stmt, lhs0,
+ args, idx + 1);
unsigned prev = idx;
unsigned curr = prev - 1;
- tree arg0 = args[curr];
+ tree arg0 = args[curr].arg;
tree rhs, lhs;
if (idx > 1)
lhs = make_temp_ssa_name (type, NULL, "_ifc_");
@@ -2045,6 +2069,36 @@ gen_phi_nest_statement (gphi *phi, gimple_stmt_iterator *gsi,
return lhs;
}
+/* When flattening a PHI node we have a choice of which conditions to test to
+ for all the paths from the start of the dominator block of the BB with the
+ PHI node. If the PHI node has X arguments we have to only test X - 1
+ conditions as the last one is implicit. It does matter which conditions we
+ test first. We should test the shortest condition first (distance here is
+   measured in the number of logical operators in the condition) and the
+ longest one last. This allows us to skip testing the most expensive
+ condition. To accomplish this we need to sort the conditions. P1 and P2
+ are sorted first based on the number of logical operations (num_compares)
+ and then by how often they occur in the PHI node. */
+
+static int
+cmp_arg_entry (const void *p1, const void *p2, void * /* data. */)
+{
+ const ifcvt_arg_entry sval1 = *(const ifcvt_arg_entry *)p1;
+ const ifcvt_arg_entry sval2 = *(const ifcvt_arg_entry *)p2;
+
+ if (sval1.num_compares < sval2.num_compares)
+ return -1;
+ else if (sval1.num_compares > sval2.num_compares)
+ return 1;
+
+ if (sval1.occurs < sval2.occurs)
+ return -1;
+ else if (sval1.occurs > sval2.occurs)
+ return 1;
+
+ return 0;
+}
+
/* Replace a scalar PHI node with a COND_EXPR using COND as condition.
This routine can handle PHI nodes with more than two arguments.
@@ -2170,58 +2224,55 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
unsigned int num_args = gimple_phi_num_args (phi);
/* Vector of different PHI argument values. */
- auto_vec<tree> args (num_args);
+ auto_vec<ifcvt_arg_entry_t> args;
- /* Compute phi_arg_map. */
+ /* Compute phi_arg_map, determine the list of unique PHI args and the indices
+ where they are in the PHI node. The indices will be used to determine
+ the conditions to apply and their complexity. */
for (i = 0; i < num_args; i++)
{
tree arg;
arg = gimple_phi_arg_def (phi, i);
if (!phi_arg_map.get (arg))
- args.quick_push (arg);
+ args.safe_push ({ arg, 0, 0, NULL });
phi_arg_map.get_or_insert (arg).safe_push (i);
}
- /* Determine element with max number of occurrences and complexity. Looking at only
- number of occurrences as a measure for complexity isn't enough as all usages can
- be unique but the comparisons to reach the PHI node differ per branch. */
- typedef std::pair <tree, std::pair <unsigned, unsigned>> ArgEntry;
- auto_vec<ArgEntry> argsKV;
- for (i = 0; i < args.length (); i++)
+ /* Determine element with max number of occurrences and complexity. Looking
+ at only number of occurrences as a measure for complexity isn't enough as
+ all usages can be unique but the comparisons to reach the PHI node differ
+ per branch. */
+ for (unsigned i = 0; i < args.length (); i++)
{
unsigned int len = 0;
- for (int index : phi_arg_map.get (args[i]))
+ vec<int> *indices = phi_arg_map.get (args[i].arg);
+ for (int index : *indices)
{
edge e = gimple_phi_arg_edge (phi, index);
len += get_bb_num_predicate_stmts (e->src);
}
- unsigned occur = phi_arg_map.get (args[i])->length ();
+ unsigned occur = indices->length ();
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Ranking %d as len=%d, idx=%d\n", i, len, occur);
- argsKV.safe_push ({ args[i], { len, occur }});
+ args[i].num_compares = len;
+ args[i].occurs = occur;
+ args[i].indexes = indices;
}
/* Sort elements based on rankings ARGS. */
- std::sort(argsKV.begin(), argsKV.end(), [](const ArgEntry &left,
- const ArgEntry &right) {
- return left.second < right.second;
- });
-
- for (i = 0; i < args.length (); i++)
- args[i] = argsKV[i].first;
+ args.stablesort (cmp_arg_entry, NULL);
/* Handle one special case when number of arguments with different values
is equal 2 and one argument has the only occurrence. Such PHI can be
handled as if would have only 2 arguments. */
- if (args.length () == 2 && phi_arg_map.get (args[0])->length () == 1)
+ if (args.length () == 2
+ && args[0].indexes->length () == 1)
{
- vec<int> *indexes;
- indexes = phi_arg_map.get (args[0]);
- index0 = (*indexes)[0];
- arg0 = args[0];
- arg1 = args[1];
+ index0 = (*args[0].indexes)[0];
+ arg0 = args[0].arg;
+ arg1 = args[1].arg;
e = gimple_phi_arg_edge (phi, index0);
cond = bb_predicate (e->src);
if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
@@ -2235,8 +2286,8 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
&op0, &op1, true, &has_nop, &nop_reduc)))
rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
- swap? arg1 : arg0,
- swap? arg0 : arg1);
+ swap ? arg1 : arg0,
+ swap ? arg0 : arg1);
else
{
/* Convert reduction stmt into vectorizable form. */
@@ -2252,8 +2303,8 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
{
/* Common case. */
tree type = TREE_TYPE (gimple_phi_result (phi));
- gen_phi_nest_statement (phi, gsi, cond_set, type, phi_arg_map,
- &new_stmt, res, args, 1);
+ gen_phi_nest_statement (phi, gsi, cond_set, type, &new_stmt, res,
+ args, 1);
}
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -3795,6 +3846,13 @@ tree_if_conversion (class loop *loop, vec<gimple *> *preds)
}
if (need_to_ifcvt)
{
+ /* Before we rewrite edges we'll record their original position in the
+ edge map such that we can map the edges between the ifcvt and the
+ non-ifcvt loop during peeling. */
+ uintptr_t idx = 0;
+ for (edge exit : get_loop_exit_edges (loop))
+ exit->aux = (void*)idx++;
+
/* Now all statements are if-convertible. Combine all the basic
blocks into one huge basic block doing the if-conversion
on-the-fly. */
diff --git a/gcc/tree-loop-distribution.cc b/gcc/tree-loop-distribution.cc
index a28470b..8abfa99 100644
--- a/gcc/tree-loop-distribution.cc
+++ b/gcc/tree-loop-distribution.cc
@@ -949,7 +949,8 @@ copy_loop_before (class loop *loop, bool redirect_lc_phi_defs)
edge preheader = loop_preheader_edge (loop);
initialize_original_copy_tables ();
- res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, NULL, preheader);
+ res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, single_exit (loop), NULL,
+ NULL, preheader, NULL, false);
gcc_assert (res != NULL);
/* When a not last partition is supposed to keep the LC PHIs computed
@@ -1574,6 +1575,7 @@ find_single_drs (class loop *loop, struct graph *rdg, const bitmap &partition_st
basic_block bb_ld = NULL;
basic_block bb_st = NULL;
+ edge exit = single_exit (loop);
if (single_ld)
{
@@ -1589,6 +1591,14 @@ find_single_drs (class loop *loop, struct graph *rdg, const bitmap &partition_st
bb_ld = gimple_bb (DR_STMT (single_ld));
if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb_ld))
return false;
+
+ /* The data reference must also be executed before possibly exiting
+ the loop as otherwise we'd for example unconditionally execute
+ memset (ptr, 0, n) which even with n == 0 implies ptr is non-NULL. */
+ if (bb_ld != loop->header
+ && (!exit
+ || !dominated_by_p (CDI_DOMINATORS, exit->src, bb_ld)))
+ return false;
}
if (single_st)
@@ -1604,6 +1614,12 @@ find_single_drs (class loop *loop, struct graph *rdg, const bitmap &partition_st
bb_st = gimple_bb (DR_STMT (single_st));
if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb_st))
return false;
+
+ /* And before exiting the loop. */
+ if (bb_st != loop->header
+ && (!exit
+ || !dominated_by_p (CDI_DOMINATORS, exit->src, bb_st)))
+ return false;
}
if (single_ld && single_st)
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index eba2d54..79a5f33 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -470,6 +470,7 @@ extern gimple_opt_pass *make_pass_check_data_deps (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_copy_prop (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_isolate_erroneous_paths (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_early_vrp (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_fast_vrp (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_vrp (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_assumptions (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_uncprop (gcc::context *ctxt);
@@ -621,6 +622,7 @@ extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_peephole2 (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_if_after_reload (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_regrename (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_fold_mem_offsets (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_cprop_hardreg (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_reorder_blocks (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_leaf_regs (gcc::context *ctxt);
diff --git a/gcc/tree-pretty-print.cc b/gcc/tree-pretty-print.cc
index 12c57c1..58705c5 100644
--- a/gcc/tree-pretty-print.cc
+++ b/gcc/tree-pretty-print.cc
@@ -2248,10 +2248,11 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
pp_minus (pp);
val = -val;
}
- unsigned int prec = val.get_precision ();
- if ((prec + 3) / 4 > sizeof (pp_buffer (pp)->digit_buffer) - 3)
+ unsigned int len;
+ print_hex_buf_size (val, &len);
+ if (UNLIKELY (len > sizeof (pp_buffer (pp)->digit_buffer)))
{
- char *buf = XALLOCAVEC (char, (prec + 3) / 4 + 3);
+ char *buf = XALLOCAVEC (char, len);
print_hex (val, buf);
pp_string (pp, buf);
}
diff --git a/gcc/tree-pretty-print.h b/gcc/tree-pretty-print.h
index 2c8ee9a..12bae05 100644
--- a/gcc/tree-pretty-print.h
+++ b/gcc/tree-pretty-print.h
@@ -28,7 +28,7 @@ along with GCC; see the file COPYING3. If not see
pp_verbatim (PP, "%qs not supported by %s", \
get_tree_code_name (TREE_CODE (T)), __FUNCTION__)
-#define pp_ti_abstract_origin(TI) ((tree *) (TI)->x_data)
+#define pp_ti_abstract_origin(TI) ((tree *) (TI)->m_data)
extern void debug_generic_expr (tree);
diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc
index 3fb6951..7cafe5c 100644
--- a/gcc/tree-scalar-evolution.cc
+++ b/gcc/tree-scalar-evolution.cc
@@ -1293,8 +1293,16 @@ scev_dfs::follow_ssa_edge_expr (gimple *at_stmt, tree expr,
gcond *
get_loop_exit_condition (const class loop *loop)
{
+ return get_loop_exit_condition (single_exit (loop));
+}
+
+/* If the statement just before the EXIT_EDGE contains a condition then
+ return the condition, otherwise NULL. */
+
+gcond *
+get_loop_exit_condition (const_edge exit_edge)
+{
gcond *res = NULL;
- edge exit_edge = single_exit (loop);
if (dump_file && (dump_flags & TDF_SCEV))
fprintf (dump_file, "(get_loop_exit_condition \n ");
diff --git a/gcc/tree-scalar-evolution.h b/gcc/tree-scalar-evolution.h
index c58a8a1..f35ca1b 100644
--- a/gcc/tree-scalar-evolution.h
+++ b/gcc/tree-scalar-evolution.h
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
extern tree number_of_latch_executions (class loop *);
extern gcond *get_loop_exit_condition (const class loop *);
+extern gcond *get_loop_exit_condition (const_edge);
extern void scev_initialize (void);
extern bool scev_initialized_p (void);
diff --git a/gcc/tree-sra.cc b/gcc/tree-sra.cc
index 56a8ba2..f8dff8b 100644
--- a/gcc/tree-sra.cc
+++ b/gcc/tree-sra.cc
@@ -1113,6 +1113,21 @@ disqualify_base_of_expr (tree t, const char *reason)
disqualify_candidate (t, reason);
}
+/* Return true if the BIT_FIELD_REF read EXPR is handled by SRA. */
+
+static bool
+sra_handled_bf_read_p (tree expr)
+{
+ uint64_t size, offset;
+ if (bit_field_size (expr).is_constant (&size)
+ && bit_field_offset (expr).is_constant (&offset)
+ && size % BITS_PER_UNIT == 0
+ && offset % BITS_PER_UNIT == 0
+ && pow2p_hwi (size))
+ return true;
+ return false;
+}
+
/* Scan expression EXPR and create access structures for all accesses to
candidates for scalarization. Return the created access or NULL if none is
created. */
@@ -1123,7 +1138,8 @@ build_access_from_expr_1 (tree expr, gimple *stmt, bool write)
struct access *ret = NULL;
bool partial_ref;
- if (TREE_CODE (expr) == BIT_FIELD_REF
+ if ((TREE_CODE (expr) == BIT_FIELD_REF
+ && (write || !sra_handled_bf_read_p (expr)))
|| TREE_CODE (expr) == IMAGPART_EXPR
|| TREE_CODE (expr) == REALPART_EXPR)
{
@@ -1170,6 +1186,7 @@ build_access_from_expr_1 (tree expr, gimple *stmt, bool write)
case COMPONENT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
+ case BIT_FIELD_REF:
ret = create_access (expr, stmt, write);
break;
@@ -1549,6 +1566,7 @@ make_fancy_name_1 (tree expr)
obstack_grow (&name_obstack, buffer, strlen (buffer));
break;
+ case BIT_FIELD_REF:
case ADDR_EXPR:
make_fancy_name_1 (TREE_OPERAND (expr, 0));
break;
@@ -1564,7 +1582,6 @@ make_fancy_name_1 (tree expr)
}
break;
- case BIT_FIELD_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
gcc_unreachable (); /* we treat these as scalars. */
@@ -1734,7 +1751,7 @@ build_ref_for_model (location_t loc, tree base, HOST_WIDE_INT offset,
&& !TREE_THIS_VOLATILE (base)
&& (TYPE_ADDR_SPACE (TREE_TYPE (base))
== TYPE_ADDR_SPACE (TREE_TYPE (model->expr)))
- && offset <= model->offset
+ && offset == model->offset
/* build_reconstructed_reference can still fail if we have already
massaged BASE because of another type incompatibility. */
&& (res = build_reconstructed_reference (loc, base, model)))
@@ -3769,7 +3786,8 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write)
tree type, bfr, orig_expr;
bool partial_cplx_access = false;
- if (TREE_CODE (*expr) == BIT_FIELD_REF)
+ if (TREE_CODE (*expr) == BIT_FIELD_REF
+ && (write || !sra_handled_bf_read_p (*expr)))
{
bfr = *expr;
expr = &TREE_OPERAND (*expr, 0);
diff --git a/gcc/tree-ssa-ccp.cc b/gcc/tree-ssa-ccp.cc
index e048675..1a555ae 100644
--- a/gcc/tree-ssa-ccp.cc
+++ b/gcc/tree-ssa-ccp.cc
@@ -1966,7 +1966,8 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
}
else
{
- widest_int upper = wi::udiv_trunc (r1max, r2min);
+ widest_int upper
+ = wi::udiv_trunc (wi::zext (r1max, width), r2min);
unsigned int lzcount = wi::clz (upper);
unsigned int bits = wi::get_precision (upper) - lzcount;
*mask = wi::mask <widest_int> (bits, false);
diff --git a/gcc/tree-ssa-dce.cc b/gcc/tree-ssa-dce.cc
index f0b0245..bbdf931 100644
--- a/gcc/tree-ssa-dce.cc
+++ b/gcc/tree-ssa-dce.cc
@@ -221,6 +221,14 @@ mark_stmt_if_obviously_necessary (gimple *stmt, bool aggressive)
case GIMPLE_CALL:
{
+ /* Never elide a noreturn call we pruned control-flow for. */
+ if ((gimple_call_flags (stmt) & ECF_NORETURN)
+ && gimple_call_ctrl_altering_p (stmt))
+ {
+ mark_stmt_necessary (stmt, true);
+ return;
+ }
+
tree callee = gimple_call_fndecl (stmt);
if (callee != NULL_TREE
&& fndecl_built_in_p (callee, BUILT_IN_NORMAL))
diff --git a/gcc/tree-ssa-live.cc b/gcc/tree-ssa-live.cc
index 8d8a318..f06daf2 100644
--- a/gcc/tree-ssa-live.cc
+++ b/gcc/tree-ssa-live.cc
@@ -1361,7 +1361,7 @@ compute_live_vars (struct function *fn, live_vars_map *vars)
We then do a mostly classical bitmap liveness algorithm. */
active.create (last_basic_block_for_fn (fn));
- active.quick_grow (last_basic_block_for_fn (fn));
+ active.quick_grow_cleared (last_basic_block_for_fn (fn));
for (int i = 0; i < last_basic_block_for_fn (fn); i++)
bitmap_initialize (&active[i], &bitmap_default_obstack);
diff --git a/gcc/tree-ssa-loop-im.cc b/gcc/tree-ssa-loop-im.cc
index b8e33a4..49aeb68 100644
--- a/gcc/tree-ssa-loop-im.cc
+++ b/gcc/tree-ssa-loop-im.cc
@@ -2324,7 +2324,7 @@ execute_sm (class loop *loop, im_mem_ref *ref,
enum sm_kind { sm_ord, sm_unord, sm_other };
struct seq_entry
{
- seq_entry () {}
+ seq_entry () = default;
seq_entry (unsigned f, sm_kind k, tree fr = NULL)
: first (f), second (k), from (fr) {}
unsigned first;
@@ -3496,13 +3496,13 @@ tree_ssa_lim_initialize (bool store_motion)
(mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));
memory_accesses.refs_loaded_in_loop.create (number_of_loops (cfun));
- memory_accesses.refs_loaded_in_loop.quick_grow (number_of_loops (cfun));
+ memory_accesses.refs_loaded_in_loop.quick_grow_cleared (number_of_loops (cfun));
memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
- memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
+ memory_accesses.refs_stored_in_loop.quick_grow_cleared (number_of_loops (cfun));
if (store_motion)
{
memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
- memory_accesses.all_refs_stored_in_loop.quick_grow
+ memory_accesses.all_refs_stored_in_loop.quick_grow_cleared
(number_of_loops (cfun));
}
diff --git a/gcc/tree-ssa-loop-ivcanon.cc b/gcc/tree-ssa-loop-ivcanon.cc
index 1330cf8..5856f76 100644
--- a/gcc/tree-ssa-loop-ivcanon.cc
+++ b/gcc/tree-ssa-loop-ivcanon.cc
@@ -622,10 +622,11 @@ remove_redundant_iv_tests (class loop *loop)
|| !integer_zerop (niter.may_be_zero)
|| !niter.niter
|| TREE_CODE (niter.niter) != INTEGER_CST
- || !wi::ltu_p (loop->nb_iterations_upper_bound,
+ || !wi::ltu_p (widest_int::from (loop->nb_iterations_upper_bound,
+ SIGNED),
wi::to_widest (niter.niter)))
continue;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Removed pointless exit: ");
diff --git a/gcc/tree-ssa-loop-ivopts.cc b/gcc/tree-ssa-loop-ivopts.cc
index 3d3f28f..2c1f084 100644
--- a/gcc/tree-ssa-loop-ivopts.cc
+++ b/gcc/tree-ssa-loop-ivopts.cc
@@ -412,7 +412,7 @@ struct iv_use
tree *op_p; /* The place where it occurs. */
tree addr_base; /* Base address with const offset stripped. */
- poly_uint64_pod addr_offset;
+ poly_uint64 addr_offset;
/* Const offset stripped from base address. */
};
@@ -1036,10 +1036,12 @@ niter_for_exit (struct ivopts_data *data, edge exit)
names that appear in phi nodes on abnormal edges, so that we do not
create overlapping life ranges for them (PR 27283). */
desc = XNEW (class tree_niter_desc);
+ ::new (static_cast<void*> (desc)) tree_niter_desc ();
if (!number_of_iterations_exit (data->current_loop,
exit, desc, true)
|| contains_abnormal_ssa_name_p (desc->niter))
{
+ desc->~tree_niter_desc ();
XDELETE (desc);
desc = NULL;
}
@@ -2956,7 +2958,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
/* Strips constant offsets from EXPR and stores them to OFFSET. */
static tree
-strip_offset (tree expr, poly_uint64_pod *offset)
+strip_offset (tree expr, poly_uint64 *offset)
{
poly_int64 off;
tree core = strip_offset_1 (expr, false, false, &off);
@@ -7894,7 +7896,11 @@ remove_unused_ivs (struct ivopts_data *data, bitmap toremove)
bool
free_tree_niter_desc (edge const &, tree_niter_desc *const &value, void *)
{
- free (value);
+ if (value)
+ {
+ value->~tree_niter_desc ();
+ free (value);
+ }
return true;
}
diff --git a/gcc/tree-ssa-loop-niter.cc b/gcc/tree-ssa-loop-niter.cc
index 705bcc0..718582a 100644
--- a/gcc/tree-ssa-loop-niter.cc
+++ b/gcc/tree-ssa-loop-niter.cc
@@ -3873,12 +3873,17 @@ do_warn_aggressive_loop_optimizations (class loop *loop,
return;
gimple *estmt = last_nondebug_stmt (e->src);
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
- ? UNSIGNED : SIGNED);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p;
+ unsigned len;
+ if (print_dec_buf_size (i_bound, TYPE_SIGN (TREE_TYPE (loop->nb_iterations)),
+ &len))
+ p = XALLOCAVEC (char, len);
+ else
+ p = buf;
+ print_dec (i_bound, p, TYPE_SIGN (TREE_TYPE (loop->nb_iterations)));
auto_diagnostic_group d;
if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
- "iteration %s invokes undefined behavior", buf))
+ "iteration %s invokes undefined behavior", p))
inform (gimple_location (estmt), "within this loop");
loop->warned_aggressive_loop_optimizations = true;
}
@@ -3915,6 +3920,9 @@ record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
else
gcc_checking_assert (i_bound == wi::to_widest (bound));
+ if (wi::min_precision (i_bound, SIGNED) > bound_wide_int ().get_precision ())
+ return;
+
/* If we have a guaranteed upper bound, record it in the appropriate
list, unless this is an !is_exit bound (i.e. undefined behavior in
at_stmt) in a loop with known constant number of iterations. */
@@ -3925,7 +3933,7 @@ record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
{
class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
- elt->bound = i_bound;
+ elt->bound = bound_wide_int::from (i_bound, SIGNED);
elt->stmt = at_stmt;
elt->is_exit = is_exit;
elt->next = loop->bounds;
@@ -4410,8 +4418,8 @@ infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
static int
wide_int_cmp (const void *p1, const void *p2)
{
- const widest_int *d1 = (const widest_int *) p1;
- const widest_int *d2 = (const widest_int *) p2;
+ const bound_wide_int *d1 = (const bound_wide_int *) p1;
+ const bound_wide_int *d2 = (const bound_wide_int *) p2;
return wi::cmpu (*d1, *d2);
}
@@ -4419,7 +4427,7 @@ wide_int_cmp (const void *p1, const void *p2)
Lookup by binary search. */
static int
-bound_index (const vec<widest_int> &bounds, const widest_int &bound)
+bound_index (const vec<bound_wide_int> &bounds, const bound_wide_int &bound)
{
unsigned int end = bounds.length ();
unsigned int begin = 0;
@@ -4428,7 +4436,7 @@ bound_index (const vec<widest_int> &bounds, const widest_int &bound)
while (begin != end)
{
unsigned int middle = (begin + end) / 2;
- widest_int index = bounds[middle];
+ bound_wide_int index = bounds[middle];
if (index == bound)
return middle;
@@ -4450,7 +4458,7 @@ static void
discover_iteration_bound_by_body_walk (class loop *loop)
{
class nb_iter_bound *elt;
- auto_vec<widest_int> bounds;
+ auto_vec<bound_wide_int> bounds;
vec<vec<basic_block> > queues = vNULL;
vec<basic_block> queue = vNULL;
ptrdiff_t queue_index;
@@ -4459,7 +4467,7 @@ discover_iteration_bound_by_body_walk (class loop *loop)
/* Discover what bounds may interest us. */
for (elt = loop->bounds; elt; elt = elt->next)
{
- widest_int bound = elt->bound;
+ bound_wide_int bound = elt->bound;
/* Exit terminates loop at given iteration, while non-exits produce undefined
effect on the next iteration. */
@@ -4492,7 +4500,7 @@ discover_iteration_bound_by_body_walk (class loop *loop)
hash_map<basic_block, ptrdiff_t> bb_bounds;
for (elt = loop->bounds; elt; elt = elt->next)
{
- widest_int bound = elt->bound;
+ bound_wide_int bound = elt->bound;
if (!elt->is_exit)
{
bound += 1;
@@ -4601,7 +4609,8 @@ discover_iteration_bound_by_body_walk (class loop *loop)
print_decu (bounds[latch_index], dump_file);
fprintf (dump_file, "\n");
}
- record_niter_bound (loop, bounds[latch_index], false, true);
+ record_niter_bound (loop, widest_int::from (bounds[latch_index],
+ SIGNED), false, true);
}
queues.release ();
@@ -4704,7 +4713,8 @@ maybe_lower_iteration_bound (class loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Reducing loop iteration estimate by 1; "
"undefined statement must be executed at the last iteration.\n");
- record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
+ record_niter_bound (loop, widest_int::from (loop->nb_iterations_upper_bound,
+ SIGNED) - 1,
false, true);
}
@@ -4860,10 +4870,13 @@ estimate_numbers_of_iterations (class loop *loop)
not break code with undefined behavior by not recording smaller
maximum number of iterations. */
if (loop->nb_iterations
- && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
+ && TREE_CODE (loop->nb_iterations) == INTEGER_CST
+ && (wi::min_precision (wi::to_widest (loop->nb_iterations), SIGNED)
+ <= bound_wide_int ().get_precision ()))
{
loop->any_upper_bound = true;
- loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
+ loop->nb_iterations_upper_bound
+ = bound_wide_int::from (wi::to_widest (loop->nb_iterations), SIGNED);
}
}
@@ -5114,7 +5127,7 @@ n_of_executions_at_most (gimple *stmt,
class nb_iter_bound *niter_bound,
tree niter)
{
- widest_int bound = niter_bound->bound;
+ widest_int bound = widest_int::from (niter_bound->bound, SIGNED);
tree nit_type = TREE_TYPE (niter), e;
enum tree_code cmp;
diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc
index 51c14d6..363f316 100644
--- a/gcc/tree-ssa-math-opts.cc
+++ b/gcc/tree-ssa-math-opts.cc
@@ -4581,6 +4581,7 @@ match_uaddc_usubc (gimple_stmt_iterator *gsi, gimple *stmt, tree_code code)
if (!INTEGRAL_TYPE_P (type) || !TYPE_UNSIGNED (type))
return false;
+ auto_vec<gimple *, 2> temp_stmts;
if (code != BIT_IOR_EXPR && code != BIT_XOR_EXPR)
{
/* If overflow flag is ignored on the MSB limb, we can end up with
@@ -4615,26 +4616,29 @@ match_uaddc_usubc (gimple_stmt_iterator *gsi, gimple *stmt, tree_code code)
rhs[0] = gimple_assign_rhs1 (g);
tree &r = rhs[2] ? rhs[3] : rhs[2];
r = r2;
+ temp_stmts.quick_push (g);
}
else
break;
}
- while (TREE_CODE (rhs[1]) == SSA_NAME && !rhs[3])
- {
- gimple *g = SSA_NAME_DEF_STMT (rhs[1]);
- if (has_single_use (rhs[1])
- && is_gimple_assign (g)
- && gimple_assign_rhs_code (g) == PLUS_EXPR)
- {
- rhs[1] = gimple_assign_rhs1 (g);
- if (rhs[2])
- rhs[3] = gimple_assign_rhs2 (g);
- else
- rhs[2] = gimple_assign_rhs2 (g);
- }
- else
- break;
- }
+ for (int i = 1; i <= 2; ++i)
+ while (rhs[i] && TREE_CODE (rhs[i]) == SSA_NAME && !rhs[3])
+ {
+ gimple *g = SSA_NAME_DEF_STMT (rhs[i]);
+ if (has_single_use (rhs[i])
+ && is_gimple_assign (g)
+ && gimple_assign_rhs_code (g) == PLUS_EXPR)
+ {
+ rhs[i] = gimple_assign_rhs1 (g);
+ if (rhs[2])
+ rhs[3] = gimple_assign_rhs2 (g);
+ else
+ rhs[2] = gimple_assign_rhs2 (g);
+ temp_stmts.quick_push (g);
+ }
+ else
+ break;
+ }
/* If there are just 3 addends or one minuend and two subtrahends,
check for UADDC or USUBC being pattern recognized earlier.
Say r = op1 + op2 + ovf1 + ovf2; where the (ovf1 + ovf2) part
@@ -5039,7 +5043,17 @@ match_uaddc_usubc (gimple_stmt_iterator *gsi, gimple *stmt, tree_code code)
g = gimple_build_assign (ilhs, IMAGPART_EXPR,
build1 (IMAGPART_EXPR, TREE_TYPE (ilhs), nlhs));
if (rhs[2])
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ {
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ /* Remove some further statements which can't be kept in the IL because
+ they can use SSA_NAMEs whose setter is going to be removed too. */
+ while (temp_stmts.length ())
+ {
+ g = temp_stmts.pop ();
+ gsi2 = gsi_for_stmt (g);
+ gsi_remove (&gsi2, true);
+ }
+ }
else
gsi_replace (gsi, g, true);
/* Remove some statements which can't be kept in the IL because they
diff --git a/gcc/tree-ssa-sccvn.cc b/gcc/tree-ssa-sccvn.cc
index e464985..0b2c10dc 100644
--- a/gcc/tree-ssa-sccvn.cc
+++ b/gcc/tree-ssa-sccvn.cc
@@ -5747,9 +5747,12 @@ visit_reference_op_load (tree lhs, tree op, gimple *stmt)
{
/* Avoid the type punning in case the result mode has padding where
the op we lookup has not. */
- if (maybe_lt (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (result))),
- GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op)))))
+ if (TYPE_MODE (TREE_TYPE (result)) != BLKmode
+ && maybe_lt (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (result))),
+ GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op)))))
result = NULL_TREE;
+ else if (CONSTANT_CLASS_P (result))
+ result = const_unop (VIEW_CONVERT_EXPR, TREE_TYPE (op), result);
else
{
/* We will be setting the value number of lhs to the value number
@@ -7688,7 +7691,11 @@ rpo_elim::eliminate_avail (basic_block bb, tree op)
{
if (SSA_NAME_IS_DEFAULT_DEF (valnum))
return valnum;
- vn_avail *av = VN_INFO (valnum)->avail;
+ vn_ssa_aux_t valnum_info = VN_INFO (valnum);
+ /* See above. */
+ if (!valnum_info->visited)
+ return valnum;
+ vn_avail *av = valnum_info->avail;
if (!av)
return NULL_TREE;
if (av->location == bb->index)
diff --git a/gcc/tree-ssa-sccvn.h b/gcc/tree-ssa-sccvn.h
index 675240e..98d70e0 100644
--- a/gcc/tree-ssa-sccvn.h
+++ b/gcc/tree-ssa-sccvn.h
@@ -114,7 +114,7 @@ typedef struct vn_reference_op_struct
/* For storing TYPE_ALIGN for array ref element size computation. */
unsigned align : 6;
/* Constant offset this op adds or -1 if it is variable. */
- poly_int64_pod off;
+ poly_int64 off;
tree type;
tree op0;
tree op1;
diff --git a/gcc/tree-ssa-strlen.cc b/gcc/tree-ssa-strlen.cc
index 8b7ef91..083a10d 100644
--- a/gcc/tree-ssa-strlen.cc
+++ b/gcc/tree-ssa-strlen.cc
@@ -281,14 +281,14 @@ public:
gimple *stmt,
unsigned lenrange[3], bool *nulterm,
bool *allnul, bool *allnonnul);
- bool count_nonzero_bytes (tree exp,
+ bool count_nonzero_bytes (tree exp, tree vuse,
gimple *stmt,
unsigned HOST_WIDE_INT offset,
unsigned HOST_WIDE_INT nbytes,
unsigned lenrange[3], bool *nulterm,
bool *allnul, bool *allnonnul,
ssa_name_limit_t &snlim);
- bool count_nonzero_bytes_addr (tree exp,
+ bool count_nonzero_bytes_addr (tree exp, tree vuse,
gimple *stmt,
unsigned HOST_WIDE_INT offset,
unsigned HOST_WIDE_INT nbytes,
@@ -4531,8 +4531,8 @@ nonzero_bytes_for_type (tree type, unsigned lenrange[3],
}
/* Recursively determine the minimum and maximum number of leading nonzero
- bytes in the representation of EXP and set LENRANGE[0] and LENRANGE[1]
- to each.
+ bytes in the representation of EXP at memory state VUSE and set
+ LENRANGE[0] and LENRANGE[1] to each.
Sets LENRANGE[2] to the total size of the access (which may be less
than LENRANGE[1] when what's being referenced by EXP is a pointer
rather than an array).
@@ -4546,7 +4546,7 @@ nonzero_bytes_for_type (tree type, unsigned lenrange[3],
Returns true on success and false otherwise. */
bool
-strlen_pass::count_nonzero_bytes (tree exp, gimple *stmt,
+strlen_pass::count_nonzero_bytes (tree exp, tree vuse, gimple *stmt,
unsigned HOST_WIDE_INT offset,
unsigned HOST_WIDE_INT nbytes,
unsigned lenrange[3], bool *nulterm,
@@ -4566,22 +4566,23 @@ strlen_pass::count_nonzero_bytes (tree exp, gimple *stmt,
exact value is not known) recurse once to set the range
for an arbitrary constant. */
exp = build_int_cst (type, 1);
- return count_nonzero_bytes (exp, stmt,
+ return count_nonzero_bytes (exp, vuse, stmt,
offset, 1, lenrange,
nulterm, allnul, allnonnul, snlim);
}
- gimple *stmt = SSA_NAME_DEF_STMT (exp);
- if (gimple_assign_single_p (stmt))
+ gimple *g = SSA_NAME_DEF_STMT (exp);
+ if (gimple_assign_single_p (g))
{
- exp = gimple_assign_rhs1 (stmt);
+ exp = gimple_assign_rhs1 (g);
if (!DECL_P (exp)
&& TREE_CODE (exp) != CONSTRUCTOR
&& TREE_CODE (exp) != MEM_REF)
return false;
/* Handle DECLs, CONSTRUCTOR and MEM_REF below. */
+ stmt = g;
}
- else if (gimple_code (stmt) == GIMPLE_PHI)
+ else if (gimple_code (g) == GIMPLE_PHI)
{
/* Avoid processing an SSA_NAME that has already been visited
or if an SSA_NAME limit has been reached. Indicate success
@@ -4590,11 +4591,11 @@ strlen_pass::count_nonzero_bytes (tree exp, gimple *stmt,
return res > 0;
/* Determine the minimum and maximum from the PHI arguments. */
- unsigned int n = gimple_phi_num_args (stmt);
+ unsigned int n = gimple_phi_num_args (g);
for (unsigned i = 0; i != n; i++)
{
- tree def = gimple_phi_arg_def (stmt, i);
- if (!count_nonzero_bytes (def, stmt,
+ tree def = gimple_phi_arg_def (g, i);
+ if (!count_nonzero_bytes (def, vuse, g,
offset, nbytes, lenrange, nulterm,
allnul, allnonnul, snlim))
return false;
@@ -4652,7 +4653,7 @@ strlen_pass::count_nonzero_bytes (tree exp, gimple *stmt,
return false;
/* Handle MEM_REF = SSA_NAME types of assignments. */
- return count_nonzero_bytes_addr (arg, stmt,
+ return count_nonzero_bytes_addr (arg, vuse, stmt,
offset, nbytes, lenrange, nulterm,
allnul, allnonnul, snlim);
}
@@ -4765,7 +4766,7 @@ strlen_pass::count_nonzero_bytes (tree exp, gimple *stmt,
bytes that are pointed to by EXP, which should be a pointer. */
bool
-strlen_pass::count_nonzero_bytes_addr (tree exp, gimple *stmt,
+strlen_pass::count_nonzero_bytes_addr (tree exp, tree vuse, gimple *stmt,
unsigned HOST_WIDE_INT offset,
unsigned HOST_WIDE_INT nbytes,
unsigned lenrange[3], bool *nulterm,
@@ -4775,6 +4776,14 @@ strlen_pass::count_nonzero_bytes_addr (tree exp, gimple *stmt,
int idx = get_stridx (exp, stmt);
if (idx > 0)
{
+ /* get_strinfo reflects string lengths before the current statement,
+ where the current statement is the outermost count_nonzero_bytes
+ stmt. If there are any stores in between stmt and that
+ current statement, the string length information might describe
+ something significantly different. */
+ if (gimple_vuse (stmt) != vuse)
+ return false;
+
strinfo *si = get_strinfo (idx);
if (!si)
return false;
@@ -4835,14 +4844,14 @@ strlen_pass::count_nonzero_bytes_addr (tree exp, gimple *stmt,
}
if (TREE_CODE (exp) == ADDR_EXPR)
- return count_nonzero_bytes (TREE_OPERAND (exp, 0), stmt,
+ return count_nonzero_bytes (TREE_OPERAND (exp, 0), vuse, stmt,
offset, nbytes,
lenrange, nulterm, allnul, allnonnul, snlim);
if (TREE_CODE (exp) == SSA_NAME)
{
- gimple *stmt = SSA_NAME_DEF_STMT (exp);
- if (gimple_code (stmt) == GIMPLE_PHI)
+ gimple *g = SSA_NAME_DEF_STMT (exp);
+ if (gimple_code (g) == GIMPLE_PHI)
{
/* Avoid processing an SSA_NAME that has already been visited
or if an SSA_NAME limit has been reached. Indicate success
@@ -4851,11 +4860,11 @@ strlen_pass::count_nonzero_bytes_addr (tree exp, gimple *stmt,
return res > 0;
/* Determine the minimum and maximum from the PHI arguments. */
- unsigned int n = gimple_phi_num_args (stmt);
+ unsigned int n = gimple_phi_num_args (g);
for (unsigned i = 0; i != n; i++)
{
- tree def = gimple_phi_arg_def (stmt, i);
- if (!count_nonzero_bytes_addr (def, stmt,
+ tree def = gimple_phi_arg_def (g, i);
+ if (!count_nonzero_bytes_addr (def, vuse, g,
offset, nbytes, lenrange,
nulterm, allnul, allnonnul,
snlim))
@@ -4903,7 +4912,7 @@ strlen_pass::count_nonzero_bytes (tree expr_or_type, gimple *stmt,
ssa_name_limit_t snlim;
tree expr = expr_or_type;
- return count_nonzero_bytes (expr, stmt,
+ return count_nonzero_bytes (expr, gimple_vuse (stmt), stmt,
0, 0, lenrange, nulterm, allnul, allnonnul,
snlim);
}
diff --git a/gcc/tree-ssa.cc b/gcc/tree-ssa.cc
index ebba02b..2f3210f 100644
--- a/gcc/tree-ssa.cc
+++ b/gcc/tree-ssa.cc
@@ -1788,15 +1788,20 @@ maybe_optimize_var (tree var, bitmap addresses_taken, bitmap not_reg_needs,
maybe_reg = true;
DECL_NOT_GIMPLE_REG_P (var) = 0;
}
- if (maybe_reg && is_gimple_reg (var))
+ if (maybe_reg)
{
- if (dump_file)
+ if (is_gimple_reg (var))
{
- fprintf (dump_file, "Now a gimple register: ");
- print_generic_expr (dump_file, var);
- fprintf (dump_file, "\n");
+ if (dump_file)
+ {
+ fprintf (dump_file, "Now a gimple register: ");
+ print_generic_expr (dump_file, var);
+ fprintf (dump_file, "\n");
+ }
+ bitmap_set_bit (suitable_for_renaming, DECL_UID (var));
}
- bitmap_set_bit (suitable_for_renaming, DECL_UID (var));
+ else
+ DECL_NOT_GIMPLE_REG_P (var) = 1;
}
}
}
diff --git a/gcc/tree-ssanames.cc b/gcc/tree-ssanames.cc
index 23387b9..e26ef55 100644
--- a/gcc/tree-ssanames.cc
+++ b/gcc/tree-ssanames.cc
@@ -418,10 +418,13 @@ set_range_info (tree name, const vrange &r)
if (r.undefined_p () || r.varying_p ())
return false;
+ // Pick up the current range, or VARYING if none.
tree type = TREE_TYPE (name);
if (POINTER_TYPE_P (type))
{
- if (r.nonzero_p ())
+ struct ptr_info_def *pi = get_ptr_info (name);
+ // If R is nonnull and pi is not, set nonnull.
+ if (r.nonzero_p () && (!pi || pi->pt.null))
{
set_ptr_nonnull (name);
return true;
@@ -429,18 +432,16 @@ set_range_info (tree name, const vrange &r)
return false;
}
- /* If a global range already exists, incorporate it. */
+ Value_Range tmp (type);
if (range_info_p (name))
- {
- Value_Range tmp (type);
- range_info_get_range (name, tmp);
- tmp.intersect (r);
- if (tmp.undefined_p ())
- return false;
+ range_info_get_range (name, tmp);
+ else
+ tmp.set_varying (type);
+ // If the result doesn't change, or is undefined, return false.
+ if (!tmp.intersect (r) || tmp.undefined_p ())
+ return false;
- return range_info_set_range (name, tmp);
- }
- return range_info_set_range (name, r);
+ return range_info_set_range (name, tmp);
}
/* Set nonnull attribute to pointer NAME. */
@@ -521,10 +522,6 @@ ssa_name_has_boolean_range (tree op)
{
gcc_assert (TREE_CODE (op) == SSA_NAME);
- /* Boolean types always have a range [0..1]. */
- if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE)
- return true;
-
/* An integral type with a single bit of precision. */
if (INTEGRAL_TYPE_P (TREE_TYPE (op))
&& TYPE_UNSIGNED (TREE_TYPE (op))
diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
index 40ab568..9607a9fb 100644
--- a/gcc/tree-vect-data-refs.cc
+++ b/gcc/tree-vect-data-refs.cc
@@ -2078,7 +2078,8 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
/* Check if we can possibly peel the loop. */
if (!vect_can_advance_ivs_p (loop_vinfo)
- || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))
+ || !slpeel_can_duplicate_loop_p (loop, LOOP_VINFO_IV_EXIT (loop_vinfo),
+ LOOP_VINFO_IV_EXIT (loop_vinfo))
|| loop->inner)
do_peeling = false;
diff --git a/gcc/tree-vect-loop-manip.cc b/gcc/tree-vect-loop-manip.cc
index 0964190..1f7779b 100644
--- a/gcc/tree-vect-loop-manip.cc
+++ b/gcc/tree-vect-loop-manip.cc
@@ -252,6 +252,9 @@ adjust_phi_and_debug_stmts (gimple *update_phi, edge e, tree new_def)
{
tree orig_def = PHI_ARG_DEF_FROM_EDGE (update_phi, e);
+ gcc_assert (TREE_CODE (orig_def) != SSA_NAME
+ || orig_def != new_def);
+
SET_PHI_ARG_DEF (update_phi, e->dest_idx, new_def);
if (MAY_HAVE_DEBUG_BIND_STMTS)
@@ -803,7 +806,7 @@ vect_set_loop_controls_directly (class loop *loop, loop_vec_info loop_vinfo,
final gcond. */
static gcond *
-vect_set_loop_condition_partial_vectors (class loop *loop,
+vect_set_loop_condition_partial_vectors (class loop *loop, edge exit_edge,
loop_vec_info loop_vinfo, tree niters,
tree final_iv, bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
@@ -904,7 +907,6 @@ vect_set_loop_condition_partial_vectors (class loop *loop,
add_header_seq (loop, header_seq);
/* Get a boolean result that tells us whether to iterate. */
- edge exit_edge = single_exit (loop);
gcond *cond_stmt;
if (LOOP_VINFO_USING_DECREMENTING_IV_P (loop_vinfo)
&& !LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
@@ -935,7 +937,7 @@ vect_set_loop_condition_partial_vectors (class loop *loop,
if (final_iv)
{
gassign *assign = gimple_build_assign (final_iv, orig_niters);
- gsi_insert_on_edge_immediate (single_exit (loop), assign);
+ gsi_insert_on_edge_immediate (exit_edge, assign);
}
return cond_stmt;
@@ -953,6 +955,7 @@ vect_set_loop_condition_partial_vectors (class loop *loop,
static gcond *
vect_set_loop_condition_partial_vectors_avx512 (class loop *loop,
+ edge exit_edge,
loop_vec_info loop_vinfo, tree niters,
tree final_iv,
bool niters_maybe_zero,
@@ -1144,7 +1147,6 @@ vect_set_loop_condition_partial_vectors_avx512 (class loop *loop,
add_preheader_seq (loop, preheader_seq);
/* Adjust the exit test using the decrementing IV. */
- edge exit_edge = single_exit (loop);
tree_code code = (exit_edge->flags & EDGE_TRUE_VALUE) ? LE_EXPR : GT_EXPR;
/* When we peel for alignment with niter_skip != 0 this can
cause niter + niter_skip to wrap and since we are comparing the
@@ -1183,7 +1185,8 @@ vect_set_loop_condition_partial_vectors_avx512 (class loop *loop,
loop handles exactly VF scalars per iteration. */
static gcond *
-vect_set_loop_condition_normal (class loop *loop, tree niters, tree step,
+vect_set_loop_condition_normal (loop_vec_info /* loop_vinfo */, edge exit_edge,
+ class loop *loop, tree niters, tree step,
tree final_iv, bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
{
@@ -1191,13 +1194,12 @@ vect_set_loop_condition_normal (class loop *loop, tree niters, tree step,
gcond *cond_stmt;
gcond *orig_cond;
edge pe = loop_preheader_edge (loop);
- edge exit_edge = single_exit (loop);
gimple_stmt_iterator incr_gsi;
bool insert_after;
enum tree_code code;
tree niters_type = TREE_TYPE (niters);
- orig_cond = get_loop_exit_condition (loop);
+ orig_cond = get_loop_exit_condition (exit_edge);
gcc_assert (orig_cond);
loop_cond_gsi = gsi_for_stmt (orig_cond);
@@ -1305,19 +1307,18 @@ vect_set_loop_condition_normal (class loop *loop, tree niters, tree step,
if (final_iv)
{
gassign *assign;
- edge exit = single_exit (loop);
- gcc_assert (single_pred_p (exit->dest));
+ gcc_assert (single_pred_p (exit_edge->dest));
tree phi_dest
= integer_zerop (init) ? final_iv : copy_ssa_name (indx_after_incr);
/* Make sure to maintain LC SSA form here and elide the subtraction
if the value is zero. */
- gphi *phi = create_phi_node (phi_dest, exit->dest);
- add_phi_arg (phi, indx_after_incr, exit, UNKNOWN_LOCATION);
+ gphi *phi = create_phi_node (phi_dest, exit_edge->dest);
+ add_phi_arg (phi, indx_after_incr, exit_edge, UNKNOWN_LOCATION);
if (!integer_zerop (init))
{
assign = gimple_build_assign (final_iv, MINUS_EXPR,
phi_dest, init);
- gimple_stmt_iterator gsi = gsi_after_labels (exit->dest);
+ gimple_stmt_iterator gsi = gsi_after_labels (exit_edge->dest);
gsi_insert_before (&gsi, assign, GSI_SAME_STMT);
}
}
@@ -1348,29 +1349,33 @@ vect_set_loop_condition_normal (class loop *loop, tree niters, tree step,
Assumption: the exit-condition of LOOP is the last stmt in the loop. */
void
-vect_set_loop_condition (class loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_condition (class loop *loop, edge loop_e, loop_vec_info loop_vinfo,
tree niters, tree step, tree final_iv,
bool niters_maybe_zero)
{
gcond *cond_stmt;
- gcond *orig_cond = get_loop_exit_condition (loop);
+ gcond *orig_cond = get_loop_exit_condition (loop_e);
gimple_stmt_iterator loop_cond_gsi = gsi_for_stmt (orig_cond);
if (loop_vinfo && LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
{
if (LOOP_VINFO_PARTIAL_VECTORS_STYLE (loop_vinfo) == vect_partial_vectors_avx512)
- cond_stmt = vect_set_loop_condition_partial_vectors_avx512 (loop, loop_vinfo,
+ cond_stmt = vect_set_loop_condition_partial_vectors_avx512 (loop, loop_e,
+ loop_vinfo,
niters, final_iv,
niters_maybe_zero,
loop_cond_gsi);
else
- cond_stmt = vect_set_loop_condition_partial_vectors (loop, loop_vinfo,
+ cond_stmt = vect_set_loop_condition_partial_vectors (loop, loop_e,
+ loop_vinfo,
niters, final_iv,
niters_maybe_zero,
loop_cond_gsi);
}
else
- cond_stmt = vect_set_loop_condition_normal (loop, niters, step, final_iv,
+ cond_stmt = vect_set_loop_condition_normal (loop_vinfo, loop_e, loop,
+ niters,
+ step, final_iv,
niters_maybe_zero,
loop_cond_gsi);
@@ -1439,16 +1444,24 @@ slpeel_duplicate_current_defs_from_edges (edge from, edge to)
get_current_def (PHI_ARG_DEF_FROM_EDGE (from_phi, from)));
}
-
/* Given LOOP this function generates a new copy of it and puts it
on E which is either the entry or exit of LOOP. If SCALAR_LOOP is
non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the
basic blocks from SCALAR_LOOP instead of LOOP, but to either the
- entry or exit of LOOP. */
+ entry or exit of LOOP. If FLOW_LOOPS then connect LOOP to SCALAR_LOOP as a
+ continuation. This is correct for cases where one loop continues from the
+ other like in the vectorizer, but not true for uses in e.g. loop distribution
+ where the contents of the loop body are split but the iteration space of both
+ copies remains the same.
+
+ If UPDATED_DOMS is not NULL it is update with the list of basic blocks whoms
+ dominators were updated during the peeling. */
class loop *
-slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
- class loop *scalar_loop, edge e)
+slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop, edge loop_exit,
+ class loop *scalar_loop,
+ edge scalar_exit, edge e, edge *new_e,
+ bool flow_loops)
{
class loop *new_loop;
basic_block *new_bbs, *bbs, *pbbs;
@@ -1458,13 +1471,30 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
edge exit, new_exit;
bool duplicate_outer_loop = false;
- exit = single_exit (loop);
+ exit = loop_exit;
at_exit = (e == exit);
if (!at_exit && e != loop_preheader_edge (loop))
return NULL;
if (scalar_loop == NULL)
- scalar_loop = loop;
+ {
+ scalar_loop = loop;
+ scalar_exit = loop_exit;
+ }
+ else if (scalar_loop == loop)
+ scalar_exit = loop_exit;
+ else
+ {
+ /* Loop has been version, match exits up using the aux index. */
+ for (edge exit : get_loop_exit_edges (scalar_loop))
+ if (exit->aux == loop_exit->aux)
+ {
+ scalar_exit = exit;
+ break;
+ }
+
+ gcc_assert (scalar_exit);
+ }
bbs = XNEWVEC (basic_block, scalar_loop->num_nodes + 1);
pbbs = bbs + 1;
@@ -1490,13 +1520,19 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
bbs[0] = preheader;
new_bbs = XNEWVEC (basic_block, scalar_loop->num_nodes + 1);
- exit = single_exit (scalar_loop);
copy_bbs (bbs, scalar_loop->num_nodes + 1, new_bbs,
- &exit, 1, &new_exit, NULL,
+ &scalar_exit, 1, &new_exit, NULL,
at_exit ? loop->latch : e->src, true);
- exit = single_exit (loop);
+ exit = loop_exit;
basic_block new_preheader = new_bbs[0];
+ gcc_assert (new_exit);
+
+ /* Record the new loop exit information. new_loop doesn't have SCEV data and
+ so we must initialize the exit information. */
+ if (new_e)
+ *new_e = new_exit;
+
/* Before installing PHI arguments make sure that the edges
into them match that of the scalar loop we analyzed. This
makes sure the SLP tree matches up between the main vectorized
@@ -1530,6 +1566,19 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
for (unsigned i = (at_exit ? 0 : 1); i < scalar_loop->num_nodes + 1; i++)
rename_variables_in_bb (new_bbs[i], duplicate_outer_loop);
+ /* Rename the exit uses. */
+ for (edge exit : get_loop_exit_edges (new_loop))
+ for (auto gsi = gsi_start_phis (exit->dest);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ tree orig_def = PHI_ARG_DEF_FROM_EDGE (gsi.phi (), exit);
+ rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi.phi (), exit));
+ if (MAY_HAVE_DEBUG_BIND_STMTS)
+ adjust_debug_stmts (orig_def, PHI_RESULT (gsi.phi ()), exit->dest);
+ }
+
+ /* This condition happens when the loop has been versioned. e.g. due to ifcvt
+ versioning the loop. */
if (scalar_loop != loop)
{
/* If we copied from SCALAR_LOOP rather than LOOP, SSA_NAMEs from
@@ -1537,34 +1586,89 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
but LOOP will not. slpeel_update_phi_nodes_for_guard{1,2} expects
the LOOP SSA_NAMEs (on the exit edge and edge from latch to
header) to have current_def set, so copy them over. */
- slpeel_duplicate_current_defs_from_edges (single_exit (scalar_loop),
- exit);
+ slpeel_duplicate_current_defs_from_edges (scalar_exit, exit);
slpeel_duplicate_current_defs_from_edges (EDGE_SUCC (scalar_loop->latch,
0),
EDGE_SUCC (loop->latch, 0));
}
+ auto loop_exits = get_loop_exit_edges (loop);
+ auto_vec<basic_block> doms;
+
if (at_exit) /* Add the loop copy at exit. */
{
- if (scalar_loop != loop)
+ if (scalar_loop != loop && new_exit->dest != exit_dest)
{
- gphi_iterator gsi;
new_exit = redirect_edge_and_branch (new_exit, exit_dest);
+ flush_pending_stmts (new_exit);
+ }
- for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi);
- gsi_next (&gsi))
- {
- gphi *phi = gsi.phi ();
- tree orig_arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
- location_t orig_locus
- = gimple_phi_arg_location_from_edge (phi, e);
+ auto_vec <gimple *> new_phis;
+ hash_map <tree, tree> new_phi_args;
+ /* First create the empty phi nodes so that when we flush the
+ statements they can be filled in. However because there is no order
+ between the PHI nodes in the exits and the loop headers we need to
+ order them base on the order of the two headers. First record the new
+ phi nodes. */
+ for (auto gsi_from = gsi_start_phis (scalar_exit->dest);
+ !gsi_end_p (gsi_from); gsi_next (&gsi_from))
+ {
+ gimple *from_phi = gsi_stmt (gsi_from);
+ tree new_res = copy_ssa_name (gimple_phi_result (from_phi));
+ gphi *res = create_phi_node (new_res, new_preheader);
+ new_phis.safe_push (res);
+ }
- add_phi_arg (phi, orig_arg, new_exit, orig_locus);
- }
+ /* Then redirect the edges and flush the changes. This writes out the new
+ SSA names. */
+ for (edge exit : loop_exits)
+ {
+ edge temp_e = redirect_edge_and_branch (exit, new_preheader);
+ flush_pending_stmts (temp_e);
}
- redirect_edge_and_branch_force (e, new_preheader);
- flush_pending_stmts (e);
+
+ /* Record the new SSA names in the cache so that we can skip materializing
+ them again when we fill in the rest of the LCSSA variables. */
+ for (auto phi : new_phis)
+ {
+ tree new_arg = gimple_phi_arg (phi, 0)->def;
+ new_phi_args.put (new_arg, gimple_phi_result (phi));
+ }
+
+ /* Copy the current loop LC PHI nodes between the original loop exit
+ block and the new loop header. This allows us to later split the
+ preheader block and still find the right LC nodes. */
+ edge loop_entry = single_succ_edge (new_preheader);
+ if (flow_loops)
+ for (auto gsi_from = gsi_start_phis (loop->header),
+ gsi_to = gsi_start_phis (new_loop->header);
+ !gsi_end_p (gsi_from) && !gsi_end_p (gsi_to);
+ gsi_next (&gsi_from), gsi_next (&gsi_to))
+ {
+ gimple *from_phi = gsi_stmt (gsi_from);
+ gimple *to_phi = gsi_stmt (gsi_to);
+ tree new_arg = PHI_ARG_DEF_FROM_EDGE (from_phi,
+ loop_latch_edge (loop));
+
+ /* Check if we've already created a new phi node during edge
+ redirection. If we have, only propagate the value downwards. */
+ if (tree *res = new_phi_args.get (new_arg))
+ {
+ adjust_phi_and_debug_stmts (to_phi, loop_entry, *res);
+ continue;
+ }
+
+ tree new_res = copy_ssa_name (gimple_phi_result (from_phi));
+ gphi *lcssa_phi = create_phi_node (new_res, new_preheader);
+
+ /* Main loop exit should use the final iter value. */
+ add_phi_arg (lcssa_phi, new_arg, loop_exit, UNKNOWN_LOCATION);
+
+ adjust_phi_and_debug_stmts (to_phi, loop_entry, new_res);
+ }
+
set_immediate_dominator (CDI_DOMINATORS, new_preheader, e->src);
+
if (was_imm_dom || duplicate_outer_loop)
set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_exit->src);
@@ -1578,6 +1682,23 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
}
else /* Add the copy at entry. */
{
+ /* Copy the current loop LC PHI nodes between the original loop exit
+ block and the new loop header. This allows us to later split the
+ preheader block and still find the right LC nodes. */
+ if (flow_loops)
+ for (auto gsi_from = gsi_start_phis (new_loop->header),
+ gsi_to = gsi_start_phis (loop->header);
+ !gsi_end_p (gsi_from) && !gsi_end_p (gsi_to);
+ gsi_next (&gsi_from), gsi_next (&gsi_to))
+ {
+ gimple *from_phi = gsi_stmt (gsi_from);
+ gimple *to_phi = gsi_stmt (gsi_to);
+ tree new_arg = PHI_ARG_DEF_FROM_EDGE (from_phi,
+ loop_latch_edge (new_loop));
+ adjust_phi_and_debug_stmts (to_phi, loop_preheader_edge (loop),
+ new_arg);
+ }
+
if (scalar_loop != loop)
{
/* Remove the non-necessary forwarder of scalar_loop again. */
@@ -1607,29 +1728,6 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
loop_preheader_edge (new_loop)->src);
}
- if (scalar_loop != loop)
- {
- /* Update new_loop->header PHIs, so that on the preheader
- edge they are the ones from loop rather than scalar_loop. */
- gphi_iterator gsi_orig, gsi_new;
- edge orig_e = loop_preheader_edge (loop);
- edge new_e = loop_preheader_edge (new_loop);
-
- for (gsi_orig = gsi_start_phis (loop->header),
- gsi_new = gsi_start_phis (new_loop->header);
- !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_new);
- gsi_next (&gsi_orig), gsi_next (&gsi_new))
- {
- gphi *orig_phi = gsi_orig.phi ();
- gphi *new_phi = gsi_new.phi ();
- tree orig_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, orig_e);
- location_t orig_locus
- = gimple_phi_arg_location_from_edge (orig_phi, orig_e);
-
- add_phi_arg (new_phi, orig_arg, new_e, orig_locus);
- }
- }
-
free (new_bbs);
free (bbs);
@@ -1696,11 +1794,11 @@ slpeel_add_loop_guard (basic_block guard_bb, tree cond,
*/
bool
-slpeel_can_duplicate_loop_p (const class loop *loop, const_edge e)
+slpeel_can_duplicate_loop_p (const class loop *loop, const_edge exit_e,
+ const_edge e)
{
- edge exit_e = single_exit (loop);
edge entry_e = loop_preheader_edge (loop);
- gcond *orig_cond = get_loop_exit_condition (loop);
+ gcond *orig_cond = get_loop_exit_condition (exit_e);
gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);
unsigned int num_bb = loop->inner? 5 : 2;
@@ -1709,7 +1807,7 @@ slpeel_can_duplicate_loop_p (const class loop *loop, const_edge e)
if (!loop_outer (loop)
|| loop->num_nodes != num_bb
|| !empty_block_p (loop->latch)
- || !single_exit (loop)
+ || !exit_e
/* Verify that new loop exit condition can be trivially modified. */
|| (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
|| (e != exit_e && e != entry_e))
@@ -1722,7 +1820,7 @@ slpeel_can_duplicate_loop_p (const class loop *loop, const_edge e)
return ret;
}
-/* Function vect_get_loop_location.
+/* Function find_loop_location.
Extract the location of the loop in the source code.
If the loop is not well formed for vectorization, an estimated
@@ -1739,11 +1837,19 @@ find_loop_location (class loop *loop)
if (!loop)
return dump_user_location_t ();
- stmt = get_loop_exit_condition (loop);
+ if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
+ {
+ /* We only care about the loop location, so use any exit with location
+ information. */
+ for (edge e : get_loop_exit_edges (loop))
+ {
+ stmt = get_loop_exit_condition (e);
- if (stmt
- && LOCATION_LOCUS (gimple_location (stmt)) > BUILTINS_LOCATION)
- return stmt;
+ if (stmt
+ && LOCATION_LOCUS (gimple_location (stmt)) > BUILTINS_LOCATION)
+ return stmt;
+ }
+ }
/* If we got here the loop is probably not "well formed",
try to estimate the loop location */
@@ -1962,7 +2068,8 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
gphi_iterator gsi, gsi1;
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block update_bb = update_e->dest;
- basic_block exit_bb = single_exit (loop)->dest;
+
+ basic_block exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
/* Make sure there exists a single-predecessor exit bb: */
gcc_assert (single_pred_p (exit_bb));
@@ -2529,10 +2636,9 @@ vect_gen_vector_loop_niters_mult_vf (loop_vec_info loop_vinfo,
{
/* We should be using a step_vector of VF if VF is variable. */
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo).to_constant ();
- class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree type = TREE_TYPE (niters_vector);
tree log_vf = build_int_cst (type, exact_log2 (vf));
- basic_block exit_bb = single_exit (loop)->dest;
+ basic_block exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
gcc_assert (niters_vector_mult_vf_ptr != NULL);
tree niters_vector_mult_vf = fold_build2 (LSHIFT_EXPR, type,
@@ -2551,137 +2657,36 @@ vect_gen_vector_loop_niters_mult_vf (loop_vec_info loop_vinfo,
/* LCSSA_PHI is a lcssa phi of EPILOG loop which is copied from LOOP,
this function searches for the corresponding lcssa phi node in exit
- bb of LOOP. If it is found, return the phi result; otherwise return
- NULL. */
+ bb of LOOP following the LCSSA_EDGE to the exit node. If it is found,
+ return the phi result; otherwise return NULL. */
static tree
-find_guard_arg (class loop *loop, class loop *epilog ATTRIBUTE_UNUSED,
- gphi *lcssa_phi)
+find_guard_arg (class loop *loop ATTRIBUTE_UNUSED, const_edge loop_e,
+ class loop *epilog ATTRIBUTE_UNUSED, gphi *lcssa_phi,
+ int lcssa_edge = 0)
{
gphi_iterator gsi;
- edge e = single_exit (loop);
- gcc_assert (single_pred_p (e->dest));
- for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start_phis (loop_e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
- if (operand_equal_p (PHI_ARG_DEF (phi, 0),
- PHI_ARG_DEF (lcssa_phi, 0), 0))
- return PHI_RESULT (phi);
- }
- return NULL_TREE;
-}
-
-/* Function slpeel_tree_duplicate_loop_to_edge_cfg duplciates FIRST/SECOND
- from SECOND/FIRST and puts it at the original loop's preheader/exit
- edge, the two loops are arranged as below:
-
- preheader_a:
- first_loop:
- header_a:
- i_1 = PHI<i_0, i_2>;
- ...
- i_2 = i_1 + 1;
- if (cond_a)
- goto latch_a;
- else
- goto between_bb;
- latch_a:
- goto header_a;
-
- between_bb:
- ;; i_x = PHI<i_2>; ;; LCSSA phi node to be created for FIRST,
-
- second_loop:
- header_b:
- i_3 = PHI<i_0, i_4>; ;; Use of i_0 to be replaced with i_x,
- or with i_2 if no LCSSA phi is created
- under condition of CREATE_LCSSA_FOR_IV_PHIS.
- ...
- i_4 = i_3 + 1;
- if (cond_b)
- goto latch_b;
- else
- goto exit_bb;
- latch_b:
- goto header_b;
-
- exit_bb:
-
- This function creates loop closed SSA for the first loop; update the
- second loop's PHI nodes by replacing argument on incoming edge with the
- result of newly created lcssa PHI nodes. IF CREATE_LCSSA_FOR_IV_PHIS
- is false, Loop closed ssa phis will only be created for non-iv phis for
- the first loop.
-
- This function assumes exit bb of the first loop is preheader bb of the
- second loop, i.e, between_bb in the example code. With PHIs updated,
- the second loop will execute rest iterations of the first. */
-
-static void
-slpeel_update_phi_nodes_for_loops (loop_vec_info loop_vinfo,
- class loop *first, class loop *second,
- bool create_lcssa_for_iv_phis)
-{
- gphi_iterator gsi_update, gsi_orig;
- class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
-
- edge first_latch_e = EDGE_SUCC (first->latch, 0);
- edge second_preheader_e = loop_preheader_edge (second);
- basic_block between_bb = single_exit (first)->dest;
-
- gcc_assert (between_bb == second_preheader_e->src);
- gcc_assert (single_pred_p (between_bb) && single_succ_p (between_bb));
- /* Either the first loop or the second is the loop to be vectorized. */
- gcc_assert (loop == first || loop == second);
-
- for (gsi_orig = gsi_start_phis (first->header),
- gsi_update = gsi_start_phis (second->header);
- !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
- gsi_next (&gsi_orig), gsi_next (&gsi_update))
- {
- gphi *orig_phi = gsi_orig.phi ();
- gphi *update_phi = gsi_update.phi ();
-
- tree arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, first_latch_e);
- /* Generate lcssa PHI node for the first loop. */
- gphi *vect_phi = (loop == first) ? orig_phi : update_phi;
- stmt_vec_info vect_phi_info = loop_vinfo->lookup_stmt (vect_phi);
- if (create_lcssa_for_iv_phis || !iv_phi_p (vect_phi_info))
+ /* Nested loops with multiple exits can have different no# phi node
+ arguments between the main loop and epilog as epilog falls to the
+ second loop. */
+ if (gimple_phi_num_args (phi) > loop_e->dest_idx)
{
- tree new_res = copy_ssa_name (PHI_RESULT (orig_phi));
- gphi *lcssa_phi = create_phi_node (new_res, between_bb);
- add_phi_arg (lcssa_phi, arg, single_exit (first), UNKNOWN_LOCATION);
- arg = new_res;
- }
-
- /* Update PHI node in the second loop by replacing arg on the loop's
- incoming edge. */
- adjust_phi_and_debug_stmts (update_phi, second_preheader_e, arg);
- }
-
- /* For epilogue peeling we have to make sure to copy all LC PHIs
- for correct vectorization of live stmts. */
- if (loop == first)
- {
- basic_block orig_exit = single_exit (second)->dest;
- for (gsi_orig = gsi_start_phis (orig_exit);
- !gsi_end_p (gsi_orig); gsi_next (&gsi_orig))
- {
- gphi *orig_phi = gsi_orig.phi ();
- tree orig_arg = PHI_ARG_DEF (orig_phi, 0);
- if (TREE_CODE (orig_arg) != SSA_NAME || virtual_operand_p (orig_arg))
- continue;
-
- /* Already created in the above loop. */
- if (find_guard_arg (first, second, orig_phi))
+ tree var = PHI_ARG_DEF (phi, loop_e->dest_idx);
+ if (TREE_CODE (var) != SSA_NAME)
continue;
-
- tree new_res = copy_ssa_name (orig_arg);
- gphi *lcphi = create_phi_node (new_res, between_bb);
- add_phi_arg (lcphi, orig_arg, single_exit (first), UNKNOWN_LOCATION);
+ tree def = get_current_def (var);
+ if (!def)
+ continue;
+ if (operand_equal_p (def,
+ PHI_ARG_DEF (lcssa_phi, lcssa_edge), 0))
+ return PHI_RESULT (phi);
}
}
+ return NULL_TREE;
}
/* Function slpeel_add_loop_guard adds guard skipping from the beginning
@@ -2766,11 +2771,11 @@ slpeel_update_phi_nodes_for_guard1 (class loop *skip_loop,
}
}
-/* LOOP and EPILOG are two consecutive loops in CFG and EPILOG is copied
- from LOOP. Function slpeel_add_loop_guard adds guard skipping from a
- point between the two loops to the end of EPILOG. Edges GUARD_EDGE
- and MERGE_EDGE are the two pred edges of merge_bb at the end of EPILOG.
- The CFG looks like:
+/* LOOP and EPILOG are two consecutive loops in CFG connected by LOOP_EXIT edge
+ and EPILOG is copied from LOOP. Function slpeel_add_loop_guard adds guard
+ skipping from a point between the two loops to the end of EPILOG. Edges
+ GUARD_EDGE and MERGE_EDGE are the two pred edges of merge_bb at the end of
+ EPILOG. The CFG looks like:
loop:
header_a:
@@ -2821,6 +2826,7 @@ slpeel_update_phi_nodes_for_guard1 (class loop *skip_loop,
static void
slpeel_update_phi_nodes_for_guard2 (class loop *loop, class loop *epilog,
+ const_edge loop_exit,
edge guard_edge, edge merge_edge)
{
gphi_iterator gsi;
@@ -2829,13 +2835,11 @@ slpeel_update_phi_nodes_for_guard2 (class loop *loop, class loop *epilog,
gcc_assert (single_succ_p (merge_bb));
edge e = single_succ_edge (merge_bb);
basic_block exit_bb = e->dest;
- gcc_assert (single_pred_p (exit_bb));
- gcc_assert (single_pred (exit_bb) == single_exit (epilog)->dest);
for (gsi = gsi_start_phis (exit_bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *update_phi = gsi.phi ();
- tree old_arg = PHI_ARG_DEF (update_phi, 0);
+ tree old_arg = PHI_ARG_DEF (update_phi, e->dest_idx);
tree merge_arg = NULL_TREE;
@@ -2847,7 +2851,8 @@ slpeel_update_phi_nodes_for_guard2 (class loop *loop, class loop *epilog,
if (!merge_arg)
merge_arg = old_arg;
- tree guard_arg = find_guard_arg (loop, epilog, update_phi);
+ tree guard_arg = find_guard_arg (loop, loop_exit, epilog,
+ update_phi, e->dest_idx);
/* If the var is live after loop but not a reduction, we simply
use the old arg. */
if (!guard_arg)
@@ -2867,21 +2872,6 @@ slpeel_update_phi_nodes_for_guard2 (class loop *loop, class loop *epilog,
}
}
-/* EPILOG loop is duplicated from the original loop for vectorizing,
- the arg of its loop closed ssa PHI needs to be updated. */
-
-static void
-slpeel_update_phi_nodes_for_lcssa (class loop *epilog)
-{
- gphi_iterator gsi;
- basic_block exit_bb = single_exit (epilog)->dest;
-
- gcc_assert (single_pred_p (exit_bb));
- edge e = EDGE_PRED (exit_bb, 0);
- for (gsi = gsi_start_phis (exit_bb); !gsi_end_p (gsi); gsi_next (&gsi))
- rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi.phi (), e));
-}
-
/* LOOP_VINFO is an epilogue loop whose corresponding main loop can be skipped.
Return a value that equals:
@@ -3201,27 +3191,36 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
}
if (vect_epilogues)
- /* Make sure to set the epilogue's epilogue scalar loop, such that we can
- use the original scalar loop as remaining epilogue if necessary. */
- LOOP_VINFO_SCALAR_LOOP (epilogue_vinfo)
- = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ {
+ /* Make sure to set the epilogue's epilogue scalar loop, such that we can
+ use the original scalar loop as remaining epilogue if necessary. */
+ LOOP_VINFO_SCALAR_LOOP (epilogue_vinfo)
+ = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ LOOP_VINFO_SCALAR_IV_EXIT (epilogue_vinfo)
+ = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
+ }
if (prolog_peeling)
{
e = loop_preheader_edge (loop);
- gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, e));
+ edge exit_e = LOOP_VINFO_IV_EXIT (loop_vinfo);
+ gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, exit_e, e));
/* Peel prolog and put it on preheader edge of loop. */
- prolog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, scalar_loop, e);
+ edge scalar_e = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
+ edge prolog_e = NULL;
+ prolog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, exit_e,
+ scalar_loop, scalar_e,
+ e, &prolog_e);
gcc_assert (prolog);
prolog->force_vectorize = false;
- slpeel_update_phi_nodes_for_loops (loop_vinfo, prolog, loop, true);
+
first_loop = prolog;
reset_original_copy_tables ();
/* Update the number of iterations for prolog loop. */
tree step_prolog = build_one_cst (TREE_TYPE (niters_prolog));
- vect_set_loop_condition (prolog, NULL, niters_prolog,
+ vect_set_loop_condition (prolog, prolog_e, loop_vinfo, niters_prolog,
step_prolog, NULL_TREE, false);
/* Skip the prolog loop. */
@@ -3275,8 +3274,8 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
if (epilog_peeling)
{
- e = single_exit (loop);
- gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, e));
+ e = LOOP_VINFO_IV_EXIT (loop_vinfo);
+ gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, e, e));
/* Peel epilog and put it on exit edge of loop. If we are vectorizing
said epilog then we should use a copy of the main loop as a starting
@@ -3285,12 +3284,16 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
If we are not vectorizing the epilog then we should use the scalar loop
as the transformations mentioned above make less or no sense when not
vectorizing. */
+ edge scalar_e = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
epilog = vect_epilogues ? get_loop_copy (loop) : scalar_loop;
- epilog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, epilog, e);
+ edge epilog_e = vect_epilogues ? e : scalar_e;
+ edge new_epilog_e = NULL;
+ epilog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e, epilog,
+ epilog_e, e,
+ &new_epilog_e);
+ LOOP_VINFO_EPILOGUE_IV_EXIT (loop_vinfo) = new_epilog_e;
gcc_assert (epilog);
-
epilog->force_vectorize = false;
- slpeel_update_phi_nodes_for_loops (loop_vinfo, loop, epilog, false);
bb_before_epilog = loop_preheader_edge (epilog)->src;
/* Scalar version loop may be preferred. In this case, add guard
@@ -3335,7 +3338,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
free (bbs);
free (original_bbs);
}
- else
+ else if (old_count.nonzero_p ())
scale_loop_profile (epilog, guard_to->count.probability_in (old_count), -1);
/* Only need to handle basic block before epilog loop if it's not
@@ -3374,16 +3377,18 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
{
guard_cond = fold_build2 (EQ_EXPR, boolean_type_node,
niters, niters_vector_mult_vf);
- guard_bb = single_exit (loop)->dest;
- guard_to = split_edge (single_exit (epilog));
+ guard_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
+ edge epilog_e = LOOP_VINFO_EPILOGUE_IV_EXIT (loop_vinfo);
+ guard_to = split_edge (epilog_e);
guard_e = slpeel_add_loop_guard (guard_bb, guard_cond, guard_to,
skip_vector ? anchor : guard_bb,
prob_epilog.invert (),
irred_flag);
if (vect_epilogues)
epilogue_vinfo->skip_this_loop_edge = guard_e;
- slpeel_update_phi_nodes_for_guard2 (loop, epilog, guard_e,
- single_exit (epilog));
+ edge main_iv = LOOP_VINFO_IV_EXIT (loop_vinfo);
+ slpeel_update_phi_nodes_for_guard2 (loop, epilog, main_iv, guard_e,
+ epilog_e);
/* Only need to handle basic block before epilog loop if it's not
the guard_bb, which is the case when skip_vector is true. */
if (guard_bb != bb_before_epilog)
@@ -3394,8 +3399,6 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
}
scale_loop_profile (epilog, prob_epilog, -1);
}
- else
- slpeel_update_phi_nodes_for_lcssa (epilog);
unsigned HOST_WIDE_INT bound;
if (bound_scalar.is_constant (&bound))
@@ -3416,6 +3419,8 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
{
epilog->aux = epilogue_vinfo;
LOOP_VINFO_LOOP (epilogue_vinfo) = epilog;
+ LOOP_VINFO_IV_EXIT (epilogue_vinfo)
+ = LOOP_VINFO_EPILOGUE_IV_EXIT (loop_vinfo);
loop_constraint_clear (epilog, LOOP_C_INFINITE);
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 23c6e82..ebab195 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -851,80 +851,137 @@ vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
in NUMBER_OF_ITERATIONSM1. Place the condition under which the
niter information holds in ASSUMPTIONS.
- Return the loop exit condition. */
+ Return the loop exit conditions. */
-static gcond *
-vect_get_loop_niters (class loop *loop, tree *assumptions,
+static vec<gcond *>
+vect_get_loop_niters (class loop *loop, const_edge main_exit, tree *assumptions,
tree *number_of_iterations, tree *number_of_iterationsm1)
{
- edge exit = single_exit (loop);
+ auto_vec<edge> exits = get_loop_exit_edges (loop);
+ vec<gcond *> conds;
+ conds.create (exits.length ());
class tree_niter_desc niter_desc;
tree niter_assumptions, niter, may_be_zero;
- gcond *cond = get_loop_exit_condition (loop);
*assumptions = boolean_true_node;
*number_of_iterationsm1 = chrec_dont_know;
*number_of_iterations = chrec_dont_know;
+
DUMP_VECT_SCOPE ("get_loop_niters");
- if (!exit)
- return cond;
+ if (exits.is_empty ())
+ return conds;
+
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "Loop has %d exits.\n",
+ exits.length ());
- may_be_zero = NULL_TREE;
- if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
- || chrec_contains_undetermined (niter_desc.niter))
- return cond;
+ edge exit;
+ unsigned int i;
+ FOR_EACH_VEC_ELT (exits, i, exit)
+ {
+ gcond *cond = get_loop_exit_condition (exit);
+ if (cond)
+ conds.safe_push (cond);
- niter_assumptions = niter_desc.assumptions;
- may_be_zero = niter_desc.may_be_zero;
- niter = niter_desc.niter;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "Analyzing exit %d...\n", i);
- if (may_be_zero && integer_zerop (may_be_zero))
- may_be_zero = NULL_TREE;
+ if (exit != main_exit)
+ continue;
- if (may_be_zero)
- {
- if (COMPARISON_CLASS_P (may_be_zero))
+ may_be_zero = NULL_TREE;
+ if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
+ || chrec_contains_undetermined (niter_desc.niter))
+ continue;
+
+ niter_assumptions = niter_desc.assumptions;
+ may_be_zero = niter_desc.may_be_zero;
+ niter = niter_desc.niter;
+
+ if (may_be_zero && integer_zerop (may_be_zero))
+ may_be_zero = NULL_TREE;
+
+ if (may_be_zero)
{
- /* Try to combine may_be_zero with assumptions, this can simplify
- computation of niter expression. */
- if (niter_assumptions && !integer_nonzerop (niter_assumptions))
- niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- niter_assumptions,
- fold_build1 (TRUTH_NOT_EXPR,
- boolean_type_node,
- may_be_zero));
+ if (COMPARISON_CLASS_P (may_be_zero))
+ {
+ /* Try to combine may_be_zero with assumptions, this can simplify
+ computation of niter expression. */
+ if (niter_assumptions && !integer_nonzerop (niter_assumptions))
+ niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ niter_assumptions,
+ fold_build1 (TRUTH_NOT_EXPR,
+ boolean_type_node,
+ may_be_zero));
+ else
+ niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
+ build_int_cst (TREE_TYPE (niter), 0),
+ rewrite_to_non_trapping_overflow (niter));
+
+ may_be_zero = NULL_TREE;
+ }
+ else if (integer_nonzerop (may_be_zero))
+ {
+ *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
+ *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
+ continue;
+ }
else
- niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
- build_int_cst (TREE_TYPE (niter), 0),
- rewrite_to_non_trapping_overflow (niter));
+ continue;
+ }
- may_be_zero = NULL_TREE;
- }
- else if (integer_nonzerop (may_be_zero))
- {
- *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
- *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
- return cond;
- }
- else
- return cond;
+ /* Loop assumptions are based off the normal exit. */
+ *assumptions = niter_assumptions;
+ *number_of_iterationsm1 = niter;
+
+ /* We want the number of loop header executions which is the number
+ of latch executions plus one.
+ ??? For UINT_MAX latch executions this number overflows to zero
+ for loops like do { n++; } while (n != 0); */
+ if (niter && !chrec_contains_undetermined (niter))
+ niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter),
+ unshare_expr (niter),
+ build_int_cst (TREE_TYPE (niter), 1));
+ *number_of_iterations = niter;
}
- *assumptions = niter_assumptions;
- *number_of_iterationsm1 = niter;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "All loop exits successfully analyzed.\n");
- /* We want the number of loop header executions which is the number
- of latch executions plus one.
- ??? For UINT_MAX latch executions this number overflows to zero
- for loops like do { n++; } while (n != 0); */
- if (niter && !chrec_contains_undetermined (niter))
- niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
- build_int_cst (TREE_TYPE (niter), 1));
- *number_of_iterations = niter;
+ return conds;
+}
- return cond;
+/* Determine the main loop exit for the vectorizer. */
+
+edge
+vec_init_loop_exit_info (class loop *loop)
+{
+ /* Before we begin we must first determine which exit is the main one and
+ which are auxilary exits. */
+ auto_vec<edge> exits = get_loop_exit_edges (loop);
+ if (exits.length () == 1)
+ return exits[0];
+
+ /* If we have multiple exits we only support counting IV at the moment. Analyze
+ all exits and return one */
+ class tree_niter_desc niter_desc;
+ edge candidate = NULL;
+ for (edge exit : exits)
+ {
+ if (!get_loop_exit_condition (exit))
+ continue;
+
+ if (number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
+ && !chrec_contains_undetermined (niter_desc.niter))
+ {
+ if (!niter_desc.may_be_zero || !candidate)
+ candidate = exit;
+ }
+ }
+
+ return candidate;
}
/* Function bb_in_loop_p
@@ -987,7 +1044,10 @@ _loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared)
has_mask_store (false),
scalar_loop_scaling (profile_probability::uninitialized ()),
scalar_loop (NULL),
- orig_loop_info (NULL)
+ orig_loop_info (NULL),
+ vec_loop_iv_exit (NULL),
+ vec_epilogue_loop_iv_exit (NULL),
+ scalar_loop_iv_exit (NULL)
{
/* CHECKME: We want to visit all BBs before their successors (except for
latch blocks, for which this assertion wouldn't hold). In the simple
@@ -1646,6 +1706,18 @@ vect_analyze_loop_form (class loop *loop, vect_loop_form_info *info)
{
DUMP_VECT_SCOPE ("vect_analyze_loop_form");
+ edge exit_e = vec_init_loop_exit_info (loop);
+ if (!exit_e)
+ return opt_result::failure_at (vect_location,
+ "not vectorized:"
+ " could not determine main exit from"
+ " loop with multiple exits.\n");
+ info->loop_exit = exit_e;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "using as main loop exit: %d -> %d [AUX: %p]\n",
+ exit_e->src->index, exit_e->dest->index, exit_e->aux);
+
/* Different restrictions apply when we are considering an inner-most loop,
vs. an outer (nested) loop.
(FORNOW. May want to relax some of these restrictions in the future). */
@@ -1739,7 +1811,7 @@ vect_analyze_loop_form (class loop *loop, vect_loop_form_info *info)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Considering outer-loop vectorization.\n");
- info->inner_loop_cond = inner.loop_cond;
+ info->inner_loop_cond = inner.conds[0];
}
if (!single_exit (loop))
@@ -1760,31 +1832,39 @@ vect_analyze_loop_form (class loop *loop, vect_loop_form_info *info)
"not vectorized: latch block not empty.\n");
/* Make sure the exit is not abnormal. */
- edge e = single_exit (loop);
- if (e->flags & EDGE_ABNORMAL)
+ if (exit_e->flags & EDGE_ABNORMAL)
return opt_result::failure_at (vect_location,
"not vectorized:"
" abnormal loop exit edge.\n");
- info->loop_cond
- = vect_get_loop_niters (loop, &info->assumptions,
+ info->conds
+ = vect_get_loop_niters (loop, exit_e, &info->assumptions,
&info->number_of_iterations,
&info->number_of_iterationsm1);
- if (!info->loop_cond)
+
+ if (info->conds.is_empty ())
return opt_result::failure_at
(vect_location,
"not vectorized: complicated exit condition.\n");
+ /* Determine what the primary and alternate exit conds are. */
+ for (unsigned i = 0; i < info->conds.length (); i++)
+ {
+ gcond *cond = info->conds[i];
+ if (exit_e->src == gimple_bb (cond))
+ std::swap (info->conds[0], info->conds[i]);
+ }
+
if (integer_zerop (info->assumptions)
|| !info->number_of_iterations
|| chrec_contains_undetermined (info->number_of_iterations))
return opt_result::failure_at
- (info->loop_cond,
+ (info->conds[0],
"not vectorized: number of iterations cannot be computed.\n");
if (integer_zerop (info->number_of_iterations))
return opt_result::failure_at
- (info->loop_cond,
+ (info->conds[0],
"not vectorized: number of iterations = 0.\n");
if (!(tree_fits_shwi_p (info->number_of_iterations)
@@ -1819,8 +1899,18 @@ vect_create_loop_vinfo (class loop *loop, vec_info_shared *shared,
if (!integer_onep (info->assumptions) && !main_loop_info)
LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = info->assumptions;
- stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (info->loop_cond);
- STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
+ for (gcond *cond : info->conds)
+ {
+ stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (cond);
+ STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
+ }
+
+ for (unsigned i = 1; i < info->conds.length (); i ++)
+ LOOP_VINFO_LOOP_CONDS (loop_vinfo).safe_push (info->conds[i]);
+ LOOP_VINFO_LOOP_IV_COND (loop_vinfo) = info->conds[0];
+
+ LOOP_VINFO_IV_EXIT (loop_vinfo) = info->loop_exit;
+
if (info->inner_loop_cond)
{
stmt_vec_info inner_loop_cond_info
@@ -3063,9 +3153,9 @@ start_over:
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
if (!vect_can_advance_ivs_p (loop_vinfo)
- || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
- single_exit (LOOP_VINFO_LOOP
- (loop_vinfo))))
+ || !slpeel_can_duplicate_loop_p (loop,
+ LOOP_VINFO_IV_EXIT (loop_vinfo),
+ LOOP_VINFO_IV_EXIT (loop_vinfo)))
{
ok = opt_result::failure_at (vect_location,
"not vectorized: can't create required "
@@ -3986,24 +4076,15 @@ pop:
??? We could relax this and handle arbitrary live stmts by
forcing a scalar epilogue for example. */
imm_use_iterator imm_iter;
+ use_operand_p use_p;
gimple *op_use_stmt;
unsigned cnt = 0;
FOR_EACH_IMM_USE_STMT (op_use_stmt, imm_iter, op.ops[opi])
if (!is_gimple_debug (op_use_stmt)
&& (*code != ERROR_MARK
|| flow_bb_inside_loop_p (loop, gimple_bb (op_use_stmt))))
- {
- /* We want to allow x + x but not x < 1 ? x : 2. */
- if (is_gimple_assign (op_use_stmt)
- && gimple_assign_rhs_code (op_use_stmt) == COND_EXPR)
- {
- use_operand_p use_p;
- FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
- cnt++;
- }
- else
- cnt++;
- }
+ FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
+ cnt++;
if (cnt != 1)
{
fail = true;
@@ -5780,7 +5861,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
basic_block exit_bb;
tree scalar_dest;
tree scalar_type;
- gimple *new_phi = NULL, *phi;
+ gimple *new_phi = NULL, *phi = NULL;
gimple_stmt_iterator exit_gsi;
tree new_temp = NULL_TREE, new_name, new_scalar_dest;
gimple *epilog_stmt = NULL;
@@ -6002,7 +6083,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
Store them in NEW_PHIS. */
if (double_reduc)
loop = outer_loop;
- exit_bb = single_exit (loop)->dest;
+ exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
exit_gsi = gsi_after_labels (exit_bb);
reduc_inputs.create (slp_node ? vec_num : ncopies);
for (unsigned i = 0; i < vec_num; i++)
@@ -6018,7 +6099,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
phi = create_phi_node (new_def, exit_bb);
if (j)
def = gimple_get_lhs (STMT_VINFO_VEC_STMTS (rdef_info)[j]);
- SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
+ SET_PHI_ARG_DEF (phi, LOOP_VINFO_IV_EXIT (loop_vinfo)->dest_idx, def);
new_def = gimple_convert (&stmts, vectype, new_def);
reduc_inputs.quick_push (new_def);
}
@@ -10416,12 +10497,12 @@ vectorizable_live_operation (vec_info *vinfo, stmt_vec_info stmt_info,
lhs' = new_tree; */
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- basic_block exit_bb = single_exit (loop)->dest;
+ basic_block exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
gcc_assert (single_pred_p (exit_bb));
tree vec_lhs_phi = copy_ssa_name (vec_lhs);
gimple *phi = create_phi_node (vec_lhs_phi, exit_bb);
- SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, vec_lhs);
+ SET_PHI_ARG_DEF (phi, LOOP_VINFO_IV_EXIT (loop_vinfo)->dest_idx, vec_lhs);
gimple_seq stmts = NULL;
tree new_tree;
@@ -10965,7 +11046,7 @@ vect_get_loop_len (loop_vec_info loop_vinfo, gimple_stmt_iterator *gsi,
profile. */
static void
-scale_profile_for_vect_loop (class loop *loop, unsigned vf, bool flat)
+scale_profile_for_vect_loop (class loop *loop, edge exit_e, unsigned vf, bool flat)
{
/* For flat profiles do not scale down proportionally by VF and only
cap by known iteration count bounds. */
@@ -10980,7 +11061,6 @@ scale_profile_for_vect_loop (class loop *loop, unsigned vf, bool flat)
return;
}
/* Loop body executes VF fewer times and exit increases VF times. */
- edge exit_e = single_exit (loop);
profile_count entry_count = loop_preheader_edge (loop)->count ();
/* If we have unreliable loop profile avoid dropping entry
@@ -11350,7 +11430,7 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
/* Make sure there exists a single-predecessor exit bb. Do this before
versioning. */
- edge e = single_exit (loop);
+ edge e = LOOP_VINFO_IV_EXIT (loop_vinfo);
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e, true);
@@ -11376,7 +11456,7 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
loop closed PHI nodes on the exit. */
if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
{
- e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
+ e = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e, true);
@@ -11625,8 +11705,9 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
a zero NITERS becomes a nonzero NITERS_VECTOR. */
if (integer_onep (step_vector))
niters_no_overflow = true;
- vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
- niters_vector_mult_vf, !niters_no_overflow);
+ vect_set_loop_condition (loop, LOOP_VINFO_IV_EXIT (loop_vinfo), loop_vinfo,
+ niters_vector, step_vector, niters_vector_mult_vf,
+ !niters_no_overflow);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
@@ -11681,7 +11762,7 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
LOOP_VINFO_VECT_FACTOR (loop_vinfo),
&bound))
loop->nb_iterations_upper_bound
- = wi::umin ((widest_int) (bound - 1),
+ = wi::umin ((bound_wide_int) (bound - 1),
loop->nb_iterations_upper_bound);
}
}
@@ -11699,7 +11780,8 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
assumed_vf) - 1
: wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
assumed_vf) - 1);
- scale_profile_for_vect_loop (loop, assumed_vf, flat);
+ scale_profile_for_vect_loop (loop, LOOP_VINFO_IV_EXIT (loop_vinfo),
+ assumed_vf, flat);
if (dump_enabled_p ())
{
diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index a2ed036..6964c99 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -2944,7 +2944,7 @@ vect_recog_over_widening_pattern (vec_info *vinfo,
/* Check the operands. */
unsigned int nops = gimple_num_ops (last_stmt) - first_op;
auto_vec <vect_unpromoted_value, 3> unprom (nops);
- unprom.quick_grow (nops);
+ unprom.quick_grow_cleared (nops);
unsigned int min_precision = 0;
bool single_use_p = false;
for (unsigned int i = 0; i < nops; ++i)
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index a3e54eb..d081999 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -117,6 +117,7 @@ _slp_tree::_slp_tree ()
SLP_TREE_CHILDREN (this) = vNULL;
SLP_TREE_LOAD_PERMUTATION (this) = vNULL;
SLP_TREE_LANE_PERMUTATION (this) = vNULL;
+ SLP_TREE_SIMD_CLONE_INFO (this) = vNULL;
SLP_TREE_DEF_TYPE (this) = vect_uninitialized_def;
SLP_TREE_CODE (this) = ERROR_MARK;
SLP_TREE_VECTYPE (this) = NULL_TREE;
@@ -143,6 +144,7 @@ _slp_tree::~_slp_tree ()
SLP_TREE_VEC_DEFS (this).release ();
SLP_TREE_LOAD_PERMUTATION (this).release ();
SLP_TREE_LANE_PERMUTATION (this).release ();
+ SLP_TREE_SIMD_CLONE_INFO (this).release ();
if (this->failed)
free (failed);
}
@@ -505,6 +507,14 @@ static const int arg2_map[] = { 1, 2 };
static const int arg1_arg4_map[] = { 2, 1, 4 };
static const int arg3_arg2_map[] = { 2, 3, 2 };
static const int op1_op0_map[] = { 2, 1, 0 };
+static const int mask_call_maps[6][7] = {
+ { 1, 1, },
+ { 2, 1, 2, },
+ { 3, 1, 2, 3, },
+ { 4, 1, 2, 3, 4, },
+ { 5, 1, 2, 3, 4, 5, },
+ { 6, 1, 2, 3, 4, 5, 6 },
+};
/* For most SLP statements, there is a one-to-one mapping between
gimple arguments and child nodes. If that is not true for STMT,
@@ -547,6 +557,15 @@ vect_get_operand_map (const gimple *stmt, unsigned char swap = 0)
case IFN_MASK_STORE:
return arg3_arg2_map;
+ case IFN_MASK_CALL:
+ {
+ unsigned nargs = gimple_call_num_args (call);
+ if (nargs >= 2 && nargs <= 7)
+ return mask_call_maps[nargs-2];
+ else
+ return nullptr;
+ }
+
default:
break;
}
@@ -1070,7 +1089,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (call_stmt)
{
combined_fn cfn = gimple_call_combined_fn (call_stmt);
- if (cfn != CFN_LAST)
+ if (cfn != CFN_LAST && cfn != CFN_MASK_CALL)
rhs_code = cfn;
else
rhs_code = CALL_EXPR;
@@ -1084,7 +1103,9 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
ldst_p = true;
rhs_code = CFN_MASK_STORE;
}
- else if ((internal_fn_p (cfn)
+ else if ((cfn != CFN_LAST
+ && cfn != CFN_MASK_CALL
+ && internal_fn_p (cfn)
&& !vectorizable_internal_fn_p (as_internal_fn (cfn)))
|| gimple_call_tail_p (call_stmt)
|| gimple_call_noreturn_p (call_stmt)
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index cd7c109..e5ff44c 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -951,138 +951,6 @@ cfun_returns (tree decl)
return false;
}
-/* Function vect_model_store_cost
-
- Models cost for stores. In the case of grouped accesses, one access
- has the overhead of the grouped access attributed to it. */
-
-static void
-vect_model_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies,
- vect_memory_access_type memory_access_type,
- gather_scatter_info *gs_info,
- dr_alignment_support alignment_support_scheme,
- int misalignment,
- vec_load_store_type vls_type, slp_tree slp_node,
- stmt_vector_for_cost *cost_vec)
-{
- unsigned int inside_cost = 0, prologue_cost = 0;
- stmt_vec_info first_stmt_info = stmt_info;
- bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
-
- /* ??? Somehow we need to fix this at the callers. */
- if (slp_node)
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
-
- if (vls_type == VLS_STORE_INVARIANT)
- {
- if (!slp_node)
- prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
- stmt_info, 0, vect_prologue);
- }
-
- /* Grouped stores update all elements in the group at once,
- so we want the DR for the first statement. */
- if (!slp_node && grouped_access_p)
- first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
-
- /* True if we should include any once-per-group costs as well as
- the cost of the statement itself. For SLP we only get called
- once per group anyhow. */
- bool first_stmt_p = (first_stmt_info == stmt_info);
-
- /* We assume that the cost of a single store-lanes instruction is
- equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
- access is instead being provided by a permute-and-store operation,
- include the cost of the permutes. */
- if (first_stmt_p
- && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
- {
- /* Uses a high and low interleave or shuffle operations for each
- needed permute. */
- int group_size = DR_GROUP_SIZE (first_stmt_info);
- int nstmts = ncopies * ceil_log2 (group_size) * group_size;
- inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
- stmt_info, 0, vect_body);
-
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "vect_model_store_cost: strided group_size = %d .\n",
- group_size);
- }
-
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- /* Costs of the stores. */
- if (memory_access_type == VMAT_ELEMENTWISE
- || memory_access_type == VMAT_GATHER_SCATTER)
- {
- unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
- if (memory_access_type == VMAT_GATHER_SCATTER
- && gs_info->ifn == IFN_LAST && !gs_info->decl)
- /* For emulated scatter N offset vector element extracts
- (we assume the scalar scaling and ptr + offset add is consumed by
- the load). */
- inside_cost += record_stmt_cost (cost_vec, ncopies * assumed_nunits,
- vec_to_scalar, stmt_info, 0,
- vect_body);
- /* N scalar stores plus extracting the elements. */
- inside_cost += record_stmt_cost (cost_vec,
- ncopies * assumed_nunits,
- scalar_store, stmt_info, 0, vect_body);
- }
- else
- vect_get_store_cost (vinfo, stmt_info, ncopies, alignment_support_scheme,
- misalignment, &inside_cost, cost_vec);
-
- if (memory_access_type == VMAT_ELEMENTWISE
- || memory_access_type == VMAT_STRIDED_SLP
- || (memory_access_type == VMAT_GATHER_SCATTER
- && gs_info->ifn == IFN_LAST && !gs_info->decl))
- {
- /* N scalar stores plus extracting the elements. */
- unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
- inside_cost += record_stmt_cost (cost_vec,
- ncopies * assumed_nunits,
- vec_to_scalar, stmt_info, 0, vect_body);
- }
-
- /* When vectorizing a store into the function result assign
- a penalty if the function returns in a multi-register location.
- In this case we assume we'll end up with having to spill the
- vector result and do piecewise loads as a conservative estimate. */
- tree base = get_base_address (STMT_VINFO_DATA_REF (stmt_info)->ref);
- if (base
- && (TREE_CODE (base) == RESULT_DECL
- || (DECL_P (base) && cfun_returns (base)))
- && !aggregate_value_p (base, cfun->decl))
- {
- rtx reg = hard_function_value (TREE_TYPE (base), cfun->decl, 0, 1);
- /* ??? Handle PARALLEL in some way. */
- if (REG_P (reg))
- {
- int nregs = hard_regno_nregs (REGNO (reg), GET_MODE (reg));
- /* Assume that a single reg-reg move is possible and cheap,
- do not account for vector to gp register move cost. */
- if (nregs > 1)
- {
- /* Spill. */
- prologue_cost += record_stmt_cost (cost_vec, ncopies,
- vector_store,
- stmt_info, 0, vect_epilogue);
- /* Loads. */
- prologue_cost += record_stmt_cost (cost_vec, ncopies * nregs,
- scalar_load,
- stmt_info, 0, vect_epilogue);
- }
- }
- }
-
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "vect_model_store_cost: inside_cost = %d, "
- "prologue_cost = %d .\n", inside_cost, prologue_cost);
-}
-
-
/* Calculate cost of DR's memory access. */
void
vect_get_store_cost (vec_info *, stmt_vec_info stmt_info, int ncopies,
@@ -2999,7 +2867,8 @@ vect_build_gather_load_calls (vec_info *vinfo, stmt_vec_info stmt_info,
static void
vect_build_scatter_store_calls (vec_info *vinfo, stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi, gimple **vec_stmt,
- gather_scatter_info *gs_info, tree mask)
+ gather_scatter_info *gs_info, tree mask,
+ stmt_vector_for_cost *cost_vec)
{
loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
@@ -3009,6 +2878,30 @@ vect_build_scatter_store_calls (vec_info *vinfo, stmt_vec_info stmt_info,
poly_uint64 scatter_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
+ /* FIXME: Keep the previous costing way in vect_model_store_cost by
+ costing N scalar stores, but it should be tweaked to use target
+ specific costs on related scatter store calls. */
+ if (cost_vec)
+ {
+ tree op = vect_get_store_rhs (stmt_info);
+ enum vect_def_type dt;
+ gcc_assert (vect_is_simple_use (op, vinfo, &dt));
+ unsigned int inside_cost, prologue_cost = 0;
+ if (dt == vect_constant_def || dt == vect_external_def)
+ prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
+ stmt_info, 0, vect_prologue);
+ unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
+ inside_cost = record_stmt_cost (cost_vec, ncopies * assumed_nunits,
+ scalar_store, stmt_info, 0, vect_body);
+
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: inside_cost = %d, "
+ "prologue_cost = %d .\n",
+ inside_cost, prologue_cost);
+ return;
+ }
+
tree perm_mask = NULL_TREE, mask_halfvectype = NULL_TREE;
if (known_eq (nunits, scatter_off_nunits))
modifier = NONE;
@@ -4315,10 +4208,6 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
return false;
- /* FORNOW */
- if (slp_node)
- return false;
-
/* Process function arguments. */
nargs = gimple_call_num_args (stmt) - arg_offset;
@@ -4326,7 +4215,11 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (nargs == 0)
return false;
+ vec<tree>& simd_clone_info = (slp_node ? SLP_TREE_SIMD_CLONE_INFO (slp_node)
+ : STMT_VINFO_SIMD_CLONE_INFO (stmt_info));
arginfo.reserve (nargs, true);
+ auto_vec<slp_tree> slp_op;
+ slp_op.safe_grow_cleared (nargs);
for (i = 0; i < nargs; i++)
{
@@ -4338,9 +4231,12 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
thisarginfo.op = NULL_TREE;
thisarginfo.simd_lane_linear = false;
- op = gimple_call_arg (stmt, i + arg_offset);
- if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
- &thisarginfo.vectype)
+ int op_no = i + arg_offset;
+ if (slp_node)
+ op_no = vect_slp_child_index_for_operand (stmt, op_no);
+ if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
+ op_no, &op, &slp_op[i],
+ &thisarginfo.dt, &thisarginfo.vectype)
|| thisarginfo.dt == vect_uninitialized_def)
{
if (dump_enabled_p ())
@@ -4351,30 +4247,33 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (thisarginfo.dt == vect_constant_def
|| thisarginfo.dt == vect_external_def)
- gcc_assert (thisarginfo.vectype == NULL_TREE);
+ {
+ gcc_assert (vec_stmt || thisarginfo.vectype == NULL_TREE);
+ if (!vec_stmt)
+ thisarginfo.vectype = get_vectype_for_scalar_type (vinfo,
+ TREE_TYPE (op),
+ slp_node);
+ }
else
gcc_assert (thisarginfo.vectype != NULL_TREE);
/* For linear arguments, the analyze phase should have saved
- the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
- if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
- && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
+ the base and step in {STMT_VINFO,SLP_TREE}_SIMD_CLONE_INFO. */
+ if (i * 3 + 4 <= simd_clone_info.length ()
+ && simd_clone_info[i * 3 + 2])
{
gcc_assert (vec_stmt);
- thisarginfo.linear_step
- = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
- thisarginfo.op
- = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
+ thisarginfo.linear_step = tree_to_shwi (simd_clone_info[i * 3 + 2]);
+ thisarginfo.op = simd_clone_info[i * 3 + 1];
thisarginfo.simd_lane_linear
- = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
- == boolean_true_node);
+ = (simd_clone_info[i * 3 + 3] == boolean_true_node);
/* If loop has been peeled for alignment, we need to adjust it. */
tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
if (n1 != n2 && !thisarginfo.simd_lane_linear)
{
tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
- tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
+ tree step = simd_clone_info[i * 3 + 2];
tree opt = TREE_TYPE (thisarginfo.op);
bias = fold_convert (TREE_TYPE (step), bias);
bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
@@ -4408,15 +4307,14 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
&& thisarginfo.dt != vect_constant_def
&& thisarginfo.dt != vect_external_def
&& loop_vinfo
- && !slp_node
&& TREE_CODE (op) == SSA_NAME)
vect_simd_lane_linear (op, loop, &thisarginfo);
arginfo.quick_push (thisarginfo);
}
- poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
- if (!vf.is_constant ())
+ if (loop_vinfo
+ && !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant ())
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -4425,17 +4323,20 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
return false;
}
+ poly_uint64 vf = loop_vinfo ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1;
+ unsigned group_size = slp_node ? SLP_TREE_LANES (slp_node) : 1;
unsigned int badness = 0;
struct cgraph_node *bestn = NULL;
- if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
- bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
+ if (simd_clone_info.exists ())
+ bestn = cgraph_node::get (simd_clone_info[0]);
else
for (struct cgraph_node *n = node->simd_clones; n != NULL;
n = n->simdclone->next_clone)
{
unsigned int this_badness = 0;
unsigned int num_calls;
- if (!constant_multiple_p (vf, n->simdclone->simdlen, &num_calls)
+ if (!constant_multiple_p (vf * group_size,
+ n->simdclone->simdlen, &num_calls)
|| n->simdclone->nargs != nargs)
continue;
if (num_calls != 1)
@@ -4483,6 +4384,16 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
i = -1;
break;
case SIMD_CLONE_ARG_TYPE_MASK:
+ /* While we can create a traditional data vector from
+ an incoming integer mode mask we have no good way to
+ force generate an integer mode mask from a traditional
+ boolean vector input. */
+ if (SCALAR_INT_MODE_P (n->simdclone->mask_mode)
+ && !SCALAR_INT_MODE_P (TYPE_MODE (arginfo[i].vectype)))
+ i = -1;
+ else if (!SCALAR_INT_MODE_P (n->simdclone->mask_mode)
+ && SCALAR_INT_MODE_P (TYPE_MODE (arginfo[i].vectype)))
+ this_badness += 2048;
break;
}
if (i == (size_t) -1)
@@ -4508,6 +4419,12 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (bestn == NULL)
return false;
+ unsigned int num_mask_args = 0;
+ if (SCALAR_INT_MODE_P (bestn->simdclone->mask_mode))
+ for (i = 0; i < nargs; i++)
+ if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
+ num_mask_args++;
+
for (i = 0; i < nargs; i++)
{
if ((arginfo[i].dt == vect_constant_def
@@ -4532,36 +4449,59 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
return false;
}
- if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK
- && bestn->simdclone->mask_mode == VOIDmode
- && (simd_clone_subparts (bestn->simdclone->args[i].vector_type)
- != simd_clone_subparts (arginfo[i].vectype)))
+ if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
{
- /* FORNOW we only have partial support for vector-type masks that
- can't hold all of simdlen. */
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION,
- vect_location,
- "in-branch vector clones are not yet"
- " supported for mismatched vector sizes.\n");
- return false;
- }
- if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK
- && bestn->simdclone->mask_mode != VOIDmode)
- {
- /* FORNOW don't support integer-type masks. */
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION,
- vect_location,
- "in-branch vector clones are not yet"
- " supported for integer mask modes.\n");
- return false;
+ if (bestn->simdclone->mask_mode == VOIDmode)
+ {
+ if (simd_clone_subparts (bestn->simdclone->args[i].vector_type)
+ != simd_clone_subparts (arginfo[i].vectype))
+ {
+ /* FORNOW we only have partial support for vector-type masks
+ that can't hold all of simdlen. */
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "in-branch vector clones are not yet"
+ " supported for mismatched vector sizes.\n");
+ return false;
+ }
+ }
+ else if (SCALAR_INT_MODE_P (bestn->simdclone->mask_mode))
+ {
+ if (!SCALAR_INT_MODE_P (TYPE_MODE (arginfo[i].vectype))
+ || maybe_ne (exact_div (bestn->simdclone->simdlen,
+ num_mask_args),
+ simd_clone_subparts (arginfo[i].vectype)))
+ {
+ /* FORNOW we only have partial support for integer-type masks
+ that represent the same number of lanes as the
+ vectorized mask inputs. */
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "in-branch vector clones are not yet "
+ "supported for mismatched vector sizes.\n");
+ return false;
+ }
+ }
+ else
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "in-branch vector clones not supported"
+ " on this target.\n");
+ return false;
+ }
}
}
fndecl = bestn->decl;
nunits = bestn->simdclone->simdlen;
- ncopies = vector_unroll_factor (vf, nunits);
+ if (slp_node)
+ ncopies = vector_unroll_factor (vf * group_size, nunits);
+ else
+ ncopies = vector_unroll_factor (vf, nunits);
/* If the function isn't const, only allow it in simd loops where user
has asserted that at least nunits consecutive iterations can be
@@ -4576,6 +4516,15 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (!vec_stmt) /* transformation not required. */
{
+ if (slp_node)
+ for (unsigned i = 0; i < nargs; ++i)
+ if (!vect_maybe_update_slp_op_vectype (slp_op[i], arginfo[i].vectype))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "incompatible vector types for invariants\n");
+ return false;
+ }
/* When the original call is pure or const but the SIMD ABI dictates
an aggregate return we will have to use a virtual definition and
in a loop eventually even need to add a virtual PHI. That's
@@ -4584,24 +4533,27 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
&& !gimple_vdef (stmt)
&& TREE_CODE (TREE_TYPE (TREE_TYPE (bestn->decl))) == ARRAY_TYPE)
vinfo->any_known_not_updated_vssa = true;
- STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
+ /* ??? For SLP code-gen we end up inserting after the last
+ vector argument def rather than at the original call position
+ so automagic virtual operand updating doesn't work. */
+ if (gimple_vuse (stmt) && slp_node)
+ vinfo->any_known_not_updated_vssa = true;
+ simd_clone_info.safe_push (bestn->decl);
for (i = 0; i < nargs; i++)
if ((bestn->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
|| (bestn->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
{
- STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
- + 1,
- true);
- STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
+ simd_clone_info.safe_grow_cleared (i * 3 + 1, true);
+ simd_clone_info.safe_push (arginfo[i].op);
tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
? size_type_node : TREE_TYPE (arginfo[i].op);
tree ls = build_int_cst (lst, arginfo[i].linear_step);
- STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
+ simd_clone_info.safe_push (ls);
tree sll = arginfo[i].simd_lane_linear
? boolean_true_node : boolean_false_node;
- STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
+ simd_clone_info.safe_push (sll);
}
STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
@@ -4633,8 +4585,14 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
auto_vec<vec<tree> > vec_oprnds;
auto_vec<unsigned> vec_oprnds_i;
- vec_oprnds.safe_grow_cleared (nargs, true);
vec_oprnds_i.safe_grow_cleared (nargs, true);
+ if (slp_node)
+ {
+ vec_oprnds.reserve_exact (nargs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_oprnds);
+ }
+ else
+ vec_oprnds.safe_grow_cleared (nargs, true);
for (j = 0; j < ncopies; ++j)
{
/* Build argument list for the vectorized call. */
@@ -4665,9 +4623,10 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
gcc_assert ((k & (k - 1)) == 0);
if (m == 0)
{
- vect_get_vec_defs_for_operand (vinfo, stmt_info,
- ncopies * o / k, op,
- &vec_oprnds[i]);
+ if (!slp_node)
+ vect_get_vec_defs_for_operand (vinfo, stmt_info,
+ ncopies * o / k, op,
+ &vec_oprnds[i]);
vec_oprnds_i[i] = 0;
vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
}
@@ -4703,10 +4662,11 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
{
if (m == 0 && l == 0)
{
- vect_get_vec_defs_for_operand (vinfo, stmt_info,
- k * o * ncopies,
- op,
- &vec_oprnds[i]);
+ if (!slp_node)
+ vect_get_vec_defs_for_operand (vinfo, stmt_info,
+ k * o * ncopies,
+ op,
+ &vec_oprnds[i]);
vec_oprnds_i[i] = 0;
vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
}
@@ -4747,14 +4707,9 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
}
break;
case SIMD_CLONE_ARG_TYPE_MASK:
- atype = bestn->simdclone->args[i].vector_type;
- if (bestn->simdclone->mask_mode != VOIDmode)
- {
- /* FORNOW: this is disabled above. */
- gcc_unreachable ();
- }
- else
+ if (bestn->simdclone->mask_mode == VOIDmode)
{
+ atype = bestn->simdclone->args[i].vector_type;
tree elt_type = TREE_TYPE (atype);
tree one = fold_convert (elt_type, integer_one_node);
tree zero = fold_convert (elt_type, integer_zero_node);
@@ -4777,10 +4732,11 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
elements as the current function. */
if (m == 0)
{
- vect_get_vec_defs_for_operand (vinfo, stmt_info,
- o * ncopies,
- op,
- &vec_oprnds[i]);
+ if (!slp_node)
+ vect_get_vec_defs_for_operand (vinfo, stmt_info,
+ o * ncopies,
+ op,
+ &vec_oprnds[i]);
vec_oprnds_i[i] = 0;
}
vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
@@ -4804,6 +4760,72 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
}
}
}
+ else if (SCALAR_INT_MODE_P (bestn->simdclone->mask_mode))
+ {
+ atype = bestn->simdclone->args[i].vector_type;
+ /* Guess the number of lanes represented by atype. */
+ unsigned HOST_WIDE_INT atype_subparts
+ = exact_div (bestn->simdclone->simdlen,
+ num_mask_args).to_constant ();
+ o = vector_unroll_factor (nunits, atype_subparts);
+ for (m = j * o; m < (j + 1) * o; m++)
+ {
+ if (m == 0)
+ {
+ if (!slp_node)
+ vect_get_vec_defs_for_operand (vinfo, stmt_info,
+ o * ncopies,
+ op,
+ &vec_oprnds[i]);
+ vec_oprnds_i[i] = 0;
+ }
+ if (atype_subparts
+ < simd_clone_subparts (arginfo[i].vectype))
+ {
+ /* The mask argument has fewer elements than the
+ input vector. */
+ /* FORNOW */
+ gcc_unreachable ();
+ }
+ else if (atype_subparts
+ == simd_clone_subparts (arginfo[i].vectype))
+ {
+ /* The vector mask argument matches the input
+ in the number of lanes, but not necessarily
+ in the mode. */
+ vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
+ tree st = lang_hooks.types.type_for_mode
+ (TYPE_MODE (TREE_TYPE (vec_oprnd0)), 1);
+ vec_oprnd0 = build1 (VIEW_CONVERT_EXPR, st,
+ vec_oprnd0);
+ gassign *new_stmt
+ = gimple_build_assign (make_ssa_name (st),
+ vec_oprnd0);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
+ if (!types_compatible_p (atype, st))
+ {
+ new_stmt
+ = gimple_build_assign (make_ssa_name (atype),
+ NOP_EXPR,
+ gimple_assign_lhs
+ (new_stmt));
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
+ }
+ vargs.safe_push (gimple_assign_lhs (new_stmt));
+ }
+ else
+ {
+ /* The mask argument has more elements than the
+ input vector. */
+ /* FORNOW */
+ gcc_unreachable ();
+ }
+ }
+ }
+ else
+ gcc_unreachable ();
break;
case SIMD_CLONE_ARG_TYPE_UNIFORM:
vargs.safe_push (op);
@@ -4924,7 +4946,11 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (j == 0 && l == 0)
*vec_stmt = new_stmt;
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ if (slp_node)
+ SLP_TREE_VEC_DEFS (slp_node)
+ .quick_push (gimple_assign_lhs (new_stmt));
+ else
+ STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
if (ratype)
@@ -4967,7 +4993,11 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if ((unsigned) j == k - 1)
*vec_stmt = new_stmt;
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ if (slp_node)
+ SLP_TREE_VEC_DEFS (slp_node)
+ .quick_push (gimple_assign_lhs (new_stmt));
+ else
+ STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
continue;
}
else if (ratype)
@@ -4990,7 +5020,10 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (j == 0)
*vec_stmt = new_stmt;
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ if (slp_node)
+ SLP_TREE_VEC_DEFS (slp_node).quick_push (gimple_get_lhs (new_stmt));
+ else
+ STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
for (i = 0; i < nargs; ++i)
@@ -8372,7 +8405,8 @@ vectorizable_store (vec_info *vinfo,
return false;
}
- if (!vec_stmt) /* transformation not required. */
+ bool costing_p = !vec_stmt;
+ if (costing_p) /* transformation not required. */
{
STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
@@ -8401,11 +8435,17 @@ vectorizable_store (vec_info *vinfo,
"Vectorizing an unaligned access.\n");
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
- vect_model_store_cost (vinfo, stmt_info, ncopies,
- memory_access_type, &gs_info,
- alignment_support_scheme,
- misalignment, vls_type, slp_node, cost_vec);
- return true;
+
+ /* As function vect_transform_stmt shows, for interleaving stores
+ the whole chain is vectorized when the last store in the chain
+ is reached, the other stores in the group are skipped. So we
+ want to only cost the last one here, but it's not trivial to
+ get the last, as it's equivalent to use the first one for
+ costing, use the first one instead. */
+ if (grouped_store
+ && !slp
+ && first_stmt_info != stmt_info)
+ return true;
}
gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
@@ -8415,12 +8455,34 @@ vectorizable_store (vec_info *vinfo,
if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
{
- vect_build_scatter_store_calls (vinfo, stmt_info, gsi, vec_stmt,
- &gs_info, mask);
+ vect_build_scatter_store_calls (vinfo, stmt_info, gsi, vec_stmt, &gs_info,
+ mask, cost_vec);
return true;
}
else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
- return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies);
+ {
+ gcc_assert (memory_access_type == VMAT_CONTIGUOUS);
+ gcc_assert (!slp);
+ if (costing_p)
+ {
+ unsigned int inside_cost = 0, prologue_cost = 0;
+ if (vls_type == VLS_STORE_INVARIANT)
+ prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
+ stmt_info, 0, vect_prologue);
+ vect_get_store_cost (vinfo, stmt_info, ncopies,
+ alignment_support_scheme, misalignment,
+ &inside_cost, cost_vec);
+
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: inside_cost = %d, "
+ "prologue_cost = %d .\n",
+ inside_cost, prologue_cost);
+
+ return true;
+ }
+ return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies);
+ }
if (grouped_store)
{
@@ -8449,13 +8511,39 @@ vectorizable_store (vec_info *vinfo,
else
ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "transform store. ncopies = %d\n", ncopies);
+ if (!costing_p && dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d\n",
+ ncopies);
+
+ /* Check if we need to update prologue cost for invariant,
+ and update it accordingly if so. If it's not for
+ interleaving store, we can just check vls_type; but if
+ it's for interleaving store, need to check the def_type
+ of the stored value since the current vls_type is just
+ for first_stmt_info. */
+ auto update_prologue_cost = [&](unsigned *prologue_cost, tree store_rhs)
+ {
+ gcc_assert (costing_p);
+ if (slp)
+ return;
+ if (grouped_store)
+ {
+ gcc_assert (store_rhs);
+ enum vect_def_type cdt;
+ gcc_assert (vect_is_simple_use (store_rhs, vinfo, &cdt));
+ if (cdt != vect_constant_def && cdt != vect_external_def)
+ return;
+ }
+ else if (vls_type != VLS_STORE_INVARIANT)
+ return;
+ *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info,
+ 0, vect_prologue);
+ };
if (memory_access_type == VMAT_ELEMENTWISE
|| memory_access_type == VMAT_STRIDED_SLP)
{
+ unsigned inside_cost = 0, prologue_cost = 0;
gimple_stmt_iterator incr_gsi;
bool insert_after;
gimple *incr;
@@ -8463,7 +8551,7 @@ vectorizable_store (vec_info *vinfo,
tree ivstep;
tree running_off;
tree stride_base, stride_step, alias_off;
- tree vec_oprnd;
+ tree vec_oprnd = NULL_TREE;
tree dr_offset;
unsigned int g;
/* Checked by get_load_store_type. */
@@ -8558,29 +8646,41 @@ vectorizable_store (vec_info *vinfo,
else if (group_size >= const_nunits
&& group_size % const_nunits == 0)
{
- nstores = 1;
- lnel = const_nunits;
- ltype = vectype;
- lvectype = vectype;
+ int mis_align = dr_misalignment (first_dr_info, vectype);
+ dr_alignment_support dr_align
+ = vect_supportable_dr_alignment (vinfo, dr_info, vectype,
+ mis_align);
+ if (dr_align == dr_aligned
+ || dr_align == dr_unaligned_supported)
+ {
+ nstores = 1;
+ lnel = const_nunits;
+ ltype = vectype;
+ lvectype = vectype;
+ alignment_support_scheme = dr_align;
+ misalignment = mis_align;
+ }
}
ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
}
- ivstep = stride_step;
- ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
- build_int_cst (TREE_TYPE (ivstep), vf));
+ if (!costing_p)
+ {
+ ivstep = stride_step;
+ ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
+ build_int_cst (TREE_TYPE (ivstep), vf));
- standard_iv_increment_position (loop, &incr_gsi, &insert_after);
+ standard_iv_increment_position (loop, &incr_gsi, &insert_after);
- stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
- ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
- create_iv (stride_base, PLUS_EXPR, ivstep, NULL,
- loop, &incr_gsi, insert_after,
- &offvar, NULL);
- incr = gsi_stmt (incr_gsi);
+ stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
+ ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
+ create_iv (stride_base, PLUS_EXPR, ivstep, NULL, loop, &incr_gsi,
+ insert_after, &offvar, NULL);
+ incr = gsi_stmt (incr_gsi);
- stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
+ stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
+ }
alias_off = build_int_cst (ref_type, 0);
stmt_vec_info next_stmt_info = first_stmt_info;
@@ -8588,39 +8688,70 @@ vectorizable_store (vec_info *vinfo,
for (g = 0; g < group_size; g++)
{
running_off = offvar;
- if (g)
+ if (!costing_p)
{
- tree size = TYPE_SIZE_UNIT (ltype);
- tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
- size);
- tree newoff = copy_ssa_name (running_off, NULL);
- incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
- running_off, pos);
- vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
- running_off = newoff;
+ if (g)
+ {
+ tree size = TYPE_SIZE_UNIT (ltype);
+ tree pos
+ = fold_build2 (MULT_EXPR, sizetype, size_int (g), size);
+ tree newoff = copy_ssa_name (running_off, NULL);
+ incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
+ running_off, pos);
+ vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
+ running_off = newoff;
+ }
}
if (!slp)
op = vect_get_store_rhs (next_stmt_info);
- vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies,
- op, &vec_oprnds);
+ if (!costing_p)
+ vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op,
+ &vec_oprnds);
+ else
+ update_prologue_cost (&prologue_cost, op);
unsigned int group_el = 0;
unsigned HOST_WIDE_INT
elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
for (j = 0; j < ncopies; j++)
{
- vec_oprnd = vec_oprnds[j];
- /* Pun the vector to extract from if necessary. */
- if (lvectype != vectype)
+ if (!costing_p)
{
- tree tem = make_ssa_name (lvectype);
- gimple *pun
- = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
- lvectype, vec_oprnd));
- vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
- vec_oprnd = tem;
+ vec_oprnd = vec_oprnds[j];
+ /* Pun the vector to extract from if necessary. */
+ if (lvectype != vectype)
+ {
+ tree tem = make_ssa_name (lvectype);
+ tree cvt
+ = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd);
+ gimple *pun = gimple_build_assign (tem, cvt);
+ vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
+ vec_oprnd = tem;
+ }
}
for (i = 0; i < nstores; i++)
{
+ if (costing_p)
+ {
+ /* Only need vector extracting when there are more
+ than one stores. */
+ if (nstores > 1)
+ inside_cost
+ += record_stmt_cost (cost_vec, 1, vec_to_scalar,
+ stmt_info, 0, vect_body);
+ /* Take a single lane vector type store as scalar
+ store to avoid ICE like 110776. */
+ if (VECTOR_TYPE_P (ltype)
+ && known_ne (TYPE_VECTOR_SUBPARTS (ltype), 1U))
+ vect_get_store_cost (vinfo, stmt_info, 1,
+ alignment_support_scheme,
+ misalignment, &inside_cost,
+ cost_vec);
+ else
+ inside_cost
+ += record_stmt_cost (cost_vec, 1, scalar_store,
+ stmt_info, 0, vect_body);
+ continue;
+ }
tree newref, newoff;
gimple *incr, *assign;
tree size = TYPE_SIZE (ltype);
@@ -8671,6 +8802,12 @@ vectorizable_store (vec_info *vinfo,
break;
}
+ if (costing_p && dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: inside_cost = %d, "
+ "prologue_cost = %d .\n",
+ inside_cost, prologue_cost);
+
return true;
}
@@ -8710,8 +8847,9 @@ vectorizable_store (vec_info *vinfo,
else if (memory_access_type == VMAT_GATHER_SCATTER)
{
aggr_type = elem_type;
- vect_get_strided_load_store_ops (stmt_info, loop_vinfo, gsi, &gs_info,
- &bump, &vec_offset, loop_lens);
+ if (!costing_p)
+ vect_get_strided_load_store_ops (stmt_info, loop_vinfo, gsi, &gs_info,
+ &bump, &vec_offset, loop_lens);
}
else
{
@@ -8723,7 +8861,7 @@ vectorizable_store (vec_info *vinfo,
memory_access_type, loop_lens);
}
- if (mask)
+ if (mask && !costing_p)
LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
/* In case the vectorization factor (VF) is bigger than the number
@@ -8774,6 +8912,7 @@ vectorizable_store (vec_info *vinfo,
if (memory_access_type == VMAT_LOAD_STORE_LANES)
{
gcc_assert (!slp && grouped_store);
+ unsigned inside_cost = 0, prologue_cost = 0;
for (j = 0; j < ncopies; j++)
{
gimple *new_stmt;
@@ -8789,29 +8928,39 @@ vectorizable_store (vec_info *vinfo,
DR_GROUP_SIZE is the exact number of stmts in the
chain. Therefore, NEXT_STMT_INFO can't be NULL_TREE. */
op = vect_get_store_rhs (next_stmt_info);
- vect_get_vec_defs_for_operand (vinfo, next_stmt_info, ncopies,
- op, gvec_oprnds[i]);
- vec_oprnd = (*gvec_oprnds[i])[0];
- dr_chain.quick_push (vec_oprnd);
+ if (costing_p)
+ update_prologue_cost (&prologue_cost, op);
+ else
+ {
+ vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
+ ncopies, op,
+ gvec_oprnds[i]);
+ vec_oprnd = (*gvec_oprnds[i])[0];
+ dr_chain.quick_push (vec_oprnd);
+ }
next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
- if (mask)
+
+ if (!costing_p)
{
- vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
- mask, &vec_masks,
- mask_vectype);
- vec_mask = vec_masks[0];
- }
+ if (mask)
+ {
+ vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
+ mask, &vec_masks,
+ mask_vectype);
+ vec_mask = vec_masks[0];
+ }
- /* We should have catched mismatched types earlier. */
- gcc_assert (
- useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
- dataref_ptr
- = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
- NULL, offset, &dummy, gsi,
- &ptr_incr, false, bump);
+ /* We should have catched mismatched types earlier. */
+ gcc_assert (
+ useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
+ dataref_ptr
+ = vect_create_data_ref_ptr (vinfo, first_stmt_info,
+ aggr_type, NULL, offset, &dummy,
+ gsi, &ptr_incr, false, bump);
+ }
}
- else
+ else if (!costing_p)
{
gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
/* DR_CHAIN is then used as an input to
@@ -8827,6 +8976,15 @@ vectorizable_store (vec_info *vinfo,
stmt_info, bump);
}
+ if (costing_p)
+ {
+ for (i = 0; i < vec_num; i++)
+ vect_get_store_cost (vinfo, stmt_info, 1,
+ alignment_support_scheme, misalignment,
+ &inside_cost, cost_vec);
+ continue;
+ }
+
/* Get an array into which we can store the individual vectors. */
tree vec_array = create_vector_array (vectype, vec_num);
@@ -8913,6 +9071,12 @@ vectorizable_store (vec_info *vinfo,
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
+ if (costing_p && dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: inside_cost = %d, "
+ "prologue_cost = %d .\n",
+ inside_cost, prologue_cost);
+
return true;
}
@@ -8920,41 +9084,49 @@ vectorizable_store (vec_info *vinfo,
{
gcc_assert (!slp && !grouped_store);
auto_vec<tree> vec_offsets;
+ unsigned int inside_cost = 0, prologue_cost = 0;
for (j = 0; j < ncopies; j++)
{
gimple *new_stmt;
if (j == 0)
{
- /* Since the store is not grouped, DR_GROUP_SIZE is 1, and
- DR_CHAIN is of size 1. */
- gcc_assert (group_size == 1);
- op = vect_get_store_rhs (first_stmt_info);
- vect_get_vec_defs_for_operand (vinfo, first_stmt_info, ncopies,
- op, gvec_oprnds[0]);
- vec_oprnd = (*gvec_oprnds[0])[0];
- dr_chain.quick_push (vec_oprnd);
- if (mask)
+ if (costing_p && vls_type == VLS_STORE_INVARIANT)
+ prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
+ stmt_info, 0, vect_prologue);
+ else if (!costing_p)
{
- vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
- mask, &vec_masks,
- mask_vectype);
- vec_mask = vec_masks[0];
- }
+ /* Since the store is not grouped, DR_GROUP_SIZE is 1, and
+ DR_CHAIN is of size 1. */
+ gcc_assert (group_size == 1);
+ op = vect_get_store_rhs (first_stmt_info);
+ vect_get_vec_defs_for_operand (vinfo, first_stmt_info,
+ ncopies, op, gvec_oprnds[0]);
+ vec_oprnd = (*gvec_oprnds[0])[0];
+ dr_chain.quick_push (vec_oprnd);
+ if (mask)
+ {
+ vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
+ mask, &vec_masks,
+ mask_vectype);
+ vec_mask = vec_masks[0];
+ }
- /* We should have catched mismatched types earlier. */
- gcc_assert (useless_type_conversion_p (vectype,
- TREE_TYPE (vec_oprnd)));
- if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
- vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info,
- slp_node, &gs_info, &dataref_ptr,
- &vec_offsets);
- else
- dataref_ptr
- = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
- NULL, offset, &dummy, gsi,
- &ptr_incr, false, bump);
+ /* We should have catched mismatched types earlier. */
+ gcc_assert (
+ useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
+ if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info,
+ slp_node, &gs_info,
+ &dataref_ptr, &vec_offsets);
+ else
+ dataref_ptr
+ = vect_create_data_ref_ptr (vinfo, first_stmt_info,
+ aggr_type, NULL, offset,
+ &dummy, gsi, &ptr_incr, false,
+ bump);
+ }
}
- else
+ else if (!costing_p)
{
gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
vec_oprnd = (*gvec_oprnds[0])[j];
@@ -8971,15 +9143,27 @@ vectorizable_store (vec_info *vinfo,
tree final_mask = NULL_TREE;
tree final_len = NULL_TREE;
tree bias = NULL_TREE;
- if (loop_masks)
- final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
- ncopies, vectype, j);
- if (vec_mask)
- final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
- vec_mask, gsi);
+ if (!costing_p)
+ {
+ if (loop_masks)
+ final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
+ ncopies, vectype, j);
+ if (vec_mask)
+ final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
+ final_mask, vec_mask, gsi);
+ }
if (gs_info.ifn != IFN_LAST)
{
+ if (costing_p)
+ {
+ unsigned int cnunits = vect_nunits_for_cost (vectype);
+ inside_cost
+ += record_stmt_cost (cost_vec, cnunits, scalar_store,
+ stmt_info, 0, vect_body);
+ continue;
+ }
+
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
vec_offset = vec_offsets[j];
tree scale = size_int (gs_info.scale);
@@ -9025,6 +9209,25 @@ vectorizable_store (vec_info *vinfo,
{
/* Emulated scatter. */
gcc_assert (!final_mask);
+ if (costing_p)
+ {
+ unsigned int cnunits = vect_nunits_for_cost (vectype);
+ /* For emulated scatter N offset vector element extracts
+ (we assume the scalar scaling and ptr + offset add is
+ consumed by the load). */
+ inside_cost
+ += record_stmt_cost (cost_vec, cnunits, vec_to_scalar,
+ stmt_info, 0, vect_body);
+ /* N scalar stores plus extracting the elements. */
+ inside_cost
+ += record_stmt_cost (cost_vec, cnunits, vec_to_scalar,
+ stmt_info, 0, vect_body);
+ inside_cost
+ += record_stmt_cost (cost_vec, cnunits, scalar_store,
+ stmt_info, 0, vect_body);
+ continue;
+ }
+
unsigned HOST_WIDE_INT const_nunits = nunits.to_constant ();
unsigned HOST_WIDE_INT const_offset_nunits
= TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype).to_constant ();
@@ -9075,9 +9278,22 @@ vectorizable_store (vec_info *vinfo,
*vec_stmt = new_stmt;
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
+
+ if (costing_p && dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: inside_cost = %d, "
+ "prologue_cost = %d .\n",
+ inside_cost, prologue_cost);
+
return true;
}
+ gcc_assert (memory_access_type == VMAT_CONTIGUOUS
+ || memory_access_type == VMAT_CONTIGUOUS_DOWN
+ || memory_access_type == VMAT_CONTIGUOUS_PERMUTE
+ || memory_access_type == VMAT_CONTIGUOUS_REVERSE);
+
+ unsigned inside_cost = 0, prologue_cost = 0;
auto_vec<tree> result_chain (group_size);
auto_vec<tree, 1> vec_oprnds;
for (j = 0; j < ncopies; j++)
@@ -9085,7 +9301,7 @@ vectorizable_store (vec_info *vinfo,
gimple *new_stmt;
if (j == 0)
{
- if (slp)
+ if (slp && !costing_p)
{
/* Get vectorized arguments for SLP_NODE. */
vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
@@ -9111,13 +9327,19 @@ vectorizable_store (vec_info *vinfo,
that there is no interleaving, DR_GROUP_SIZE is 1,
and only one iteration of the loop will be executed. */
op = vect_get_store_rhs (next_stmt_info);
- vect_get_vec_defs_for_operand (vinfo, next_stmt_info, ncopies,
- op, gvec_oprnds[i]);
- vec_oprnd = (*gvec_oprnds[i])[0];
- dr_chain.quick_push (vec_oprnd);
+ if (costing_p)
+ update_prologue_cost (&prologue_cost, op);
+ else
+ {
+ vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
+ ncopies, op,
+ gvec_oprnds[i]);
+ vec_oprnd = (*gvec_oprnds[i])[0];
+ dr_chain.quick_push (vec_oprnd);
+ }
next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
- if (mask)
+ if (mask && !costing_p)
{
vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
mask, &vec_masks,
@@ -9127,11 +9349,13 @@ vectorizable_store (vec_info *vinfo,
}
/* We should have catched mismatched types earlier. */
- gcc_assert (useless_type_conversion_p (vectype,
- TREE_TYPE (vec_oprnd)));
+ gcc_assert (costing_p
+ || useless_type_conversion_p (vectype,
+ TREE_TYPE (vec_oprnd)));
bool simd_lane_access_p
= STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
- if (simd_lane_access_p
+ if (!costing_p
+ && simd_lane_access_p
&& !loop_masks
&& TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
&& VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
@@ -9143,14 +9367,14 @@ vectorizable_store (vec_info *vinfo,
dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
dataref_offset = build_int_cst (ref_type, 0);
}
- else
+ else if (!costing_p)
dataref_ptr
= vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
simd_lane_access_p ? loop : NULL,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, bump);
}
- else
+ else if (!costing_p)
{
gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
/* DR_CHAIN is then used as an input to vect_permute_store_chain().
@@ -9172,15 +9396,78 @@ vectorizable_store (vec_info *vinfo,
new_stmt = NULL;
if (grouped_store)
- /* Permute. */
- vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info, gsi,
- &result_chain);
+ {
+ /* Permute. */
+ gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
+ if (costing_p)
+ {
+ int group_size = DR_GROUP_SIZE (first_stmt_info);
+ int nstmts = ceil_log2 (group_size) * group_size;
+ inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
+ stmt_info, 0, vect_body);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: "
+ "strided group_size = %d .\n",
+ group_size);
+ }
+ else
+ vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
+ gsi, &result_chain);
+ }
stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < vec_num; i++)
{
- unsigned misalign;
- unsigned HOST_WIDE_INT align;
+ if (!costing_p)
+ {
+ if (slp)
+ vec_oprnd = vec_oprnds[i];
+ else if (grouped_store)
+ /* For grouped stores vectorized defs are interleaved in
+ vect_permute_store_chain(). */
+ vec_oprnd = result_chain[i];
+ }
+
+ if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+ {
+ if (costing_p)
+ inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
+ stmt_info, 0, vect_body);
+ else
+ {
+ tree perm_mask = perm_mask_for_reverse (vectype);
+ tree perm_dest = vect_create_destination_var (
+ vect_get_store_rhs (stmt_info), vectype);
+ tree new_temp = make_ssa_name (perm_dest);
+
+ /* Generate the permute statement. */
+ gimple *perm_stmt
+ = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
+ vec_oprnd, perm_mask);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt,
+ gsi);
+
+ perm_stmt = SSA_NAME_DEF_STMT (new_temp);
+ vec_oprnd = new_temp;
+ }
+ }
+
+ if (costing_p)
+ {
+ vect_get_store_cost (vinfo, stmt_info, 1,
+ alignment_support_scheme, misalignment,
+ &inside_cost, cost_vec);
+
+ if (!slp)
+ {
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
+ if (!next_stmt_info)
+ break;
+ }
+
+ continue;
+ }
tree final_mask = NULL_TREE;
tree final_len = NULL_TREE;
@@ -9200,13 +9487,8 @@ vectorizable_store (vec_info *vinfo,
dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, bump);
- if (slp)
- vec_oprnd = vec_oprnds[i];
- else if (grouped_store)
- /* For grouped stores vectorized defs are interleaved in
- vect_permute_store_chain(). */
- vec_oprnd = result_chain[i];
-
+ unsigned misalign;
+ unsigned HOST_WIDE_INT align;
align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
if (alignment_support_scheme == dr_aligned)
misalign = 0;
@@ -9223,24 +9505,6 @@ vectorizable_store (vec_info *vinfo,
misalign);
align = least_bit_hwi (misalign | align);
- if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
- {
- tree perm_mask = perm_mask_for_reverse (vectype);
- tree perm_dest
- = vect_create_destination_var (vect_get_store_rhs (stmt_info),
- vectype);
- tree new_temp = make_ssa_name (perm_dest);
-
- /* Generate the permute statement. */
- gimple *perm_stmt
- = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
- vec_oprnd, perm_mask);
- vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
-
- perm_stmt = SSA_NAME_DEF_STMT (new_temp);
- vec_oprnd = new_temp;
- }
-
/* Compute IFN when LOOP_LENS or final_mask valid. */
machine_mode vmode = TYPE_MODE (vectype);
machine_mode new_vmode = vmode;
@@ -9353,7 +9617,7 @@ vectorizable_store (vec_info *vinfo,
if (!next_stmt_info)
break;
}
- if (!slp)
+ if (!slp && !costing_p)
{
if (j == 0)
*vec_stmt = new_stmt;
@@ -9361,6 +9625,45 @@ vectorizable_store (vec_info *vinfo,
}
}
+ if (costing_p)
+ {
+ /* When vectorizing a store into the function result assign
+ a penalty if the function returns in a multi-register location.
+ In this case we assume we'll end up with having to spill the
+ vector result and do piecewise loads as a conservative estimate. */
+ tree base = get_base_address (STMT_VINFO_DATA_REF (stmt_info)->ref);
+ if (base
+ && (TREE_CODE (base) == RESULT_DECL
+ || (DECL_P (base) && cfun_returns (base)))
+ && !aggregate_value_p (base, cfun->decl))
+ {
+ rtx reg = hard_function_value (TREE_TYPE (base), cfun->decl, 0, 1);
+ /* ??? Handle PARALLEL in some way. */
+ if (REG_P (reg))
+ {
+ int nregs = hard_regno_nregs (REGNO (reg), GET_MODE (reg));
+ /* Assume that a single reg-reg move is possible and cheap,
+ do not account for vector to gp register move cost. */
+ if (nregs > 1)
+ {
+ /* Spill. */
+ prologue_cost
+ += record_stmt_cost (cost_vec, ncopies, vector_store,
+ stmt_info, 0, vect_epilogue);
+ /* Loads. */
+ prologue_cost
+ += record_stmt_cost (cost_vec, ncopies * nregs, scalar_load,
+ stmt_info, 0, vect_epilogue);
+ }
+ }
+ }
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_model_store_cost: inside_cost = %d, "
+ "prologue_cost = %d .\n",
+ inside_cost, prologue_cost);
+ }
+
return true;
}
@@ -12028,23 +12331,22 @@ vectorizable_condition (vec_info *vinfo,
return true;
}
-/* vectorizable_comparison.
+/* Helper of vectorizable_comparison.
- Check if STMT_INFO is comparison expression that can be vectorized.
+ Check if STMT_INFO is comparison expression CODE that can be vectorized.
If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
comparison, put it in VEC_STMT, and insert it at GSI.
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_comparison (vec_info *vinfo,
- stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
- gimple **vec_stmt,
- slp_tree slp_node, stmt_vector_for_cost *cost_vec)
+vectorizable_comparison_1 (vec_info *vinfo, tree vectype,
+ stmt_vec_info stmt_info, tree_code code,
+ gimple_stmt_iterator *gsi, gimple **vec_stmt,
+ slp_tree slp_node, stmt_vector_for_cost *cost_vec)
{
tree lhs, rhs1, rhs2;
tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
tree new_temp;
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
@@ -12052,7 +12354,7 @@ vectorizable_comparison (vec_info *vinfo,
int ndts = 2;
poly_uint64 nunits;
int ncopies;
- enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
+ enum tree_code bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
int i;
bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
vec<tree> vec_oprnds0 = vNULL;
@@ -12075,14 +12377,6 @@ vectorizable_comparison (vec_info *vinfo,
ncopies = vect_get_num_copies (loop_vinfo, vectype);
gcc_assert (ncopies >= 1);
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
- return false;
-
- gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
- if (!stmt)
- return false;
-
- code = gimple_assign_rhs_code (stmt);
if (TREE_CODE_CLASS (code) != tcc_comparison)
return false;
@@ -12197,7 +12491,6 @@ vectorizable_comparison (vec_info *vinfo,
return false;
}
- STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
vect_model_simple_cost (vinfo, stmt_info,
ncopies * (1 + (bitop2 != NOP_EXPR)),
dts, ndts, slp_node, cost_vec);
@@ -12207,7 +12500,7 @@ vectorizable_comparison (vec_info *vinfo,
/* Transform. */
/* Handle def. */
- lhs = gimple_assign_lhs (stmt);
+ lhs = gimple_assign_lhs (STMT_VINFO_STMT (stmt_info));
mask = vect_create_destination_var (lhs, mask_type);
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
@@ -12263,6 +12556,44 @@ vectorizable_comparison (vec_info *vinfo,
return true;
}
+/* vectorizable_comparison.
+
+ Check if STMT_INFO is comparison expression that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ comparison, put it in VEC_STMT, and insert it at GSI.
+
+ Return true if STMT_INFO is vectorizable in this way. */
+
+static bool
+vectorizable_comparison (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+ gimple **vec_stmt,
+ slp_tree slp_node, stmt_vector_for_cost *cost_vec)
+{
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
+
+ if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
+ return false;
+
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ return false;
+
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
+ return false;
+
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ if (!vectorizable_comparison_1 (vinfo, vectype, stmt_info, code, gsi,
+ vec_stmt, slp_node, cost_vec))
+ return false;
+
+ if (!vec_stmt)
+ STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
+
+ return true;
+}
+
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
can handle all live statements in the node. Otherwise return true
if STMT_INFO is not live or if vectorizable_live_operation can handle it.
diff --git a/gcc/tree-vectorizer.cc b/gcc/tree-vectorizer.cc
index a048e9d..d97e2b5 100644
--- a/gcc/tree-vectorizer.cc
+++ b/gcc/tree-vectorizer.cc
@@ -943,6 +943,8 @@ set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call,
class loop *scalar_loop = get_loop (fun, tree_to_shwi (arg));
LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
+ LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo)
+ = vec_init_loop_exit_info (scalar_loop);
gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
== loop_vectorized_call);
/* If we are going to vectorize outer loop, prevent vectorization
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index f1d0cd7..a4043e4 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -196,6 +196,11 @@ struct _slp_tree {
denotes the number of output lanes. */
lane_permutation_t lane_permutation;
+ /* Selected SIMD clone's function info. First vector element
+ is SIMD clone's function decl, followed by a pair of trees (base + step)
+ for linear arguments (pair of NULLs for other arguments). */
+ vec<tree> simd_clone_info;
+
tree vectype;
/* Vectorized defs. */
vec<tree> vec_defs;
@@ -300,6 +305,7 @@ public:
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
#define SLP_TREE_LANE_PERMUTATION(S) (S)->lane_permutation
+#define SLP_TREE_SIMD_CLONE_INFO(S) (S)->simd_clone_info
#define SLP_TREE_DEF_TYPE(S) (S)->def_type
#define SLP_TREE_VECTYPE(S) (S)->vectype
#define SLP_TREE_REPRESENTATIVE(S) (S)->representative
@@ -882,6 +888,12 @@ public:
we need to peel off iterations at the end to form an epilogue loop. */
bool peeling_for_niter;
+ /* List of loop additional IV conditionals found in the loop. */
+ auto_vec<gcond *> conds;
+
+ /* Main loop IV cond. */
+ gcond* loop_iv_cond;
+
/* True if there are no loop carried data dependencies in the loop.
If loop->safelen <= 1, then this is always true, either the loop
didn't have any loop carried data dependencies, or the loop is being
@@ -919,10 +931,24 @@ public:
analysis. */
vec<_loop_vec_info *> epilogue_vinfos;
+ /* The controlling loop IV for the current loop when vectorizing. This IV
+ controls the natural exits of the loop. */
+ edge vec_loop_iv_exit;
+
+ /* The controlling loop IV for the epilogue loop when vectorizing. This IV
+ controls the natural exits of the loop. */
+ edge vec_epilogue_loop_iv_exit;
+
+ /* The controlling loop IV for the scalar loop being vectorized. This IV
+ controls the natural exits of the loop. */
+ edge scalar_loop_iv_exit;
} *loop_vec_info;
/* Access Functions. */
#define LOOP_VINFO_LOOP(L) (L)->loop
+#define LOOP_VINFO_IV_EXIT(L) (L)->vec_loop_iv_exit
+#define LOOP_VINFO_EPILOGUE_IV_EXIT(L) (L)->vec_epilogue_loop_iv_exit
+#define LOOP_VINFO_SCALAR_IV_EXIT(L) (L)->scalar_loop_iv_exit
#define LOOP_VINFO_BBS(L) (L)->bbs
#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
#define LOOP_VINFO_NITERS(L) (L)->num_iters
@@ -970,6 +996,8 @@ public:
#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
+#define LOOP_VINFO_LOOP_CONDS(L) (L)->conds
+#define LOOP_VINFO_LOOP_IV_COND(L) (L)->loop_iv_cond
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
#define LOOP_VINFO_SCALAR_LOOP_SCALING(L) (L)->scalar_loop_scaling
@@ -2155,11 +2183,13 @@ class auto_purge_vect_location
/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.cc. */
-extern void vect_set_loop_condition (class loop *, loop_vec_info,
+extern void vect_set_loop_condition (class loop *, edge, loop_vec_info,
tree, tree, tree, bool);
-extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge);
-class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *,
- class loop *, edge);
+extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge,
+ const_edge);
+class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *, edge,
+ class loop *, edge,
+ edge, edge *, bool = true);
class loop *vect_loop_versioning (loop_vec_info, gimple *);
extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
tree *, tree *, tree *, int, bool, bool,
@@ -2169,6 +2199,7 @@ extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (class loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
extern void vect_update_inits_of_drs (loop_vec_info, tree, tree_code);
+extern edge vec_init_loop_exit_info (class loop *);
/* In tree-vect-stmts.cc. */
extern tree get_related_vectype_for_scalar_type (machine_mode, tree,
@@ -2356,8 +2387,9 @@ struct vect_loop_form_info
tree number_of_iterations;
tree number_of_iterationsm1;
tree assumptions;
- gcond *loop_cond;
+ auto_vec<gcond *> conds;
gcond *inner_loop_cond;
+ edge loop_exit;
};
extern opt_result vect_analyze_loop_form (class loop *, vect_loop_form_info *);
extern loop_vec_info vect_create_loop_vinfo (class loop *, vec_info_shared *,
diff --git a/gcc/tree-vrp.cc b/gcc/tree-vrp.cc
index d7b194f..19d8f99 100644
--- a/gcc/tree-vrp.cc
+++ b/gcc/tree-vrp.cc
@@ -1092,6 +1092,106 @@ execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p,
return 0;
}
+// Implement a Fast VRP folder. Not quite as effective but faster.
+
+class fvrp_folder : public substitute_and_fold_engine
+{
+public:
+ fvrp_folder (dom_ranger *dr) : substitute_and_fold_engine (),
+ m_simplifier (dr)
+ { m_dom_ranger = dr; }
+
+ ~fvrp_folder () { }
+
+ tree value_of_expr (tree name, gimple *s = NULL) override
+ {
+ // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
+ if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
+ return NULL;
+ return m_dom_ranger->value_of_expr (name, s);
+ }
+
+ tree value_on_edge (edge e, tree name) override
+ {
+ // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
+ if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
+ return NULL;
+ return m_dom_ranger->value_on_edge (e, name);
+ }
+
+ tree value_of_stmt (gimple *s, tree name = NULL) override
+ {
+ // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
+ if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
+ return NULL;
+ return m_dom_ranger->value_of_stmt (s, name);
+ }
+
+ void pre_fold_bb (basic_block bb) override
+ {
+ m_dom_ranger->pre_bb (bb);
+ // Now process the PHIs in advance.
+ gphi_iterator psi = gsi_start_phis (bb);
+ for ( ; !gsi_end_p (psi); gsi_next (&psi))
+ {
+ tree name = gimple_range_ssa_p (PHI_RESULT (psi.phi ()));
+ if (name)
+ {
+ Value_Range vr(TREE_TYPE (name));
+ m_dom_ranger->range_of_stmt (vr, psi.phi (), name);
+ }
+ }
+ }
+
+ void post_fold_bb (basic_block bb) override
+ {
+ m_dom_ranger->post_bb (bb);
+ }
+
+ void pre_fold_stmt (gimple *s) override
+ {
+ // Ensure range_of_stmt has been called.
+ tree type = gimple_range_type (s);
+ if (type)
+ {
+ Value_Range vr(type);
+ m_dom_ranger->range_of_stmt (vr, s);
+ }
+ }
+
+ bool fold_stmt (gimple_stmt_iterator *gsi) override
+ {
+ bool ret = m_simplifier.simplify (gsi);
+ if (!ret)
+ ret = ::fold_stmt (gsi, follow_single_use_edges);
+ return ret;
+ }
+
+private:
+ DISABLE_COPY_AND_ASSIGN (fvrp_folder);
+ simplify_using_ranges m_simplifier;
+ dom_ranger *m_dom_ranger;
+};
+
+
+// Main entry point for a FAST VRP pass using a dom ranger.
+
+unsigned int
+execute_fast_vrp (struct function *fun)
+{
+ calculate_dominance_info (CDI_DOMINATORS);
+ dom_ranger dr;
+ fvrp_folder folder (&dr);
+
+ gcc_checking_assert (!fun->x_range_query);
+ fun->x_range_query = &dr;
+
+ folder.substitute_and_fold ();
+
+ fun->x_range_query = NULL;
+ return 0;
+}
+
namespace {
const pass_data pass_data_vrp =
@@ -1120,36 +1220,50 @@ const pass_data pass_data_early_vrp =
( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};
-static int vrp_pass_num = 0;
+const pass_data pass_data_fast_vrp =
+{
+ GIMPLE_PASS, /* type */
+ "fvrp", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_TREE_FAST_VRP, /* tv_id */
+ PROP_ssa, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
+};
+
+
class pass_vrp : public gimple_opt_pass
{
public:
- pass_vrp (gcc::context *ctxt, const pass_data &data_)
- : gimple_opt_pass (data_, ctxt), data (data_), warn_array_bounds_p (false),
- my_pass (vrp_pass_num++)
- {}
+ pass_vrp (gcc::context *ctxt, const pass_data &data_, bool warn_p)
+ : gimple_opt_pass (data_, ctxt), data (data_),
+ warn_array_bounds_p (warn_p), final_p (false)
+ { }
/* opt_pass methods: */
- opt_pass * clone () final override { return new pass_vrp (m_ctxt, data); }
+ opt_pass * clone () final override
+ { return new pass_vrp (m_ctxt, data, false); }
void set_pass_param (unsigned int n, bool param) final override
{
gcc_assert (n == 0);
- warn_array_bounds_p = param;
+ final_p = param;
}
bool gate (function *) final override { return flag_tree_vrp != 0; }
unsigned int execute (function *fun) final override
{
- // Early VRP pass.
- if (my_pass == 0)
- return execute_ranger_vrp (fun, /*warn_array_bounds_p=*/false, false);
+ // Check for fast vrp.
+ if (&data == &pass_data_fast_vrp)
+ return execute_fast_vrp (fun);
- return execute_ranger_vrp (fun, warn_array_bounds_p, my_pass == 2);
+ return execute_ranger_vrp (fun, warn_array_bounds_p, final_p);
}
private:
const pass_data &data;
bool warn_array_bounds_p;
- int my_pass;
+ bool final_p;
}; // class pass_vrp
const pass_data pass_data_assumptions =
@@ -1219,13 +1333,19 @@ public:
gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
- return new pass_vrp (ctxt, pass_data_vrp);
+ return new pass_vrp (ctxt, pass_data_vrp, true);
}
gimple_opt_pass *
make_pass_early_vrp (gcc::context *ctxt)
{
- return new pass_vrp (ctxt, pass_data_early_vrp);
+ return new pass_vrp (ctxt, pass_data_early_vrp, false);
+}
+
+gimple_opt_pass *
+make_pass_fast_vrp (gcc::context *ctxt)
+{
+ return new pass_vrp (ctxt, pass_data_fast_vrp, false);
}
gimple_opt_pass *
diff --git a/gcc/tree.cc b/gcc/tree.cc
index 8a8d6d5..69369c6 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -1763,7 +1763,6 @@ wide_int_to_tree_1 (tree type, const wide_int_ref &pcst)
/* Make sure no one is clobbering the shared constant. */
gcc_checking_assert (TREE_TYPE (t) == type
&& TREE_INT_CST_NUNITS (t) == 1
- && TREE_INT_CST_OFFSET_NUNITS (t) == 1
&& TREE_INT_CST_EXT_NUNITS (t) == 1
&& TREE_INT_CST_ELT (t, 0) == hwi);
return t;
@@ -2676,13 +2675,13 @@ build_zero_cst (tree type)
tree
build_replicated_int_cst (tree type, unsigned int width, HOST_WIDE_INT value)
{
- int n = (TYPE_PRECISION (type) + HOST_BITS_PER_WIDE_INT - 1)
- / HOST_BITS_PER_WIDE_INT;
+ int n = ((TYPE_PRECISION (type) + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
unsigned HOST_WIDE_INT low, mask;
- HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+ HOST_WIDE_INT a[WIDE_INT_MAX_INL_ELTS];
int i;
- gcc_assert (n && n <= WIDE_INT_MAX_ELTS);
+ gcc_assert (n && n <= WIDE_INT_MAX_INL_ELTS);
if (width == HOST_BITS_PER_WIDE_INT)
low = value;
@@ -2696,8 +2695,8 @@ build_replicated_int_cst (tree type, unsigned int width, HOST_WIDE_INT value)
a[i] = low;
gcc_assert (TYPE_PRECISION (type) <= MAX_BITSIZE_MODE_ANY_INT);
- return wide_int_to_tree
- (type, wide_int::from_array (a, n, TYPE_PRECISION (type)));
+ return wide_int_to_tree (type, wide_int::from_array (a, n,
+ TYPE_PRECISION (type)));
}
/* If floating-point type TYPE has an IEEE-style sign bit, return an
@@ -2789,14 +2788,6 @@ make_int_cst (int len, int ext_len MEM_STAT_DECL)
TREE_SET_CODE (t, INTEGER_CST);
TREE_INT_CST_NUNITS (t) = len;
TREE_INT_CST_EXT_NUNITS (t) = ext_len;
- /* to_offset can only be applied to trees that are offset_int-sized
- or smaller. EXT_LEN is correct if it fits, otherwise the constant
- must be exactly the precision of offset_int and so LEN is correct. */
- if (ext_len <= OFFSET_INT_ELTS)
- TREE_INT_CST_OFFSET_NUNITS (t) = ext_len;
- else
- TREE_INT_CST_OFFSET_NUNITS (t) = len;
-
TREE_CONSTANT (t) = 1;
return t;
@@ -3289,7 +3280,7 @@ really_constant_p (const_tree exp)
like sizetype is used to encode a value that is actually negative. */
bool
-ptrdiff_tree_p (const_tree t, poly_int64_pod *value)
+ptrdiff_tree_p (const_tree t, poly_int64 *value)
{
if (!t)
return false;
@@ -14505,7 +14496,8 @@ set_block (location_t loc, tree block)
location_t pure_loc = get_pure_location (loc);
source_range src_range = get_range_from_loc (line_table, loc);
unsigned discriminator = get_discriminator_from_loc (line_table, loc);
- return COMBINE_LOCATION_DATA (line_table, pure_loc, src_range, block, discriminator);
+ return line_table->get_or_create_combined_loc (pure_loc, src_range, block,
+ discriminator);
}
location_t
@@ -14526,11 +14518,10 @@ set_source_range (tree expr, source_range src_range)
location_t expr_location = EXPR_LOCATION (expr);
location_t pure_loc = get_pure_location (expr_location);
unsigned discriminator = get_discriminator_from_loc (expr_location);
- location_t adhoc = COMBINE_LOCATION_DATA (line_table,
- pure_loc,
- src_range,
- NULL,
- discriminator);
+ location_t adhoc = line_table->get_or_create_combined_loc (pure_loc,
+ src_range,
+ nullptr,
+ discriminator);
SET_EXPR_LOCATION (expr, adhoc);
return adhoc;
}
diff --git a/gcc/tree.h b/gcc/tree.h
index 54cf828..31ea52a 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -1139,8 +1139,6 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
(INTEGER_CST_CHECK (NODE)->base.u.int_length.unextended)
#define TREE_INT_CST_EXT_NUNITS(NODE) \
(INTEGER_CST_CHECK (NODE)->base.u.int_length.extended)
-#define TREE_INT_CST_OFFSET_NUNITS(NODE) \
- (INTEGER_CST_CHECK (NODE)->base.u.int_length.offset)
#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I)
#define TREE_INT_CST_LOW(NODE) \
((unsigned HOST_WIDE_INT) TREE_INT_CST_ELT (NODE, 0))
@@ -4974,7 +4972,7 @@ extern tree max_object_size ();
without loss of precision. Store the value in *VALUE if so. */
inline bool
-poly_int_tree_p (const_tree t, poly_int64_pod *value)
+poly_int_tree_p (const_tree t, poly_int64 *value)
{
if (tree_fits_poly_int64_p (t))
{
@@ -4988,7 +4986,7 @@ poly_int_tree_p (const_tree t, poly_int64_pod *value)
without loss of precision. Store the value in *VALUE if so. */
inline bool
-poly_int_tree_p (const_tree t, poly_uint64_pod *value)
+poly_int_tree_p (const_tree t, poly_uint64 *value)
{
if (tree_fits_poly_uint64_p (t))
{
@@ -5617,7 +5615,7 @@ bit_field_offset (const_tree t)
extern tree strip_float_extensions (tree);
extern bool really_constant_p (const_tree);
-extern bool ptrdiff_tree_p (const_tree, poly_int64_pod *);
+extern bool ptrdiff_tree_p (const_tree, poly_int64 *);
extern bool decl_address_invariant_p (const_tree);
extern bool decl_address_ip_invariant_p (const_tree);
extern bool int_fits_type_p (const_tree, const_tree)
@@ -6237,6 +6235,7 @@ namespace wi
static const enum precision_type precision_type = VAR_PRECISION;
static const bool host_dependent_precision = false;
static const bool is_sign_extended = false;
+ static const bool needs_write_val_arg = false;
};
template <int N>
@@ -6258,13 +6257,15 @@ namespace wi
template <int N>
struct int_traits <extended_tree <N> >
{
- static const enum precision_type precision_type = CONST_PRECISION;
+ static const enum precision_type precision_type
+ = N == ADDR_MAX_PRECISION ? INL_CONST_PRECISION : CONST_PRECISION;
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static const unsigned int precision = N;
};
- typedef extended_tree <WIDE_INT_MAX_PRECISION> widest_extended_tree;
+ typedef extended_tree <WIDEST_INT_MAX_PRECISION> widest_extended_tree;
typedef extended_tree <ADDR_MAX_PRECISION> offset_extended_tree;
typedef const generic_wide_int <widest_extended_tree> tree_to_widest_ref;
@@ -6292,6 +6293,13 @@ namespace wi
tree_to_poly_wide_ref to_poly_wide (const_tree);
template <int N>
+ struct ints_for <generic_wide_int <extended_tree <N> >, INL_CONST_PRECISION>
+ {
+ typedef generic_wide_int <extended_tree <N> > extended;
+ static extended zero (const extended &);
+ };
+
+ template <int N>
struct ints_for <generic_wide_int <extended_tree <N> >, CONST_PRECISION>
{
typedef generic_wide_int <extended_tree <N> > extended;
@@ -6308,7 +6316,7 @@ namespace wi
/* Used to convert a tree to a widest2_int like this:
widest2_int foo = widest2_int_cst (some_tree). */
-typedef generic_wide_int <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> >
+typedef generic_wide_int <wi::extended_tree <WIDEST_INT_MAX_PRECISION * 2> >
widest2_int_cst;
/* Refer to INTEGER_CST T as though it were a widest_int.
@@ -6443,8 +6451,16 @@ inline unsigned int
wi::extended_tree <N>::get_len () const
{
if (N == ADDR_MAX_PRECISION)
- return TREE_INT_CST_OFFSET_NUNITS (m_t);
- else if (N >= WIDE_INT_MAX_PRECISION)
+ {
+ /* to_offset can only be applied to trees that are offset_int-sized
+ or smaller. EXT_LEN is correct if it fits, otherwise the constant
+ must be exactly the precision of offset_int and so LEN is correct. */
+ unsigned int ext_len = TREE_INT_CST_EXT_NUNITS (m_t);
+ if (ext_len <= OFFSET_INT_ELTS)
+ return ext_len;
+ return TREE_INT_CST_NUNITS (m_t);
+ }
+ else if (N >= WIDEST_INT_MAX_PRECISION)
return TREE_INT_CST_EXT_NUNITS (m_t);
else
/* This class is designed to be used for specific output precisions
@@ -6530,6 +6546,14 @@ wi::to_poly_wide (const_tree t)
template <int N>
inline generic_wide_int <wi::extended_tree <N> >
wi::ints_for <generic_wide_int <wi::extended_tree <N> >,
+ wi::INL_CONST_PRECISION>::zero (const extended &x)
+{
+ return build_zero_cst (TREE_TYPE (x.get_tree ()));
+}
+
+template <int N>
+inline generic_wide_int <wi::extended_tree <N> >
+wi::ints_for <generic_wide_int <wi::extended_tree <N> >,
wi::CONST_PRECISION>::zero (const extended &x)
{
return build_zero_cst (TREE_TYPE (x.get_tree ()));
@@ -6637,7 +6661,7 @@ extern bool complete_ctor_at_level_p (const_tree, HOST_WIDE_INT, const_tree);
/* Given an expression EXP that is a handled_component_p,
look for the ultimate containing object, which is returned and specify
the access position and size. */
-extern tree get_inner_reference (tree, poly_int64_pod *, poly_int64_pod *,
+extern tree get_inner_reference (tree, poly_int64 *, poly_int64 *,
tree *, machine_mode *, int *, int *, int *);
extern tree build_personality_function (const char *);
diff --git a/gcc/value-range-pretty-print.cc b/gcc/value-range-pretty-print.cc
index c95b09d..4e43107 100644
--- a/gcc/value-range-pretty-print.cc
+++ b/gcc/value-range-pretty-print.cc
@@ -99,12 +99,18 @@ vrange_printer::print_irange_bitmasks (const irange &r) const
return;
pp_string (pp, " MASK ");
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_hex (bm.mask (), buf);
- pp_string (pp, buf);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p;
+ unsigned len_mask, len_val;
+ if (print_hex_buf_size (bm.mask (), &len_mask)
+ | print_hex_buf_size (bm.value (), &len_val))
+ p = XALLOCAVEC (char, MAX (len_mask, len_val));
+ else
+ p = buf;
+ print_hex (bm.mask (), p);
+ pp_string (pp, p);
pp_string (pp, " VALUE ");
- print_hex (bm.value (), buf);
- pp_string (pp, buf);
+ print_hex (bm.value (), p);
+ pp_string (pp, p);
}
void
diff --git a/gcc/value-range-storage.cc b/gcc/value-range-storage.cc
index e94d7f9..cca40af 100644
--- a/gcc/value-range-storage.cc
+++ b/gcc/value-range-storage.cc
@@ -229,14 +229,14 @@ vrange_storage::equal_p (const vrange &r) const
// irange_storage implementation
//============================================================================
-unsigned char *
+unsigned short *
irange_storage::write_lengths_address ()
{
- return (unsigned char *)&m_val[(m_num_ranges * 2 + 2)
- * WIDE_INT_MAX_HWIS (m_precision)];
+ return (unsigned short *)&m_val[(m_num_ranges * 2 + 2)
+ * WIDE_INT_MAX_HWIS (m_precision)];
}
-const unsigned char *
+const unsigned short *
irange_storage::lengths_address () const
{
return const_cast <irange_storage *> (this)->write_lengths_address ();
@@ -263,7 +263,7 @@ irange_storage::irange_storage (const irange &r)
}
static inline void
-write_wide_int (HOST_WIDE_INT *&val, unsigned char *&len, const wide_int &w)
+write_wide_int (HOST_WIDE_INT *&val, unsigned short *&len, const wide_int &w)
{
*len = w.get_len ();
for (unsigned i = 0; i < *len; ++i)
@@ -294,7 +294,7 @@ irange_storage::set_irange (const irange &r)
m_kind = VR_RANGE;
HOST_WIDE_INT *val = &m_val[0];
- unsigned char *len = write_lengths_address ();
+ unsigned short *len = write_lengths_address ();
for (unsigned i = 0; i < r.num_pairs (); ++i)
{
@@ -317,7 +317,7 @@ irange_storage::set_irange (const irange &r)
static inline void
read_wide_int (wide_int &w,
- const HOST_WIDE_INT *val, unsigned char len, unsigned prec)
+ const HOST_WIDE_INT *val, unsigned short len, unsigned prec)
{
trailing_wide_int_storage stow (prec, &len,
const_cast <HOST_WIDE_INT *> (val));
@@ -342,7 +342,7 @@ irange_storage::get_irange (irange &r, tree type) const
gcc_checking_assert (TYPE_PRECISION (type) == m_precision);
const HOST_WIDE_INT *val = &m_val[0];
- const unsigned char *len = lengths_address ();
+ const unsigned short *len = lengths_address ();
// Handle the common case where R can fit the new range.
if (r.m_max_ranges >= m_num_ranges)
@@ -411,7 +411,7 @@ irange_storage::size (const irange &r)
unsigned n = r.num_pairs () * 2 + 2;
unsigned hwi_size = ((n * WIDE_INT_MAX_HWIS (prec) - 1)
* sizeof (HOST_WIDE_INT));
- unsigned len_size = n;
+ unsigned len_size = n * sizeof (unsigned short);
return sizeof (irange_storage) + hwi_size + len_size;
}
@@ -433,7 +433,7 @@ irange_storage::dump () const
return;
const HOST_WIDE_INT *val = &m_val[0];
- const unsigned char *len = lengths_address ();
+ const unsigned short *len = lengths_address ();
int i, j;
fprintf (stderr, " lengths = [ ");
diff --git a/gcc/value-range-storage.h b/gcc/value-range-storage.h
index a91833c..bac6ba2 100644
--- a/gcc/value-range-storage.h
+++ b/gcc/value-range-storage.h
@@ -73,8 +73,8 @@ public:
private:
DISABLE_COPY_AND_ASSIGN (irange_storage);
static size_t size (const irange &r);
- const unsigned char *lengths_address () const;
- unsigned char *write_lengths_address ();
+ const unsigned short *lengths_address () const;
+ unsigned short *write_lengths_address ();
friend void gt_ggc_mx_irange_storage (void *);
friend void gt_pch_p_14irange_storage (void *, void *,
gt_pointer_operator, void *);
@@ -97,7 +97,7 @@ private:
// Another variable-length part of the structure following the HWIs.
// This is the length of each wide_int in m_val.
//
- // unsigned char m_len[];
+ // unsigned short m_len[];
irange_storage (const irange &r);
};
diff --git a/gcc/value-range.cc b/gcc/value-range.cc
index 391cef6..f507ec5 100644
--- a/gcc/value-range.cc
+++ b/gcc/value-range.cc
@@ -245,17 +245,23 @@ vrange::dump (FILE *file) const
void
irange_bitmask::dump (FILE *file) const
{
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p;
pretty_printer buffer;
pp_needs_newline (&buffer) = true;
buffer.buffer->stream = file;
pp_string (&buffer, "MASK ");
- print_hex (m_mask, buf);
- pp_string (&buffer, buf);
+ unsigned len_mask, len_val;
+ if (print_hex_buf_size (m_mask, &len_mask)
+ | print_hex_buf_size (m_value, &len_val))
+ p = XALLOCAVEC (char, MAX (len_mask, len_val));
+ else
+ p = buf;
+ print_hex (m_mask, p);
+ pp_string (&buffer, p);
pp_string (&buffer, " VALUE ");
- print_hex (m_value, buf);
- pp_string (&buffer, buf);
+ print_hex (m_value, p);
+ pp_string (&buffer, p);
pp_flush (&buffer);
}
diff --git a/gcc/value-range.h b/gcc/value-range.h
index a792c59..c00b151 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -626,7 +626,9 @@ irange::maybe_resize (int needed)
{
m_max_ranges = HARD_MAX_RANGES;
wide_int *newmem = new wide_int[m_max_ranges * 2];
- memcpy (newmem, m_base, sizeof (wide_int) * num_pairs () * 2);
+ unsigned n = num_pairs () * 2;
+ for (unsigned i = 0; i < n; ++i)
+ newmem[i] = m_base[i];
m_base = newmem;
}
}
diff --git a/gcc/value-relation.cc b/gcc/value-relation.cc
index 8fea4aa..c0f513a 100644
--- a/gcc/value-relation.cc
+++ b/gcc/value-relation.cc
@@ -183,19 +183,24 @@ relation_transitive (relation_kind r1, relation_kind r2)
return relation_kind (rr_transitive_table[r1][r2]);
}
-// When operands of a statement are identical ssa_names, return the
-// approriate relation between operands for NAME == NAME, given RANGE.
-//
-relation_kind
-get_identity_relation (tree name, vrange &range ATTRIBUTE_UNUSED)
+// When one name is an equivalence of another, ensure the equivalence
+// range is correct. Specifically for floating point, a +0 is also
+// equivalent to a -0 which may not be reflected. See PR 111694.
+
+void
+adjust_equivalence_range (vrange &range)
{
- // Return VREL_UNEQ when it is supported for floats as appropriate.
- if (frange::supports_p (TREE_TYPE (name)))
- return VREL_EQ;
+ if (range.undefined_p () || !is_a<frange> (range))
+ return;
- // Otherwise return VREL_EQ.
- return VREL_EQ;
-}
+ frange fr = as_a<frange> (range);
+ // If range includes 0 make sure both signs of zero are included.
+ if (fr.contains_p (dconst0) || fr.contains_p (dconstm0))
+ {
+ frange zeros (range.type (), dconstm0, dconst0);
+ range.union_ (zeros);
+ }
+ }
// This vector maps a relation to the equivalent tree code.
@@ -387,6 +392,9 @@ equiv_oracle::add_partial_equiv (relation_kind r, tree op1, tree op2)
// In either case, if PE2 has an entry, we simply do nothing.
if (pe2.members)
return;
+ // If there are no uses of op2, do not register.
+ if (has_zero_uses (op2))
+ return;
// PE1 is the LHS and already has members, so everything in the set
// should be a slice of PE2 rather than PE1.
pe2.code = pe_min (r, pe1.code);
@@ -404,6 +412,9 @@ equiv_oracle::add_partial_equiv (relation_kind r, tree op1, tree op2)
}
if (pe2.members)
{
+ // If there are no uses of op1, do not register.
+ if (has_zero_uses (op1))
+ return;
pe1.ssa_base = pe2.ssa_base;
// If pe2 is a 16 bit value, but only an 8 bit copy, we can't be any
// more than an 8 bit equivalence here, so choose MIN value.
@@ -413,6 +424,9 @@ equiv_oracle::add_partial_equiv (relation_kind r, tree op1, tree op2)
}
else
{
+ // If there are no uses of either operand, do not register.
+ if (has_zero_uses (op1) || has_zero_uses (op2))
+ return;
// Neither name has an entry, simply create op1 as slice of op2.
pe2.code = bits_to_pe (TYPE_PRECISION (TREE_TYPE (op2)));
if (pe2.code == VREL_VARYING)
diff --git a/gcc/value-relation.h b/gcc/value-relation.h
index f00f84f..31d4890 100644
--- a/gcc/value-relation.h
+++ b/gcc/value-relation.h
@@ -91,8 +91,8 @@ inline bool relation_equiv_p (relation_kind r)
void print_relation (FILE *f, relation_kind rel);
-// Return relation for NAME == NAME with RANGE.
-relation_kind get_identity_relation (tree name, vrange &range);
+// Adjust range as an equivalence.
+void adjust_equivalence_range (vrange &range);
class relation_oracle
{
diff --git a/gcc/vec.h b/gcc/vec.h
index 8a9a8d8..d509639 100644
--- a/gcc/vec.h
+++ b/gcc/vec.h
@@ -111,6 +111,24 @@ extern void *ggc_realloc (void *, size_t MEM_STAT_DECL);
the 'space' predicate will tell you whether there is spare capacity
in the vector. You will not normally need to use these two functions.
+ Not all vector operations support non-POD types and such restrictions
+ are enforced through static assertions. Some operations which often use
+ memmove to move elements around like quick_insert, safe_insert,
+ ordered_remove, unordered_remove, block_remove etc. require trivially
+ copyable types. Sorting operations, qsort, sort and stablesort, require
+ those too but as an extension allow also std::pair of 2 trivially copyable
+ types which happens to work even when std::pair itself isn't trivially
+ copyable. The quick_grow and safe_grow operations require trivially
+ default constructible types. One can use quick_grow_cleared or
+ safe_grow_cleared for non-trivially default constructible types if needed
+ (but of course such an operation is then more expensive). The pop operation
+ returns reference to the last element only for trivially destructible
+ types, for non-trivially destructible types one should use last operation
+ followed by pop which in that case returns void.
+ And finally, the GC and GC atomic vectors should always be used with
+ trivially destructible types, as nothing will invoke destructors when they
+ are freed during GC.
+
Notes on the different layout strategies
* Embeddable vectors (vec<T, A, vl_embed>)
@@ -185,6 +203,16 @@ extern void dump_vec_loc_statistics (void);
/* Hashtable mapping vec addresses to descriptors. */
extern htab_t vec_mem_usage_hash;
+/* Destruct N elements in DST. */
+
+template <typename T>
+inline void
+vec_destruct (T *dst, unsigned n)
+{
+ for ( ; n; ++dst, --n)
+ dst->~T ();
+}
+
/* Control data for vectors. This contains the number of allocated
and used slots inside a vector. */
@@ -310,6 +338,9 @@ va_heap::release (vec<T, va_heap, vl_embed> *&v)
if (v == NULL)
return;
+ if (!std::is_trivially_destructible <T>::value)
+ vec_destruct (v->address (), v->length ());
+
if (GATHER_STATISTICS)
v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
v->allocated (), true);
@@ -588,7 +619,10 @@ public:
void splice (const vec &);
void splice (const vec *src);
T *quick_push (const T &);
- T &pop (void);
+ using pop_ret_type
+ = typename std::conditional <std::is_trivially_destructible <T>::value,
+ T &, void>::type;
+ pop_ret_type pop (void);
void truncate (unsigned);
void quick_insert (unsigned, const T &);
void ordered_remove (unsigned);
@@ -735,8 +769,9 @@ vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len,
bool exact = false CXX_MEM_STAT_INFO)
{
unsigned oldlen = vec_safe_length (v);
- vec_safe_grow (v, len, exact PASS_MEM_STAT);
- vec_default_construct (v->address () + oldlen, len - oldlen);
+ gcc_checking_assert (len >= oldlen);
+ vec_safe_reserve (v, len - oldlen, exact PASS_MEM_STAT);
+ v->quick_grow_cleared (len);
}
@@ -1005,19 +1040,24 @@ vec<T, A, vl_embed>::quick_push (const T &obj)
{
gcc_checking_assert (space (1));
T *slot = &address ()[m_vecpfx.m_num++];
- *slot = obj;
+ ::new (static_cast<void*>(slot)) T (obj);
return slot;
}
-/* Pop and return the last element off the end of the vector. */
+/* Pop and return a reference to the last element off the end of the
+ vector. If T has non-trivial destructor, this method just pops
+ the element and returns void type. */
template<typename T, typename A>
-inline T &
+inline typename vec<T, A, vl_embed>::pop_ret_type
vec<T, A, vl_embed>::pop (void)
{
gcc_checking_assert (length () > 0);
- return address ()[--m_vecpfx.m_num];
+ T &last = address ()[--m_vecpfx.m_num];
+ if (!std::is_trivially_destructible <T>::value)
+ last.~T ();
+ return static_cast <pop_ret_type> (last);
}
@@ -1028,13 +1068,17 @@ template<typename T, typename A>
inline void
vec<T, A, vl_embed>::truncate (unsigned size)
{
- gcc_checking_assert (length () >= size);
+ unsigned l = length ();
+ gcc_checking_assert (l >= size);
+ if (!std::is_trivially_destructible <T>::value)
+ vec_destruct (address () + size, l - size);
m_vecpfx.m_num = size;
}
/* Insert an element, OBJ, at the IXth position of this vector. There
- must be sufficient space. */
+ must be sufficient space. This operation is not suitable for non-trivially
+ copyable types. */
template<typename T, typename A>
inline void
@@ -1042,6 +1086,12 @@ vec<T, A, vl_embed>::quick_insert (unsigned ix, const T &obj)
{
gcc_checking_assert (length () < allocated ());
gcc_checking_assert (ix <= length ());
+#if GCC_VERSION >= 5000
+ /* GCC 4.8 and 4.9 only implement std::is_trivially_destructible,
+ but not std::is_trivially_copyable nor
+ std::is_trivially_default_constructible. */
+ static_assert (std::is_trivially_copyable <T>::value, "");
+#endif
T *slot = &address ()[ix];
memmove (slot + 1, slot, (m_vecpfx.m_num++ - ix) * sizeof (T));
*slot = obj;
@@ -1050,13 +1100,16 @@ vec<T, A, vl_embed>::quick_insert (unsigned ix, const T &obj)
/* Remove an element from the IXth position of this vector. Ordering of
remaining elements is preserved. This is an O(N) operation due to
- memmove. */
+ memmove. Not suitable for non-trivially copyable types. */
template<typename T, typename A>
inline void
vec<T, A, vl_embed>::ordered_remove (unsigned ix)
{
gcc_checking_assert (ix < length ());
+#if GCC_VERSION >= 5000
+ static_assert (std::is_trivially_copyable <T>::value, "");
+#endif
T *slot = &address ()[ix];
memmove (slot, slot + 1, (--m_vecpfx.m_num - ix) * sizeof (T));
}
@@ -1104,6 +1157,9 @@ inline void
vec<T, A, vl_embed>::unordered_remove (unsigned ix)
{
gcc_checking_assert (ix < length ());
+#if GCC_VERSION >= 5000
+ static_assert (std::is_trivially_copyable <T>::value, "");
+#endif
T *p = address ();
p[ix] = p[--m_vecpfx.m_num];
}
@@ -1117,12 +1173,36 @@ inline void
vec<T, A, vl_embed>::block_remove (unsigned ix, unsigned len)
{
gcc_checking_assert (ix + len <= length ());
+#if GCC_VERSION >= 5000
+ static_assert (std::is_trivially_copyable <T>::value, "");
+#endif
T *slot = &address ()[ix];
m_vecpfx.m_num -= len;
memmove (slot, slot + len, (m_vecpfx.m_num - ix) * sizeof (T));
}
+#if GCC_VERSION >= 5000
+namespace vec_detail
+{
+ /* gcc_{qsort,qsort_r,stablesort_r} implementation under the hood
+ uses memcpy/memmove to reorder the array elements.
+ We want to assert these methods aren't used on types for which
+ that isn't appropriate, but unfortunately std::pair of 2 trivially
+ copyable types isn't trivially copyable and we use qsort on many
+ such std::pair instantiations. Let's allow both trivially
+ copyable types and std::pair of 2 trivially copyable types as
+ exception for qsort/sort/stablesort. */
+ template<typename T>
+ struct is_trivially_copyable_or_pair : std::is_trivially_copyable<T> { };
+
+ template<typename T, typename U>
+ struct is_trivially_copyable_or_pair<std::pair<T, U> >
+ : std::integral_constant<bool, std::is_trivially_copyable<T>::value
+ && std::is_trivially_copyable<U>::value> { };
+}
+#endif
+
/* Sort the contents of this vector with qsort. CMP is the comparison
function to pass to qsort. */
@@ -1130,6 +1210,9 @@ template<typename T, typename A>
inline void
vec<T, A, vl_embed>::qsort (int (*cmp) (const void *, const void *))
{
+#if GCC_VERSION >= 5000
+ static_assert (vec_detail::is_trivially_copyable_or_pair <T>::value, "");
+#endif
if (length () > 1)
gcc_qsort (address (), length (), sizeof (T), cmp);
}
@@ -1142,6 +1225,9 @@ inline void
vec<T, A, vl_embed>::sort (int (*cmp) (const void *, const void *, void *),
void *data)
{
+#if GCC_VERSION >= 5000
+ static_assert (vec_detail::is_trivially_copyable_or_pair <T>::value, "");
+#endif
if (length () > 1)
gcc_sort_r (address (), length (), sizeof (T), cmp, data);
}
@@ -1154,6 +1240,9 @@ inline void
vec<T, A, vl_embed>::stablesort (int (*cmp) (const void *, const void *,
void *), void *data)
{
+#if GCC_VERSION >= 5000
+ static_assert (vec_detail::is_trivially_copyable_or_pair <T>::value, "");
+#endif
if (length () > 1)
gcc_stablesort_r (address (), length (), sizeof (T), cmp, data);
}
@@ -1326,6 +1415,9 @@ inline void
vec<T, A, vl_embed>::quick_grow (unsigned len)
{
gcc_checking_assert (length () <= len && len <= m_vecpfx.m_alloc);
+#if GCC_VERSION >= 5000
+ static_assert (std::is_trivially_default_constructible <T>::value, "");
+#endif
m_vecpfx.m_num = len;
}
@@ -1339,7 +1431,8 @@ vec<T, A, vl_embed>::quick_grow_cleared (unsigned len)
{
unsigned oldlen = length ();
size_t growby = len - oldlen;
- quick_grow (len);
+ gcc_checking_assert (length () <= len && len <= m_vecpfx.m_alloc);
+ m_vecpfx.m_num = len;
if (growby != 0)
vec_default_construct (address () + oldlen, growby);
}
@@ -1350,6 +1443,7 @@ template<typename T>
void
gt_ggc_mx (vec<T, va_gc> *v)
{
+ static_assert (std::is_trivially_destructible <T>::value, "");
extern void gt_ggc_mx (T &);
for (unsigned i = 0; i < v->length (); i++)
gt_ggc_mx ((*v)[i]);
@@ -1359,6 +1453,7 @@ template<typename T>
void
gt_ggc_mx (vec<T, va_gc_atomic, vl_embed> *v ATTRIBUTE_UNUSED)
{
+ static_assert (std::is_trivially_destructible <T>::value, "");
/* Nothing to do. Vectors of atomic types wrt GC do not need to
be traversed. */
}
@@ -1518,7 +1613,10 @@ public:
void safe_splice (const vec & CXX_MEM_STAT_INFO);
T *quick_push (const T &);
T *safe_push (const T &CXX_MEM_STAT_INFO);
- T &pop (void);
+ using pop_ret_type
+ = typename std::conditional <std::is_trivially_destructible <T>::value,
+ T &, void>::type;
+ pop_ret_type pop (void);
void truncate (unsigned);
void safe_grow (unsigned, bool = false CXX_MEM_STAT_INFO);
void safe_grow_cleared (unsigned, bool = false CXX_MEM_STAT_INFO);
@@ -1627,8 +1725,8 @@ public:
auto_vec& operator= (vec<T, va_heap>&& r)
{
- if (this == &r)
- return *this;
+ if (this == &r)
+ return *this;
gcc_assert (!r.using_auto_storage ());
this->release ();
@@ -1639,8 +1737,8 @@ public:
auto_vec& operator= (auto_vec<T> &&r)
{
- if (this == &r)
- return *this;
+ if (this == &r)
+ return *this;
gcc_assert (!r.using_auto_storage ());
this->release ();
@@ -1660,7 +1758,7 @@ public:
// You probably don't want to copy a vector, so these are deleted to prevent
// unintentional use. If you really need a copy of the vectors contents you
// can use copy ().
- auto_vec(const auto_vec &) = delete;
+ auto_vec (const auto_vec &) = delete;
auto_vec &operator= (const auto_vec &) = delete;
};
@@ -1986,10 +2084,12 @@ vec<T, va_heap, vl_ptr>::safe_push (const T &obj MEM_STAT_DECL)
}
-/* Pop and return the last element off the end of the vector. */
+/* Pop and return a reference to the last element off the end of the
+ vector. If T has non-trivial destructor, this method just pops
+ last element and returns void. */
template<typename T>
-inline T &
+inline typename vec<T, va_heap, vl_ptr>::pop_ret_type
vec<T, va_heap, vl_ptr>::pop (void)
{
return m_vec->pop ();
@@ -2038,10 +2138,12 @@ vec<T, va_heap, vl_ptr>::safe_grow_cleared (unsigned len, bool exact
MEM_STAT_DECL)
{
unsigned oldlen = length ();
- size_t growby = len - oldlen;
- safe_grow (len, exact PASS_MEM_STAT);
- if (growby != 0)
- vec_default_construct (address () + oldlen, growby);
+ gcc_checking_assert (oldlen <= len);
+ reserve (len - oldlen, exact PASS_MEM_STAT);
+ if (m_vec)
+ m_vec->quick_grow_cleared (len);
+ else
+ gcc_checking_assert (len == 0);
}
diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc
index f3a1871..07f23cc 100644
--- a/gcc/wide-int-print.cc
+++ b/gcc/wide-int-print.cc
@@ -49,14 +49,12 @@ print_dec (const wide_int_ref &wi, FILE *file, signop sgn)
}
-/* Try to print the signed self in decimal to BUF if the number fits
- in a HWI. Other print in hex. */
+/* Try to print the signed self in decimal to BUF. */
void
print_decs (const wide_int_ref &wi, char *buf)
{
- if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
- || (wi.get_len () == 1))
+ if (wi.get_precision () <= HOST_BITS_PER_WIDE_INT || wi.get_len () == 1)
{
if (wi::neg_p (wi))
sprintf (buf, "-" HOST_WIDE_INT_PRINT_UNSIGNED,
@@ -64,23 +62,30 @@ print_decs (const wide_int_ref &wi, char *buf)
else
sprintf (buf, HOST_WIDE_INT_PRINT_DEC, wi.to_shwi ());
}
+ else if (wi::neg_p (wi))
+ {
+ widest2_int w = widest2_int::from (wi, SIGNED);
+ *buf = '-';
+ print_decu (-w, buf + 1);
+ }
else
- print_hex (wi, buf);
+ print_decu (wi, buf);
}
-/* Try to print the signed self in decimal to FILE if the number fits
- in a HWI. Other print in hex. */
+/* Try to print the signed self in decimal to FILE. */
void
print_decs (const wide_int_ref &wi, FILE *file)
{
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_decs (wi, buf);
- fputs (buf, file);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
+ unsigned len;
+ if (print_decs_buf_size (wi, &len))
+ p = XALLOCAVEC (char, len);
+ print_decs (wi, p);
+ fputs (p, file);
}
-/* Try to print the unsigned self in decimal to BUF if the number fits
- in a HWI. Other print in hex. */
+/* Try to print the unsigned self in decimal to BUF. */
void
print_decu (const wide_int_ref &wi, char *buf)
@@ -89,18 +94,47 @@ print_decu (const wide_int_ref &wi, char *buf)
|| (wi.get_len () == 1 && !wi::neg_p (wi)))
sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, wi.to_uhwi ());
else
- print_hex (wi, buf);
+ {
+ widest2_int w = widest2_int::from (wi, UNSIGNED), r;
+ widest2_int ten19 = HOST_WIDE_INT_UC (10000000000000000000);
+ char buf2[20], next1[19], next2[19];
+ size_t l, c = 0, i;
+ /* In order to avoid dividing this twice, print the 19 decimal
+ digit chunks in reverse order into buffer and then reorder
+ them in-place. */
+ while (wi::gtu_p (w, ten19))
+ {
+ w = wi::divmod_trunc (w, ten19, UNSIGNED, &r);
+ sprintf (buf + c * 19, "%019" PRIu64, r.to_uhwi ());
+ ++c;
+ }
+ l = sprintf (buf2, HOST_WIDE_INT_PRINT_UNSIGNED, w.to_uhwi ());
+ buf[c * 19 + l] = '\0';
+ memcpy (next1, buf, 19);
+ memcpy (buf, buf2, l);
+ for (i = 0; i < c / 2; ++i)
+ {
+ memcpy (next2, buf + (c - i - 1) * 19, 19);
+ memcpy (buf + l + (c - i - 1) * 19, next1, 19);
+ memcpy (next1, buf + (i + 1) * 19, 19);
+ memcpy (buf + l + i * 19, next2, 19);
+ }
+ if (c & 1)
+ memcpy (buf + l + i * 19, next1, 19);
+ }
}
-/* Try to print the signed self in decimal to FILE if the number fits
- in a HWI. Other print in hex. */
+/* Try to print the signed self in decimal to FILE. */
void
print_decu (const wide_int_ref &wi, FILE *file)
{
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_decu (wi, buf);
- fputs (buf, file);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
+ unsigned len;
+ if (print_decu_buf_size (wi, &len))
+ p = XALLOCAVEC (char, len);
+ print_decu (wi, p);
+ fputs (p, file);
}
void
@@ -134,9 +168,12 @@ print_hex (const wide_int_ref &val, char *buf)
void
print_hex (const wide_int_ref &wi, FILE *file)
{
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_hex (wi, buf);
- fputs (buf, file);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
+ unsigned len;
+ if (print_hex_buf_size (wi, &len))
+ p = XALLOCAVEC (char, len);
+ print_hex (wi, p);
+ fputs (p, file);
}
/* Print larger precision wide_int. Not defined as inline in a header
@@ -145,8 +182,9 @@ print_hex (const wide_int_ref &wi, FILE *file)
void
pp_wide_int_large (pretty_printer *pp, const wide_int_ref &w, signop sgn)
{
- unsigned int prec = w.get_precision ();
- char *buf = XALLOCAVEC (char, (prec + 3) / 4 + 3);
+ unsigned int len;
+ print_dec_buf_size (w, sgn, &len);
+ char *buf = XALLOCAVEC (char, len);
print_dec (w, buf, sgn);
pp_string (pp, buf);
}
diff --git a/gcc/wide-int-print.h b/gcc/wide-int-print.h
index 5aca037..191c7f7 100644
--- a/gcc/wide-int-print.h
+++ b/gcc/wide-int-print.h
@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include <stdio.h>
-#define WIDE_INT_PRINT_BUFFER_SIZE (WIDE_INT_MAX_PRECISION / 4 + 4)
+#define WIDE_INT_PRINT_BUFFER_SIZE (WIDE_INT_MAX_INL_PRECISION / 4 + 4)
/* Printing functions. */
@@ -36,4 +36,40 @@ extern void print_hex (const wide_int_ref &wi, char *buf);
extern void print_hex (const wide_int_ref &wi, FILE *file);
extern void pp_wide_int_large (pretty_printer *, const wide_int_ref &, signop);
+inline bool
+print_dec_buf_size (const wide_int_ref &wi, signop sgn, unsigned int *len)
+{
+ unsigned int l = wi.get_len ();
+ if ((l != 1 || sgn == UNSIGNED) && wi::neg_p (wi))
+ l = WIDE_INT_MAX_HWIS (wi.get_precision ());
+ l = l * HOST_BITS_PER_WIDE_INT / 3 + 3;
+ *len = l;
+ return UNLIKELY (l > WIDE_INT_PRINT_BUFFER_SIZE);
+}
+
+inline bool
+print_decs_buf_size (const wide_int_ref &wi, unsigned int *len)
+{
+ return print_dec_buf_size (wi, SIGNED, len);
+}
+
+inline bool
+print_decu_buf_size (const wide_int_ref &wi, unsigned int *len)
+{
+ return print_dec_buf_size (wi, UNSIGNED, len);
+}
+
+inline bool
+print_hex_buf_size (const wide_int_ref &wi, unsigned int *len)
+{
+ unsigned int l;
+ if (wi::neg_p (wi))
+ l = WIDE_INT_MAX_HWIS (wi.get_precision ());
+ else
+ l = wi.get_len ();
+ l = l * HOST_BITS_PER_WIDE_INT / 4 + 4;
+ *len = l;
+ return UNLIKELY (l > WIDE_INT_PRINT_BUFFER_SIZE);
+}
+
#endif /* WIDE_INT_PRINT_H */
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index 81b7be8..5426766 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -51,7 +51,7 @@ typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#include "longlong.h"
#endif
-static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
+static const HOST_WIDE_INT zeros[1] = {};
/*
* Internal utilities.
@@ -62,8 +62,7 @@ static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
-#define BLOCKS_NEEDED(PREC) \
- (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
+#define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
/* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
@@ -96,7 +95,7 @@ canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
top = val[len - 1];
if (len * HOST_BITS_PER_WIDE_INT > precision)
val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
- if (top != 0 && top != (HOST_WIDE_INT)-1)
+ if (top != 0 && top != HOST_WIDE_INT_M1)
return len;
/* At this point we know that the top is either 0 or -1. Find the
@@ -163,7 +162,7 @@ wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
/* We have to clear all the bits ourself, as we merely or in values
below. */
unsigned int len = BLOCKS_NEEDED (precision);
- HOST_WIDE_INT *val = result.write_val ();
+ HOST_WIDE_INT *val = result.write_val (0);
for (unsigned int i = 0; i < len; ++i)
val[i] = 0;
@@ -232,8 +231,7 @@ wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
}
else if (excess < 0 && wi::neg_p (x))
{
- int extra
- = (-excess + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
+ int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
for (int i = 0; i < len; i++)
t[i] = v[i];
@@ -280,8 +278,8 @@ wi::from_mpz (const_tree type, mpz_t x, bool wrap)
extracted from the GMP manual, section "Integer Import and Export":
http://gmplib.org/manual/Integer-Import-and-Export.html */
numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
- count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
- HOST_WIDE_INT *val = res.write_val ();
+ count = CEIL (mpz_sizeinbase (x, 2), numb);
+ HOST_WIDE_INT *val = res.write_val (0);
/* Read the absolute value.
Write directly to the wide_int storage if possible, otherwise leave
@@ -289,7 +287,7 @@ wi::from_mpz (const_tree type, mpz_t x, bool wrap)
to use mpz_tdiv_r_2exp for the latter case, but the situation is
pathological and it seems safer to operate on the original mpz value
in all cases. */
- void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
+ void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
&count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
if (count < 1)
{
@@ -1334,21 +1332,6 @@ wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
unsigned HOST_WIDE_INT o0, o1, k, t;
unsigned int i;
unsigned int j;
- unsigned int blocks_needed = BLOCKS_NEEDED (prec);
- unsigned int half_blocks_needed = blocks_needed * 2;
- /* The sizes here are scaled to support a 2x largest mode by 2x
- largest mode yielding a 4x largest mode result. This is what is
- needed by vpn. */
-
- unsigned HOST_HALF_WIDE_INT
- u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
- unsigned HOST_HALF_WIDE_INT
- v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
- /* The '2' in 'R' is because we are internally doing a full
- multiply. */
- unsigned HOST_HALF_WIDE_INT
- r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
- HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
/* If the top level routine did not really pass in an overflow, then
just make sure that we never attempt to set it. */
@@ -1469,6 +1452,37 @@ wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
return 1;
}
+ /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
+ WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
+ result. */
+
+ unsigned HOST_HALF_WIDE_INT
+ ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
+ unsigned HOST_HALF_WIDE_INT
+ vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
+ /* The '2' in 'R' is because we are internally doing a full
+ multiply. */
+ unsigned HOST_HALF_WIDE_INT
+ rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
+ const HOST_WIDE_INT mask
+ = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
+ unsigned HOST_HALF_WIDE_INT *u = ubuf;
+ unsigned HOST_HALF_WIDE_INT *v = vbuf;
+ unsigned HOST_HALF_WIDE_INT *r = rbuf;
+
+ if (!high)
+ prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
+ unsigned int blocks_needed = BLOCKS_NEEDED (prec);
+ unsigned int half_blocks_needed = blocks_needed * 2;
+ if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
+ {
+ unsigned HOST_HALF_WIDE_INT *buf
+ = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * 4 * blocks_needed);
+ u = buf;
+ v = u + 4 * blocks_needed;
+ r = v + 4 * blocks_needed;
+ }
+
/* We do unsigned mul and then correct it. */
wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
@@ -1782,16 +1796,6 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
unsigned int divisor_prec, signop sgn,
wi::overflow_type *oflow)
{
- unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
- unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
- unsigned HOST_HALF_WIDE_INT
- b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
- unsigned HOST_HALF_WIDE_INT
- b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
- unsigned HOST_HALF_WIDE_INT
- b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
- unsigned HOST_HALF_WIDE_INT
- b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
unsigned int m, n;
bool dividend_neg = false;
bool divisor_neg = false;
@@ -1910,6 +1914,44 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
}
}
+ unsigned HOST_HALF_WIDE_INT
+ b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
+ / HOST_BITS_PER_HALF_WIDE_INT];
+ unsigned HOST_HALF_WIDE_INT
+ b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
+ / HOST_BITS_PER_HALF_WIDE_INT];
+ unsigned HOST_HALF_WIDE_INT
+ b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
+ / HOST_BITS_PER_HALF_WIDE_INT) + 1];
+ unsigned HOST_HALF_WIDE_INT
+ b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
+ / HOST_BITS_PER_HALF_WIDE_INT];
+ unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
+ unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
+ unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
+ unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;
+
+ if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
+ dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
+ dividend_prec);
+ if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
+ divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
+ unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
+ unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
+ if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
+ || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
+ {
+ unsigned HOST_HALF_WIDE_INT *buf
+ = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
+ 12 * dividend_blocks_needed
+ + 4 * divisor_blocks_needed + 1);
+ b_quotient = buf;
+ b_remainder = b_quotient + 4 * dividend_blocks_needed;
+ b_dividend = b_remainder + 4 * dividend_blocks_needed;
+ b_divisor = b_dividend + 4 * dividend_blocks_needed + 1;
+ memset (b_quotient, 0,
+ 4 * dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
+ }
wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
dividend_blocks_needed, dividend_prec, UNSIGNED);
wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
@@ -1924,7 +1966,8 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
while (n > 1 && b_divisor[n - 1] == 0)
n--;
- memset (b_quotient, 0, sizeof (b_quotient));
+ if (b_quotient == b_quotient_buf)
+ memset (b_quotient_buf, 0, sizeof (b_quotient_buf));
divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
@@ -1970,6 +2013,7 @@ wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
/* The whole-block shift fills with zeros. */
unsigned int len = BLOCKS_NEEDED (precision);
+ len = MIN (xlen + skip + 1, len);
for (unsigned int i = 0; i < skip; ++i)
val[i] = 0;
@@ -1993,22 +2037,17 @@ wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
return canonize (val, len, precision);
}
-/* Right shift XVAL by SHIFT and store the result in VAL. Return the
+/* Right shift XVAL by SHIFT and store the result in VAL. LEN is the
number of blocks in VAL. The input has XPRECISION bits and the
output has XPRECISION - SHIFT bits. */
-static unsigned int
+static void
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
- unsigned int xlen, unsigned int xprecision,
- unsigned int shift)
+ unsigned int xlen, unsigned int shift, unsigned int len)
{
/* Split the shift into a whole-block shift and a subblock shift. */
unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
- /* Work out how many blocks are needed to store the significant bits
- (excluding the upper zeros or signs). */
- unsigned int len = BLOCKS_NEEDED (xprecision - shift);
-
/* It's easier to handle the simple block case specially. */
if (small_shift == 0)
for (unsigned int i = 0; i < len; ++i)
@@ -2025,7 +2064,6 @@ rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
}
}
- return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
@@ -2036,11 +2074,18 @@ wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
unsigned int xlen, unsigned int xprecision,
unsigned int precision, unsigned int shift)
{
- unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
+ /* Work out how many blocks are needed to store the significant bits
+ (excluding the upper zeros or signs). */
+ unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
+ unsigned int len = blocks_needed;
+ if (len > xlen && xval[xlen - 1] >= 0)
+ len = xlen;
+
+ rshift_large_common (val, xval, xlen, shift, len);
/* The value we just created has precision XPRECISION - SHIFT.
Zero-extend it to wider precisions. */
- if (precision > xprecision - shift)
+ if (precision > xprecision - shift && len == blocks_needed)
{
unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
if (small_prec)
@@ -2063,11 +2108,16 @@ wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
unsigned int xlen, unsigned int xprecision,
unsigned int precision, unsigned int shift)
{
- unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
+ /* Work out how many blocks are needed to store the significant bits
+ (excluding the upper zeros or signs). */
+ unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
+ unsigned int len = MIN (xlen, blocks_needed);
+
+ rshift_large_common (val, xval, xlen, shift, len);
/* The value we just created has precision XPRECISION - SHIFT.
Sign-extend it to wider types. */
- if (precision > xprecision - shift)
+ if (precision > xprecision - shift && len == blocks_needed)
{
unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
if (small_prec)
@@ -2399,9 +2449,12 @@ from_int (int i)
static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_dec (wi, buf, sgn);
- ASSERT_STREQ (expected, buf);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
+ unsigned len;
+ if (print_dec_buf_size (wi, sgn, &len))
+ p = XALLOCAVEC (char, len);
+ print_dec (wi, p, sgn);
+ ASSERT_STREQ (expected, p);
}
/* Likewise for base 16. */
@@ -2409,9 +2462,12 @@ assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
- char buf[WIDE_INT_PRINT_BUFFER_SIZE];
- print_hex (wi, buf);
- ASSERT_STREQ (expected, buf);
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
+ unsigned len;
+ if (print_hex_buf_size (wi, &len))
+ p = XALLOCAVEC (char, len);
+ print_hex (wi, p);
+ ASSERT_STREQ (expected, p);
}
/* Test cases. */
@@ -2428,7 +2484,7 @@ test_printing ()
assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
- if (WIDE_INT_MAX_PRECISION > 128)
+ if (WIDE_INT_MAX_INL_PRECISION > 128)
{
assert_hexeq ("0x20000000000000000fffffffffffffffe",
wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 498d14d..73e431d 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -53,6 +53,10 @@ along with GCC; see the file COPYING3. If not see
multiply, division, shifts, comparisons, and operations that need
overflow detected), the signedness must be specified separately.
+ For precisions up to WIDE_INT_MAX_INL_PRECISION, it uses an inline
+ buffer in the type, for larger precisions up to WIDEST_INT_MAX_PRECISION
+ it uses a pointer to heap allocated buffer.
+
2) offset_int. This is a fixed-precision integer that can hold
any address offset, measured in either bits or bytes, with at
least one extra sign bit. At the moment the maximum address
@@ -79,8 +83,7 @@ along with GCC; see the file COPYING3. If not see
3) widest_int. This representation is an approximation of
infinite precision math. However, it is not really infinite
precision math as in the GMP library. It is really finite
- precision math where the precision is 4 times the size of the
- largest integer that the target port can represent.
+ precision math where the precision is WIDEST_INT_MAX_PRECISION.
Like offset_int, widest_int is wider than all the values that
it needs to represent, so the integers are logically signed.
@@ -231,17 +234,30 @@ along with GCC; see the file COPYING3. If not see
can be arbitrarily different from X. */
/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
- early examination of the target's mode file. The WIDE_INT_MAX_ELTS
+ early examination of the target's mode file. The WIDE_INT_MAX_INL_ELTS
can accommodate at least 1 more bit so that unsigned numbers of that
mode can be represented as a signed value. Note that it is still
possible to create fixed_wide_ints that have precisions greater than
MAX_BITSIZE_MODE_ANY_INT. This can be useful when representing a
double-width multiplication result, for example. */
-#define WIDE_INT_MAX_ELTS \
- ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)
+#define WIDE_INT_MAX_INL_ELTS \
+ ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) \
+ / HOST_BITS_PER_WIDE_INT)
+
+#define WIDE_INT_MAX_INL_PRECISION \
+ (WIDE_INT_MAX_INL_ELTS * HOST_BITS_PER_WIDE_INT)
+/* Precision of wide_int and largest _BitInt precision + 1 we can
+ support. */
+#define WIDE_INT_MAX_ELTS 1024
#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
+/* Precision of widest_int. */
+#define WIDEST_INT_MAX_ELTS 2048
+#define WIDEST_INT_MAX_PRECISION (WIDEST_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
+
+STATIC_ASSERT (WIDE_INT_MAX_INL_ELTS < WIDE_INT_MAX_ELTS);
+
/* This is the max size of any pointer on any machine. It does not
seem to be as easy to sniff this out of the machine description as
it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
@@ -307,17 +323,18 @@ along with GCC; see the file COPYING3. If not see
#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
WI_BINARY_RESULT (T1, T2) RESULT = \
wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
- HOST_WIDE_INT *VAL = RESULT.write_val ()
+ HOST_WIDE_INT *VAL = RESULT.write_val (0)
/* Similar for the result of a unary operation on X, which has type T. */
#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
WI_UNARY_RESULT (T) RESULT = \
wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
- HOST_WIDE_INT *VAL = RESULT.write_val ()
+ HOST_WIDE_INT *VAL = RESULT.write_val (0)
template <typename T> class generic_wide_int;
template <int N> class fixed_wide_int_storage;
class wide_int_storage;
+template <int N> class widest_int_storage;
/* An N-bit integer. Until we can use typedef templates, use this instead. */
#define FIXED_WIDE_INT(N) \
@@ -325,10 +342,8 @@ class wide_int_storage;
typedef generic_wide_int <wide_int_storage> wide_int;
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
-typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
-/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
- so as not to confuse gengtype. */
-typedef generic_wide_int < fixed_wide_int_storage <WIDE_INT_MAX_PRECISION * 2> > widest2_int;
+typedef generic_wide_int <widest_int_storage <WIDEST_INT_MAX_PRECISION> > widest_int;
+typedef generic_wide_int <widest_int_storage <WIDEST_INT_MAX_PRECISION * 2> > widest2_int;
/* wi::storage_ref can be a reference to a primitive type,
so this is the conservatively-correct setting. */
@@ -378,8 +393,12 @@ namespace wi
/* The integer has a variable precision but no defined signedness. */
VAR_PRECISION,
- /* The integer has a constant precision (known at GCC compile time)
- and is signed. */
+ /* The integer has a constant precision (known at GCC compile time),
+ is signed and all elements are in inline buffer. */
+ INL_CONST_PRECISION,
+
+ /* Like INL_CONST_PRECISION, but elements can be heap allocated for
+ larger lengths. */
CONST_PRECISION
};
@@ -390,7 +409,8 @@ namespace wi
Classifies the type of T.
static const unsigned int precision;
- Only defined if precision_type == CONST_PRECISION. Specifies the
+ Only defined if precision_type == INL_CONST_PRECISION or
+ precision_type == CONST_PRECISION. Specifies the
precision of all integers of type T.
static const bool host_dependent_precision;
@@ -415,9 +435,10 @@ namespace wi
struct binary_traits;
/* Specify the result type for each supported combination of binary
- inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
- mixed, in order to give stronger type checking. When both inputs
- are CONST_PRECISION, they must have the same precision. */
+ inputs. Note that INL_CONST_PRECISION, CONST_PRECISION and
+ VAR_PRECISION cannot be mixed, in order to give stronger type
+ checking. When both inputs are INL_CONST_PRECISION or both are
+ CONST_PRECISION, they must have the same precision. */
template <typename T1, typename T2>
struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
{
@@ -434,7 +455,7 @@ namespace wi
};
template <typename T1, typename T2>
- struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, INL_CONST_PRECISION>
{
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
so as not to confuse gengtype. */
@@ -447,6 +468,17 @@ namespace wi
};
template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ {
+ typedef generic_wide_int < widest_int_storage
+ <int_traits <T2>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
{
typedef wide_int result_type;
@@ -455,7 +487,7 @@ namespace wi
};
template <typename T1, typename T2>
- struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ struct binary_traits <T1, T2, INL_CONST_PRECISION, FLEXIBLE_PRECISION>
{
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
so as not to confuse gengtype. */
@@ -468,7 +500,18 @@ namespace wi
};
template <typename T1, typename T2>
- struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef generic_wide_int < widest_int_storage
+ <int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, INL_CONST_PRECISION, INL_CONST_PRECISION>
{
STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
@@ -482,6 +525,18 @@ namespace wi
};
template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ {
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
+ typedef generic_wide_int < widest_int_storage
+ <int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
{
typedef wide_int result_type;
@@ -709,8 +764,10 @@ wi::storage_ref::get_val () const
Although not required by generic_wide_int itself, writable storage
classes can also provide the following functions:
- HOST_WIDE_INT *write_val ()
- Get a modifiable version of get_val ()
+ HOST_WIDE_INT *write_val (unsigned int)
+ Get a modifiable version of get_val (). The argument should be
+ upper estimation for LEN (ignored by all storages but
+ widest_int_storage).
unsigned int set_len (unsigned int len)
Set the value returned by get_len () to LEN. */
@@ -777,6 +834,8 @@ public:
static const bool is_sign_extended
= wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
+ static const bool needs_write_val_arg
+ = wi::int_traits <generic_wide_int <storage> >::needs_write_val_arg;
};
template <typename storage>
@@ -1049,6 +1108,7 @@ namespace wi
static const enum precision_type precision_type = VAR_PRECISION;
static const bool host_dependent_precision = HDP;
static const bool is_sign_extended = SE;
+ static const bool needs_write_val_arg = false;
};
}
@@ -1065,7 +1125,11 @@ namespace wi
class GTY(()) wide_int_storage
{
private:
- HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
+ union
+ {
+ HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
+ HOST_WIDE_INT *valp;
+ } GTY((skip)) u;
unsigned int len;
unsigned int precision;
@@ -1073,14 +1137,17 @@ public:
wide_int_storage ();
template <typename T>
wide_int_storage (const T &);
+ wide_int_storage (const wide_int_storage &);
+ ~wide_int_storage ();
/* The standard generic_wide_int storage methods. */
unsigned int get_precision () const;
const HOST_WIDE_INT *get_val () const;
unsigned int get_len () const;
- HOST_WIDE_INT *write_val ();
+ HOST_WIDE_INT *write_val (unsigned int);
void set_len (unsigned int, bool = false);
+ wide_int_storage &operator = (const wide_int_storage &);
template <typename T>
wide_int_storage &operator = (const T &);
@@ -1099,12 +1166,15 @@ namespace wi
/* Guaranteed by a static assert in the wide_int_storage constructor. */
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
template <typename T1, typename T2>
static wide_int get_binary_result (const T1 &, const T2 &);
+ template <typename T1, typename T2>
+ static unsigned int get_binary_precision (const T1 &, const T2 &);
};
}
-inline wide_int_storage::wide_int_storage () {}
+inline wide_int_storage::wide_int_storage () : precision (0) {}
/* Initialize the storage from integer X, in its natural precision.
Note that we do not allow integers with host-dependent precision
@@ -1113,21 +1183,67 @@ inline wide_int_storage::wide_int_storage () {}
template <typename T>
inline wide_int_storage::wide_int_storage (const T &x)
{
- { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
- { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::INL_CONST_PRECISION);
WIDE_INT_REF_FOR (T) xi (x);
precision = xi.precision;
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
wi::copy (*this, xi);
}
+inline wide_int_storage::wide_int_storage (const wide_int_storage &x)
+{
+ memcpy (this, &x, sizeof (wide_int_storage));
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+}
+
+inline wide_int_storage::~wide_int_storage ()
+{
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ XDELETEVEC (u.valp);
+}
+
+inline wide_int_storage&
+wide_int_storage::operator = (const wide_int_storage &x)
+{
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ {
+ if (this == &x)
+ return *this;
+ XDELETEVEC (u.valp);
+ }
+ memcpy (this, &x, sizeof (wide_int_storage));
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+ return *this;
+}
+
template <typename T>
inline wide_int_storage&
wide_int_storage::operator = (const T &x)
{
- { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
- { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::INL_CONST_PRECISION);
WIDE_INT_REF_FOR (T) xi (x);
- precision = xi.precision;
+ if (UNLIKELY (precision != xi.precision))
+ {
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ XDELETEVEC (u.valp);
+ precision = xi.precision;
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ u.valp = XNEWVEC (HOST_WIDE_INT,
+ CEIL (precision, HOST_BITS_PER_WIDE_INT));
+ }
wi::copy (*this, xi);
return *this;
}
@@ -1141,7 +1257,7 @@ wide_int_storage::get_precision () const
inline const HOST_WIDE_INT *
wide_int_storage::get_val () const
{
- return val;
+ return UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION) ? u.valp : u.val;
}
inline unsigned int
@@ -1151,9 +1267,9 @@ wide_int_storage::get_len () const
}
inline HOST_WIDE_INT *
-wide_int_storage::write_val ()
+wide_int_storage::write_val (unsigned int)
{
- return val;
+ return UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION) ? u.valp : u.val;
}
inline void
@@ -1161,8 +1277,10 @@ wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
{
len = l;
if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
- val[len - 1] = sext_hwi (val[len - 1],
- precision % HOST_BITS_PER_WIDE_INT);
+ {
+ HOST_WIDE_INT &v = write_val (len)[len - 1];
+ v = sext_hwi (v, precision % HOST_BITS_PER_WIDE_INT);
+ }
}
/* Treat X as having signedness SGN and convert it to a PRECISION-bit
@@ -1172,7 +1290,7 @@ wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
signop sgn)
{
wide_int result = wide_int::create (precision);
- result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ result.set_len (wi::force_to_size (result.write_val (x.len), x.val, x.len,
x.precision, precision, sgn));
return result;
}
@@ -1185,7 +1303,7 @@ wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
unsigned int precision, bool need_canon_p)
{
wide_int result = wide_int::create (precision);
- result.set_len (wi::from_array (result.write_val (), val, len, precision,
+ result.set_len (wi::from_array (result.write_val (len), val, len, precision,
need_canon_p));
return result;
}
@@ -1196,6 +1314,9 @@ wide_int_storage::create (unsigned int precision)
{
wide_int x;
x.precision = precision;
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ x.u.valp = XNEWVEC (HOST_WIDE_INT,
+ CEIL (precision, HOST_BITS_PER_WIDE_INT));
return x;
}
@@ -1212,6 +1333,20 @@ wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
return wide_int::create (wi::get_precision (x));
}
+template <typename T1, typename T2>
+inline unsigned int
+wi::int_traits <wide_int_storage>::get_binary_precision (const T1 &x,
+ const T2 &y)
+{
+ /* This shouldn't be used for two flexible-precision inputs. */
+ STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
+ || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
+ if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
+ return wi::get_precision (y);
+ else
+ return wi::get_precision (x);
+}
+
/* The storage used by FIXED_WIDE_INT (N). */
template <int N>
class GTY(()) fixed_wide_int_storage
@@ -1221,7 +1356,7 @@ private:
unsigned int len;
public:
- fixed_wide_int_storage ();
+ fixed_wide_int_storage () = default;
template <typename T>
fixed_wide_int_storage (const T &);
@@ -1229,7 +1364,7 @@ public:
unsigned int get_precision () const;
const HOST_WIDE_INT *get_val () const;
unsigned int get_len () const;
- HOST_WIDE_INT *write_val ();
+ HOST_WIDE_INT *write_val (unsigned int);
void set_len (unsigned int, bool = false);
static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
@@ -1242,18 +1377,18 @@ namespace wi
template <int N>
struct int_traits < fixed_wide_int_storage <N> >
{
- static const enum precision_type precision_type = CONST_PRECISION;
+ static const enum precision_type precision_type = INL_CONST_PRECISION;
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static const unsigned int precision = N;
template <typename T1, typename T2>
static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
+ template <typename T1, typename T2>
+ static unsigned int get_binary_precision (const T1 &, const T2 &);
};
}
-template <int N>
-inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
-
/* Initialize the storage from integer X, in precision N. */
template <int N>
template <typename T>
@@ -1288,7 +1423,7 @@ fixed_wide_int_storage <N>::get_len () const
template <int N>
inline HOST_WIDE_INT *
-fixed_wide_int_storage <N>::write_val ()
+fixed_wide_int_storage <N>::write_val (unsigned int)
{
return val;
}
@@ -1308,7 +1443,7 @@ inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
FIXED_WIDE_INT (N) result;
- result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ result.set_len (wi::force_to_size (result.write_val (x.len), x.val, x.len,
x.precision, N, sgn));
return result;
}
@@ -1323,7 +1458,7 @@ fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
bool need_canon_p)
{
FIXED_WIDE_INT (N) result;
- result.set_len (wi::from_array (result.write_val (), val, len,
+ result.set_len (wi::from_array (result.write_val (len), val, len,
N, need_canon_p));
return result;
}
@@ -1337,6 +1472,241 @@ get_binary_result (const T1 &, const T2 &)
return FIXED_WIDE_INT (N) ();
}
+template <int N>
+template <typename T1, typename T2>
+inline unsigned int
+wi::int_traits < fixed_wide_int_storage <N> >::
+get_binary_precision (const T1 &, const T2 &)
+{
+ return N;
+}
+
+#define WIDEST_INT(N) generic_wide_int < widest_int_storage <N> >
+
+/* The storage used by widest_int. */
+template <int N>
+class GTY(()) widest_int_storage
+{
+private:
+ union
+ {
+ HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
+ HOST_WIDE_INT *valp;
+ } GTY((skip)) u;
+ unsigned int len;
+
+public:
+ widest_int_storage ();
+ widest_int_storage (const widest_int_storage &);
+ template <typename T>
+ widest_int_storage (const T &);
+ ~widest_int_storage ();
+ widest_int_storage &operator = (const widest_int_storage &);
+ template <typename T>
+ inline widest_int_storage& operator = (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val (unsigned int);
+ void set_len (unsigned int, bool = false);
+
+ static WIDEST_INT (N) from (const wide_int_ref &, signop);
+ static WIDEST_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
+ bool = true);
+};
+
+namespace wi
+{
+ template <int N>
+ struct int_traits < widest_int_storage <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = true;
+ static const unsigned int precision = N;
+ template <typename T1, typename T2>
+ static WIDEST_INT (N) get_binary_result (const T1 &, const T2 &);
+ template <typename T1, typename T2>
+ static unsigned int get_binary_precision (const T1 &, const T2 &);
+ };
+}
+
+template <int N>
+inline widest_int_storage <N>::widest_int_storage () : len (0) {}
+
+/* Initialize the storage from integer X, in precision N. */
+template <int N>
+template <typename T>
+inline widest_int_storage <N>::widest_int_storage (const T &x) : len (0)
+{
+ /* Check for type compatibility. We don't want to initialize a
+ widest integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, WIDEST_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+}
+
+template <int N>
+inline
+widest_int_storage <N>::widest_int_storage (const widest_int_storage &x)
+{
+ memcpy (this, &x, sizeof (widest_int_storage));
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, len);
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+}
+
+template <int N>
+inline widest_int_storage <N>::~widest_int_storage ()
+{
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ XDELETEVEC (u.valp);
+}
+
+template <int N>
+inline widest_int_storage <N>&
+widest_int_storage <N>::operator = (const widest_int_storage &x)
+{
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ {
+ if (this == &x)
+ return *this;
+ XDELETEVEC (u.valp);
+ }
+ memcpy (this, &x, sizeof (widest_int_storage));
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, len);
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+ return *this;
+}
+
+template <int N>
+template <typename T>
+inline widest_int_storage <N>&
+widest_int_storage <N>::operator = (const T &x)
+{
+ /* Check for type compatibility. We don't want to assign a
+ widest integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, WIDEST_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ XDELETEVEC (u.valp);
+ len = 0;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+ return *this;
+}
+
+template <int N>
+inline unsigned int
+widest_int_storage <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+widest_int_storage <N>::get_val () const
+{
+ return UNLIKELY (len > WIDE_INT_MAX_INL_ELTS) ? u.valp : u.val;
+}
+
+template <int N>
+inline unsigned int
+widest_int_storage <N>::get_len () const
+{
+ return len;
+}
+
+template <int N>
+inline HOST_WIDE_INT *
+widest_int_storage <N>::write_val (unsigned int l)
+{
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ XDELETEVEC (u.valp);
+ len = l;
+ if (UNLIKELY (l > WIDE_INT_MAX_INL_ELTS))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, l);
+ return u.valp;
+ }
+ else if (CHECKING_P && l < WIDE_INT_MAX_INL_ELTS)
+ u.val[l] = HOST_WIDE_INT_UC (0xbaaaaaaddeadbeef);
+ return u.val;
+}
+
+template <int N>
+inline void
+widest_int_storage <N>::set_len (unsigned int l, bool)
+{
+ gcc_checking_assert (l <= len);
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS)
+ && l <= WIDE_INT_MAX_INL_ELTS)
+ {
+ HOST_WIDE_INT *valp = u.valp;
+ memcpy (u.val, valp, l * sizeof (u.val[0]));
+ XDELETEVEC (valp);
+ }
+ else if (len && len < WIDE_INT_MAX_INL_ELTS)
+ gcc_checking_assert ((unsigned HOST_WIDE_INT) u.val[len]
+ == HOST_WIDE_INT_UC (0xbaaaaaaddeadbeef));
+ len = l;
+ /* There are no excess bits in val[len - 1]. */
+ STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
+}
+
+/* Treat X as having signedness SGN and convert it to an N-bit number. */
+template <int N>
+inline WIDEST_INT (N)
+widest_int_storage <N>::from (const wide_int_ref &x, signop sgn)
+{
+ WIDEST_INT (N) result;
+ unsigned int exp_len = x.len;
+ unsigned int prec = result.get_precision ();
+ if (sgn == UNSIGNED && prec > x.precision && x.val[x.len - 1] < 0)
+ exp_len = CEIL (x.precision, HOST_BITS_PER_WIDE_INT) + 1;
+ result.set_len (wi::force_to_size (result.write_val (exp_len), x.val, x.len,
+ x.precision, prec, sgn));
+ return result;
+}
+
+/* Create a WIDEST_INT (N) from the explicit block encoding given by
+ VAL and LEN. NEED_CANON_P is true if the encoding may have redundant
+ trailing blocks. */
+template <int N>
+inline WIDEST_INT (N)
+widest_int_storage <N>::from_array (const HOST_WIDE_INT *val,
+ unsigned int len,
+ bool need_canon_p)
+{
+ WIDEST_INT (N) result;
+ result.set_len (wi::from_array (result.write_val (len), val, len,
+ result.get_precision (), need_canon_p));
+ return result;
+}
+
+template <int N>
+template <typename T1, typename T2>
+inline WIDEST_INT (N)
+wi::int_traits < widest_int_storage <N> >::
+get_binary_result (const T1 &, const T2 &)
+{
+ return WIDEST_INT (N) ();
+}
+
+template <int N>
+template <typename T1, typename T2>
+inline unsigned int
+wi::int_traits < widest_int_storage <N> >::
+get_binary_precision (const T1 &, const T2 &)
+{
+ return N;
+}
+
/* A reference to one element of a trailing_wide_ints structure. */
class trailing_wide_int_storage
{
@@ -1346,20 +1716,20 @@ private:
unsigned int m_precision;
/* A pointer to the length field. */
- unsigned char *m_len;
+ unsigned short *m_len;
/* A pointer to the HWI array. There are enough elements to hold all
values of precision M_PRECISION. */
HOST_WIDE_INT *m_val;
public:
- trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);
+ trailing_wide_int_storage (unsigned int, unsigned short *, HOST_WIDE_INT *);
/* The standard generic_wide_int storage methods. */
unsigned int get_len () const;
unsigned int get_precision () const;
const HOST_WIDE_INT *get_val () const;
- HOST_WIDE_INT *write_val ();
+ HOST_WIDE_INT *write_val (unsigned int);
void set_len (unsigned int, bool = false);
template <typename T>
@@ -1391,15 +1761,13 @@ private:
unsigned short m_precision;
/* The shared maximum length of each number. */
- unsigned char m_max_len;
+ unsigned short m_max_len;
/* The number of elements. */
unsigned char m_num_elements;
- /* The current length of each number.
- Avoid char array so the whole structure is not a typeless storage
- that will, in turn, turn off TBAA on gimple, trees and RTL. */
- struct {unsigned char len;} m_len[N];
+ /* The current length of each number. */
+ unsigned short m_len[N];
/* The variable-length part of the structure, which always contains
at least one HWI. Element I starts at index I * M_MAX_LEN. */
@@ -1420,7 +1788,7 @@ public:
};
inline trailing_wide_int_storage::
-trailing_wide_int_storage (unsigned int precision, unsigned char *len,
+trailing_wide_int_storage (unsigned int precision, unsigned short *len,
HOST_WIDE_INT *val)
: m_precision (precision), m_len (len), m_val (val)
{
@@ -1445,7 +1813,7 @@ trailing_wide_int_storage::get_val () const
}
inline HOST_WIDE_INT *
-trailing_wide_int_storage::write_val ()
+trailing_wide_int_storage::write_val (unsigned int)
{
return m_val;
}
@@ -1486,7 +1854,7 @@ template <int N>
inline trailing_wide_int
trailing_wide_ints <N>::operator [] (unsigned int index)
{
- return trailing_wide_int_storage (m_precision, &m_len[index].len,
+ return trailing_wide_int_storage (m_precision, &m_len[index],
&m_val[index * m_max_len]);
}
@@ -1495,7 +1863,7 @@ inline typename trailing_wide_ints <N>::const_reference
trailing_wide_ints <N>::operator [] (unsigned int index) const
{
return wi::storage_ref (&m_val[index * m_max_len],
- m_len[index].len, m_precision);
+ m_len[index], m_precision);
}
/* Return how many extra bytes need to be added to the end of the
@@ -1528,6 +1896,7 @@ namespace wi
static const enum precision_type precision_type = FLEXIBLE_PRECISION;
static const bool host_dependent_precision = true;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static unsigned int get_precision (T);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
};
@@ -1699,6 +2068,7 @@ namespace wi
precision of HOST_WIDE_INT. */
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static unsigned int get_precision (const wi::hwi_with_prec &);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
const wi::hwi_with_prec &);
@@ -1804,8 +2174,8 @@ template <typename T1, typename T2>
inline unsigned int
wi::get_binary_precision (const T1 &x, const T2 &y)
{
- return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
- get_binary_result (x, y));
+ using res_traits = wi::int_traits <WI_BINARY_RESULT (T1, T2)>;
+ return res_traits::get_binary_precision (x, y);
}
/* Copy the contents of Y to X, but keeping X's current precision. */
@@ -1813,14 +2183,17 @@ template <typename T1, typename T2>
inline void
wi::copy (T1 &x, const T2 &y)
{
- HOST_WIDE_INT *xval = x.write_val ();
- const HOST_WIDE_INT *yval = y.get_val ();
unsigned int len = y.get_len ();
+ HOST_WIDE_INT *xval = x.write_val (len);
+ const HOST_WIDE_INT *yval = y.get_val ();
unsigned int i = 0;
do
xval[i] = yval[i];
while (++i < len);
- x.set_len (len, y.is_sign_extended);
+ /* For widest_int write_val is called with an exact value, not
+ upper bound for len, so nothing is needed further. */
+ if (!wi::int_traits <T1>::needs_write_val_arg)
+ x.set_len (len, y.is_sign_extended);
}
/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
@@ -2162,6 +2535,8 @@ wi::bit_not (const T &x)
{
WI_UNARY_RESULT_VAR (result, val, T, x);
WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len);
for (unsigned int i = 0; i < xi.len; ++i)
val[i] = ~xi.val[i];
result.set_len (xi.len);
@@ -2203,6 +2578,9 @@ wi::sext (const T &x, unsigned int offset)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len,
+ CEIL (offset, HOST_BITS_PER_WIDE_INT)));
if (offset <= HOST_BITS_PER_WIDE_INT)
{
val[0] = sext_hwi (xi.ulow (), offset);
@@ -2230,6 +2608,9 @@ wi::zext (const T &x, unsigned int offset)
return result;
}
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len,
+ offset / HOST_BITS_PER_WIDE_INT + 1));
/* In these cases we know that at least the top bit will be clear,
so no sign extension is necessary. */
if (offset < HOST_BITS_PER_WIDE_INT)
@@ -2259,6 +2640,9 @@ wi::set_bit (const T &x, unsigned int bit)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len,
+ bit / HOST_BITS_PER_WIDE_INT + 1));
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
@@ -2280,6 +2664,8 @@ wi::bswap (const T &x)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ static_assert (!result.needs_write_val_arg,
+ "bswap on widest_int makes no sense");
result.set_len (bswap_large (val, xi.val, xi.len, precision));
return result;
}
@@ -2292,6 +2678,8 @@ wi::bitreverse (const T &x)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ static_assert (!result.needs_write_val_arg,
+ "bitreverse on widest_int makes no sense");
result.set_len (bitreverse_large (val, xi.val, xi.len, precision));
return result;
}
@@ -2368,6 +2756,8 @@ wi::bit_and (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () & yi.ulow ();
@@ -2389,6 +2779,8 @@ wi::bit_and_not (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () & ~yi.ulow ();
@@ -2410,6 +2802,8 @@ wi::bit_or (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () | yi.ulow ();
@@ -2431,6 +2825,8 @@ wi::bit_or_not (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () | ~yi.ulow ();
@@ -2452,6 +2848,8 @@ wi::bit_xor (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () ^ yi.ulow ();
@@ -2472,6 +2870,8 @@ wi::add (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () + yi.ulow ();
@@ -2515,6 +2915,8 @@ wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.ulow ();
@@ -2558,6 +2960,8 @@ wi::sub (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () - yi.ulow ();
@@ -2601,6 +3005,8 @@ wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.ulow ();
@@ -2643,6 +3049,8 @@ wi::mul (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len + yi.len + 2);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () * yi.ulow ();
@@ -2664,6 +3072,8 @@ wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len + yi.len + 2);
result.set_len (mul_internal (val, xi.val, xi.len,
yi.val, yi.len, precision,
sgn, overflow, false));
@@ -2698,6 +3108,8 @@ wi::mul_high (const T1 &x, const T2 &y, signop sgn)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ static_assert (!result.needs_write_val_arg,
+ "mul_high on widest_int doesn't make sense");
result.set_len (mul_internal (val, xi.val, xi.len,
yi.val, yi.len, precision,
sgn, 0, true));
@@ -2716,6 +3128,12 @@ wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y);
+ if (quotient.needs_write_val_arg)
+ quotient_val = quotient.write_val ((sgn == UNSIGNED
+ && xi.val[xi.len - 1] < 0)
+ ? CEIL (precision,
+ HOST_BITS_PER_WIDE_INT) + 1
+ : xi.len + 1);
quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
precision,
yi.val, yi.len, yi.precision,
@@ -2753,6 +3171,16 @@ wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2795,6 +3223,16 @@ wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2828,6 +3266,16 @@ wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2871,6 +3319,16 @@ wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2915,6 +3373,12 @@ wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (remainder.needs_write_val_arg)
+ remainder_val = remainder.write_val ((sgn == UNSIGNED
+ && xi.val[xi.len - 1] < 0)
+ ? CEIL (precision,
+ HOST_BITS_PER_WIDE_INT) + 1
+ : xi.len + 1);
divmod_internal (0, &remainder_len, remainder_val,
xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn, overflow);
@@ -2955,6 +3419,16 @@ wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2991,6 +3465,16 @@ wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -3017,6 +3501,16 @@ wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -3086,12 +3580,16 @@ wi::lshift (const T1 &x, const T2 &y)
/* Handle the simple cases quickly. */
if (geu_p (yi, precision))
{
+ if (result.needs_write_val_arg)
+ val = result.write_val (1);
val[0] = 0;
result.set_len (1);
}
else
{
unsigned int shift = yi.to_uhwi ();
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len + shift / HOST_BITS_PER_WIDE_INT + 1);
/* For fixed-precision integers like offset_int and widest_int,
handle the case where the shift value is constant and the
result is a single nonnegative HWI (meaning that we don't
@@ -3130,12 +3628,23 @@ wi::lrshift (const T1 &x, const T2 &y)
/* Handle the simple cases quickly. */
if (geu_p (yi, xi.precision))
{
+ if (result.needs_write_val_arg)
+ val = result.write_val (1);
val[0] = 0;
result.set_len (1);
}
else
{
unsigned int shift = yi.to_uhwi ();
+ if (result.needs_write_val_arg)
+ {
+ unsigned int est_len = xi.len;
+ if (xi.val[xi.len - 1] < 0 && shift)
+ /* Logical right shift of sign-extended value might need a very
+ large precision e.g. for widest_int. */
+ est_len = CEIL (xi.precision - shift, HOST_BITS_PER_WIDE_INT) + 1;
+ val = result.write_val (est_len);
+ }
/* For fixed-precision integers like offset_int and widest_int,
handle the case where the shift value is constant and the
shifted value is a single nonnegative HWI (meaning that all
@@ -3171,6 +3680,8 @@ wi::arshift (const T1 &x, const T2 &y)
since the result can be no larger than that. */
WIDE_INT_REF_FOR (T1) xi (x);
WIDE_INT_REF_FOR (T2) yi (y);
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len);
/* Handle the simple cases quickly. */
if (geu_p (yi, xi.precision))
{
@@ -3374,25 +3885,41 @@ operator % (const T1 &x, const T2 &y)
return wi::smod_trunc (x, y);
}
-template<typename T>
+void gt_ggc_mx (generic_wide_int <wide_int_storage> *) = delete;
+void gt_pch_nx (generic_wide_int <wide_int_storage> *) = delete;
+void gt_pch_nx (generic_wide_int <wide_int_storage> *,
+ gt_pointer_operator, void *) = delete;
+
+template<int N>
void
-gt_ggc_mx (generic_wide_int <T> *)
+gt_ggc_mx (generic_wide_int <fixed_wide_int_storage <N> > *)
{
}
-template<typename T>
+template<int N>
void
-gt_pch_nx (generic_wide_int <T> *)
+gt_pch_nx (generic_wide_int <fixed_wide_int_storage <N> > *)
{
}
-template<typename T>
+template<int N>
void
-gt_pch_nx (generic_wide_int <T> *, gt_pointer_operator, void *)
+gt_pch_nx (generic_wide_int <fixed_wide_int_storage <N> > *,
+ gt_pointer_operator, void *)
{
}
template<int N>
+void gt_ggc_mx (generic_wide_int <widest_int_storage <N> > *) = delete;
+
+template<int N>
+void gt_pch_nx (generic_wide_int <widest_int_storage <N> > *) = delete;
+
+template<int N>
+void gt_pch_nx (generic_wide_int <widest_int_storage <N> > *,
+ gt_pointer_operator, void *) = delete;
+
+template<int N>
void
gt_ggc_mx (trailing_wide_ints <N> *)
{
@@ -3465,7 +3992,7 @@ inline wide_int
wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
wide_int result = wide_int::create (precision);
- result.set_len (mask (result.write_val (), width, negate_p, precision));
+ result.set_len (mask (result.write_val (0), width, negate_p, precision));
return result;
}
@@ -3477,7 +4004,7 @@ wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
unsigned int precision)
{
wide_int result = wide_int::create (precision);
- result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ result.set_len (shifted_mask (result.write_val (0), start, width, negate_p,
precision));
return result;
}
@@ -3498,8 +4025,8 @@ wi::mask (unsigned int width, bool negate_p)
{
STATIC_ASSERT (wi::int_traits<T>::precision);
T result;
- result.set_len (mask (result.write_val (), width, negate_p,
- wi::int_traits <T>::precision));
+ result.set_len (mask (result.write_val (width / HOST_BITS_PER_WIDE_INT + 1),
+ width, negate_p, wi::int_traits <T>::precision));
return result;
}
@@ -3512,9 +4039,13 @@ wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
STATIC_ASSERT (wi::int_traits<T>::precision);
T result;
- result.set_len (shifted_mask (result.write_val (), start, width,
- negate_p,
- wi::int_traits <T>::precision));
+ unsigned int prec = wi::int_traits <T>::precision;
+ unsigned int est_len
+ = result.needs_write_val_arg
+ ? ((start + (width > prec - start ? prec - start : width))
+ / HOST_BITS_PER_WIDE_INT + 1) : 0;
+ result.set_len (shifted_mask (result.write_val (est_len), start, width,
+ negate_p, prec));
return result;
}