aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorMartin Liska <mliska@suse.cz>2022-09-20 13:53:30 +0200
committerMartin Liska <mliska@suse.cz>2022-09-20 13:53:30 +0200
commit6df29b782e87c6c800be0425023d8438fdc67b92 (patch)
tree48eebe497e384d66a7f5cf861b4b1b963785a2cd /gcc
parentfdb97cd0b7d15efa39ba79dca44be93debb0ef12 (diff)
parent63e3cc294d835b43701eeef9410d1b8fc8922869 (diff)
downloadgcc-6df29b782e87c6c800be0425023d8438fdc67b92.zip
gcc-6df29b782e87c6c800be0425023d8438fdc67b92.tar.gz
gcc-6df29b782e87c6c800be0425023d8438fdc67b92.tar.bz2
Merge branch 'master' into devel/sphinx
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog482
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/ada/ChangeLog502
-rw-r--r--gcc/ada/gcc-interface/trans.cc1
-rw-r--r--gcc/analyzer/ChangeLog5
-rw-r--r--gcc/c-family/ChangeLog6
-rw-r--r--gcc/c-family/c-common.cc2
-rw-r--r--gcc/c-family/c-common.h1
-rw-r--r--gcc/c/ChangeLog15
-rw-r--r--gcc/c/c-decl.cc8
-rw-r--r--gcc/c/c-typeck.cc43
-rw-r--r--gcc/config/aarch64/aarch64-ldpstp.md11
-rw-r--r--gcc/config/aarch64/aarch64-simd.md217
-rw-r--r--gcc/config/aarch64/aarch64.cc94
-rw-r--r--gcc/config/aarch64/aarch64.md11
-rw-r--r--gcc/config/aarch64/iterators.md2
-rw-r--r--gcc/config/csky/csky.h2
-rw-r--r--gcc/config/gcn/mkoffload.cc2
-rw-r--r--gcc/config/i386/i386-builtins.cc185
-rw-r--r--gcc/config/i386/i386-expand.cc43
-rw-r--r--gcc/config/i386/mmx.md154
-rw-r--r--gcc/config/i386/sse.md80
-rw-r--r--gcc/config/i386/x86-tune-sched.cc14
-rw-r--r--gcc/config/loongarch/gnu-user.h6
-rw-r--r--gcc/config/mips/mips.cc2
-rw-r--r--gcc/config/nvptx/mkoffload.cc18
-rw-r--r--gcc/config/rs6000/rs6000-builtin.cc13
-rw-r--r--gcc/config/rs6000/rs6000-c.cc60
-rw-r--r--gcc/config/rs6000/rs6000.cc160
-rw-r--r--gcc/config/rs6000/rs6000.opt18
-rw-r--r--gcc/config/xtensa/xtensa.cc2
-rw-r--r--gcc/config/xtensa/xtensa.h1
-rw-r--r--gcc/config/xtensa/xtensa.md21
-rw-r--r--gcc/cp/ChangeLog153
-rw-r--r--gcc/cp/call.cc22
-rw-r--r--gcc/cp/constexpr.cc31
-rw-r--r--gcc/cp/cp-tree.h32
-rw-r--r--gcc/cp/cvt.cc4
-rw-r--r--gcc/cp/decl.cc17
-rw-r--r--gcc/cp/decl2.cc47
-rw-r--r--gcc/cp/except.cc4
-rw-r--r--gcc/cp/init.cc2
-rw-r--r--gcc/cp/lambda.cc1
-rw-r--r--gcc/cp/module.cc5
-rw-r--r--gcc/cp/name-lookup.cc2
-rw-r--r--gcc/cp/parser.cc7
-rw-r--r--gcc/cp/pt.cc35
-rw-r--r--gcc/cp/semantics.cc38
-rw-r--r--gcc/cp/tree.cc22
-rw-r--r--gcc/cp/typeck.cc24
-rw-r--r--gcc/cp/typeck2.cc33
-rw-r--r--gcc/d/ChangeLog5
-rw-r--r--gcc/d/d-builtins.cc1
-rw-r--r--gcc/doc/extend.texi2
-rw-r--r--gcc/doc/invoke.texi7
-rw-r--r--gcc/fortran/ChangeLog28
-rw-r--r--gcc/fortran/f95-lang.cc2
-rw-r--r--gcc/fortran/libgfortran.h1
-rw-r--r--gcc/fortran/resolve.cc1
-rw-r--r--gcc/fortran/simplify.cc2
-rw-r--r--gcc/fortran/trans-openmp.cc20
-rw-r--r--gcc/gimple-fold.cc1
-rw-r--r--gcc/gimple-range-fold.cc2
-rw-r--r--gcc/gimplify.cc2525
-rw-r--r--gcc/ginclude/float.h4
-rw-r--r--gcc/ginclude/stdatomic.h2
-rw-r--r--gcc/go/ChangeLog5
-rw-r--r--gcc/go/go-lang.cc3
-rw-r--r--gcc/jit/ChangeLog5
-rw-r--r--gcc/jit/dummy-frontend.cc3
-rw-r--r--gcc/lto/ChangeLog5
-rw-r--r--gcc/lto/lto-lang.cc1
-rw-r--r--gcc/match.pd6
-rw-r--r--gcc/omp-low.cc23
-rw-r--r--gcc/range-op-float.cc175
-rw-r--r--gcc/reg-stack.cc3
-rw-r--r--gcc/targhooks.cc17
-rw-r--r--gcc/testsuite/ChangeLog246
-rw-r--r--gcc/testsuite/c-c++-common/Waddress-7.c22
-rw-r--r--gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c83
-rw-r--r--gcc/testsuite/c-c++-common/goacc/mdc-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/gomp/target-50.c23
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C9
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/initlist-array17.C37
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C29
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/auto-fn65.C10
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C16
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C12
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/explicit19.C12
-rw-r--r--gcc/testsuite/g++.dg/ext/integer-pack6.C13
-rw-r--r--gcc/testsuite/g++.dg/ext/pr106877.C13
-rw-r--r--gcc/testsuite/g++.dg/gcov/gcov.exp4
-rw-r--r--gcc/testsuite/g++.dg/goacc/mdc.C2
-rw-r--r--gcc/testsuite/g++.dg/goacc/member-array-acc.C13
-rw-r--r--gcc/testsuite/g++.dg/gomp/map-3.C9
-rw-r--r--gcc/testsuite/g++.dg/gomp/member-array-omp.C13
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-3.C4
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-lambda-1.C6
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-this-2.C2
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-this-3.C4
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-this-4.C4
-rw-r--r--gcc/testsuite/g++.dg/modules/typename-friend_a.C11
-rw-r--r--gcc/testsuite/g++.dg/modules/typename-friend_b.C6
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1.C17
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1a.C16
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1b.C17
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1c.C17
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr106922.C91
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr106936.C14
-rw-r--r--gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C7
-rw-r--r--gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C3
-rw-r--r--gcc/testsuite/g++.target/powerpc/pr105485.C9
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr106878.c15
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr106958.c13
-rw-r--r--gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c9
-rw-r--r--gcc/testsuite/gcc.dg/c2x-float-11.c9
-rw-r--r--gcc/testsuite/gcc.dg/c2x-float-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr106938.c36
-rw-r--r--gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c6
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c2
-rw-r--r--gcc/testsuite/gcc.misc-tests/gcov.exp4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_1.c21
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_2.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_3.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv2di_1.c103
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c40
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_1.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_2.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_3.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/vect_unary_2.c5
-rw-r--r--gcc/testsuite/gcc.target/i386/pr105735-1.c88
-rw-r--r--gcc/testsuite/gcc.target/i386/pr105735-2.c28
-rw-r--r--gcc/testsuite/gcc.target/i386/pr106905.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr106910-1.c77
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c76
-rw-r--r--gcc/testsuite/gcc.target/ia64/pr106905.c20
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr104482.c16
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr106550.c14
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr106550_1.c22
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c13
-rw-r--r--gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c6
-rw-r--r--gcc/testsuite/gfortran.dg/ieee/modes_1.f9095
-rw-r--r--gcc/testsuite/gfortran.dg/ieee/rounding_2.f9020
-rw-r--r--gcc/testsuite/gfortran.dg/pr104314.f909
-rw-r--r--gcc/testsuite/gfortran.dg/pr106857.f9012
-rw-r--r--gcc/testsuite/gfortran.dg/pr106934.f907
-rw-r--r--gcc/testsuite/lib/g++.exp10
-rw-r--r--gcc/testsuite/lib/gcc.exp21
-rw-r--r--gcc/testsuite/lib/wrapper.exp7
-rw-r--r--gcc/tree-cfg.cc33
-rw-r--r--gcc/tree-scalar-evolution.cc93
-rw-r--r--gcc/tree-ssa-pre.cc18
-rw-r--r--gcc/tree-ssa-reassoc.cc25
-rw-r--r--gcc/tree-ssa-uninit.cc8
-rw-r--r--gcc/tree-ssa.cc6
-rw-r--r--gcc/tree-vect-loop.cc6
-rw-r--r--gcc/tree.cc2
-rw-r--r--gcc/value-query.cc17
-rw-r--r--gcc/value-range-pretty-print.cc48
-rw-r--r--gcc/value-range-pretty-print.h2
-rw-r--r--gcc/value-range-storage.cc9
-rw-r--r--gcc/value-range-storage.h7
-rw-r--r--gcc/value-range.cc695
-rw-r--r--gcc/value-range.h237
180 files changed, 6648 insertions, 2103 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index e670cae..6dded16 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,485 @@
+2022-09-19 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+
+ * targhooks.cc (default_zero_call_used_regs): Improve sorry
+ message.
+
+2022-09-18 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (omp_segregate_mapping_groups): Update comment.
+ (gimplify_adjust_omp_clauses): Move ATTACH and
+ ATTACH_ZERO_LENGTH_ARRAY_SECTION nodes to the end of the clause list
+ for offloaded OpenMP regions.
+
+2022-09-18 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/106831
+ * value-range.cc (frange::singleton_p): Avoid propagating long
+ doubles that may have multiple representations.
+
+2022-09-18 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (frange_add_zeros): Replace set_signbit with
+ union of zero.
+ * value-query.cc (range_query::get_tree_range): Remove set_signbit
+ use.
+ * value-range-pretty-print.cc (vrange_printer::print_frange_prop):
+ Remove.
+ (vrange_printer::print_frange_nan): New.
+ * value-range-pretty-print.h (print_frange_prop): Remove.
+ (print_frange_nan): New.
+ * value-range-storage.cc (frange_storage_slot::set_frange): Set
+ kind and NAN fields.
+ (frange_storage_slot::get_frange): Restore kind and NAN fields.
+ * value-range-storage.h (class frange_storage_slot): Add kind and
+ NAN fields.
+ * value-range.cc (frange::update_nan): Remove.
+ (frange::set_signbit): Remove.
+ (frange::set): Adjust for NAN fields.
+ (frange::normalize_kind): Remove m_props.
+ (frange::combine_zeros): New.
+ (frange::union_nans): New.
+ (frange::union_): Handle new NAN fields.
+ (frange::intersect_nans): New.
+ (frange::intersect): Handle new NAN fields.
+ (frange::operator=): Same.
+ (frange::operator==): Same.
+ (frange::contains_p): Same.
+ (frange::singleton_p): Remove special case for signed zeros.
+ (frange::verify_range): Adjust for new NAN fields.
+ (frange::set_zero): Handle signed zeros.
+ (frange::set_nonnegative): Same.
+ (range_tests_nan): Adjust tests.
+ (range_tests_signed_zeros): Same.
+ (range_tests_signbit): Same.
+ (range_tests_floats): Same.
+ * value-range.h (class fp_prop): Remove.
+ (FP_PROP_ACCESSOR): Remove.
+ (class frange_props): Remove
+ (frange::lower_bound): NANs don't have endpoints.
+ (frange::upper_bound): Same.
+ (frange_props::operator==): Remove.
+ (frange_props::union_): Remove.
+ (frange_props::intersect): Remove.
+ (frange::update_nan): New.
+ (frange::clear_nan): New.
+ (frange::undefined_p): New.
+ (frange::set_nan): New.
+ (frange::known_finite): Adjust for new NAN representation.
+ (frange::maybe_isnan): Same.
+ (frange::known_isnan): Same.
+ (frange::signbit_p): Same.
+ * gimple-range-fold.cc (range_of_builtin_int_call): Rename
+ known_signbit_p into signbit_p.
+
+2022-09-17 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+
+ * config/csky/csky.h (FUNCTION_ARG_REGNO_P): Cast REGNO to (int)
+ to prevent warning.
+
+2022-09-17 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106958
+ * tree-ssa-reassoc.cc (optimize_range_tests_cmp_bitwise): If
+ id >= l, cast op to type1, otherwise to pointer_sized_int_node.
+ If type has pointer type, cast exp to pointer_sized_int_node
+ even when id < l.
+
+2022-09-16 liuhongt <hongtao.liu@intel.com>
+
+ PR target/106910
+ * config/i386/i386-builtins.cc
+ (ix86_builtin_vectorized_function): Modernized with
+ corresponding expanders.
+ * config/i386/sse.md (lrint<mode><sseintvecmodelower>2): New
+ expander.
+ (floor<mode>2): Ditto.
+ (lfloor<mode><sseintvecmodelower>2): Ditto.
+ (ceil<mode>2): Ditto.
+ (lceil<mode><sseintvecmodelower>2): Ditto.
+ (btrunc<mode>2): Ditto.
+ (lround<mode><sseintvecmodelower>2): Ditto.
+ (exp2<mode>2): Ditto.
+
+2022-09-15 Joseph Myers <joseph@codesourcery.com>
+
+ * ginclude/float.h (INFINITY): Define only if
+ [__FLT_HAS_INFINITY__].
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * tree-ssa-pre.cc (translate_vuse_through_block): Only
+ keep the VUSE if its def dominates PHIBLOCK.
+ (prune_clobbered_mems): Rewrite logic so we check whether
+ a value dies in a block when the VUSE def doesn't dominate it.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * tree.cc (build_common_tree_nodes): Initialize void_list_node
+ here.
+
+2022-09-15 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/106550
+ * config/rs6000/rs6000.cc (rs6000_emit_set_long_const): Use pli.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (frange_add_zeros): New.
+ (build_le): Call frange_add_zeros.
+ (build_ge): Same.
+ (foperator_equal::op1_range): Same.
+ (foperator_not_equal::op1_range): Same.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (build_le): Accept frange instead of number.
+ (build_lt): Same.
+ (build_ge): Same.
+ (build_gt): Same.
+ (foperator_lt::op1_range): Pass full range to build_*.
+ (foperator_lt::op2_range): Same.
+ (foperator_le::op1_range): Same.
+ (foperator_le::op2_range): Same.
+ (foperator_gt::op1_range): Same.
+ (foperator_gt::op2_range): Same.
+ (foperator_ge::op1_range): Same.
+ (foperator_ge::op2_range): Same.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * value-range.cc (frange::set): Use set_nan.
+ * value-range.h (frange::set_nan): Inline code originally in
+ set().
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (frange_set_nan): Remove.
+ (build_lt): Use set_nan, update_nan, clear_nan.
+ (build_gt): Same.
+ (foperator_equal::op1_range): Same.
+ (foperator_not_equal::op1_range): Same.
+ (foperator_lt::op1_range): Same.
+ (foperator_lt::op2_range): Same.
+ (foperator_le::op1_range): Same.
+ (foperator_le::op2_range): Same.
+ (foperator_gt::op1_range): Same.
+ (foperator_gt::op2_range): Same.
+ (foperator_ge::op1_range): Same.
+ (foperator_ge::op2_range): Same.
+ (foperator_unordered::op1_range): Same.
+ (foperator_ordered::op1_range): Same.
+ * value-query.cc (range_query::get_tree_range): Same.
+ * value-range.cc (frange::set_nan): Same.
+ (frange::update_nan): Same.
+ (frange::union_): Same.
+ (frange::intersect): Same.
+ (range_tests_nan): Same.
+ (range_tests_signed_zeros): Same.
+ (range_tests_signbit): Same.
+ (range_tests_floats): Same.
+ * value-range.h (class frange): Add update_nan and clear_nan.
+ (frange::set_nan): New.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * value-query.cc (range_query::get_tree_range): Remove check for overflow.
+ * value-range-pretty-print.cc (vrange_printer::visit): Move read
+ of type until after undefined_p is checked.
+ * value-range.cc (frange::set): Remove asserts for REAL_CST.
+ (frange::contains_p): Tidy up.
+ (range_tests_nan): Add comment.
+ * value-range.h (frange::type): Check for undefined_p.
+ (frange::set_undefined): Remove set of endpoints.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (omp_group_last): Allow GOMP_MAP_ATTACH_DETACH after
+ GOMP_MAP_STRUCT (for reindexing).
+ (omp_gather_mapping_groups): Reimplement using...
+ (omp_gather_mapping_groups_1): This new function. Stop processing at
+ GATHER_SENTINEL.
+ (omp_group_base): Allow GOMP_MAP_TO_PSET without any following node.
+ (omp_index_mapping_groups): Reimplement using...
+ (omp_index_mapping_groups_1): This new function. Handle
+ REINDEX_SENTINEL.
+ (omp_reindex_mapping_groups, omp_mapped_by_containing_struct): New
+ functions.
+ (omp_tsort_mapping_groups_1): Adjust handling of base group being the
+ same as current group. Use omp_mapped_by_containing_struct.
+ (omp_build_struct_sibling_lists): Use omp_mapped_by_containing_struct
+ and omp_reindex_mapping_groups. Robustify group deletion for reordered
+ lists.
+ (gimplify_scan_omp_clauses): Update calls to
+ omp_build_struct_sibling_lists.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (gimplify_omp_var_data): Remove GOVD_MAP_HAS_ATTACHMENTS.
+ (GOMP_FIRSTPRIVATE_IMPLICIT): Renumber.
+ (insert_struct_comp_map): Refactor function into...
+ (build_omp_struct_comp_nodes): This new function. Remove list handling
+ and improve self-documentation.
+ (extract_base_bit_offset): Remove BASE_REF, OFFSETP parameters. Move
+ code to strip outer parts of address out of function, but strip no-op
+ conversions.
+ (omp_mapping_group): Add DELETED field for use during reindexing.
+ (omp_strip_components_and_deref, omp_strip_indirections): New functions.
+ (omp_group_last, omp_group_base): Add GOMP_MAP_STRUCT handling.
+ (omp_gather_mapping_groups): Initialise DELETED field for new groups.
+ (omp_index_mapping_groups): Notice DELETED groups when (re)indexing.
+ (omp_siblist_insert_node_after, omp_siblist_move_node_after,
+ omp_siblist_move_nodes_after, omp_siblist_move_concat_nodes_after): New
+ helper functions.
+ (omp_accumulate_sibling_list): New function to build up GOMP_MAP_STRUCT
+ node groups for sibling lists. Outlined from gimplify_scan_omp_clauses.
+ (omp_build_struct_sibling_lists): New function.
+ (gimplify_scan_omp_clauses): Remove struct_map_to_clause,
+ struct_seen_clause, struct_deref_set. Call
+ omp_build_struct_sibling_lists as pre-pass instead of handling sibling
+ lists in the function's main processing loop.
+ (gimplify_adjust_omp_clauses_1): Remove GOVD_MAP_HAS_ATTACHMENTS
+ handling, unused now.
+ * omp-low.cc (scan_sharing_clauses): Handle pointer-type indirect
+ struct references, and references to pointers to structs also.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106938
+ * tree-cfg.cc (execute_fixup_cfg): Purge dead abnormal
+ edges for all last stmts in a block. Do EH cleanup
+ only on the last stmt in a block.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ PR tree-optimization/106936
+ * value-query.cc (range_query::get_value_range): Remove assert.
+
+2022-09-14 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+
+ * config/mips/mips.cc (mips_option_override): Drop unused variable.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (is_or_contains_p, omp_target_reorder_clauses): Delete
+ functions.
+ (omp_tsort_mark): Add enum.
+ (omp_mapping_group): Add struct.
+ (debug_mapping_group, omp_get_base_pointer, omp_get_attachment,
+ omp_group_last, omp_gather_mapping_groups, omp_group_base,
+ omp_index_mapping_groups, omp_containing_struct,
+ omp_tsort_mapping_groups_1, omp_tsort_mapping_groups,
+ omp_segregate_mapping_groups, omp_reorder_mapping_groups): New
+ functions.
+ (gimplify_scan_omp_clauses): Call above functions instead of
+ omp_target_reorder_clauses, unless we've seen an error.
+ * omp-low.cc (scan_sharing_clauses): Avoid strict test if we haven't
+ sorted mapping groups.
+
+2022-09-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106878
+ * tree-cfg.cc (verify_gimple_assign_binary): Disallow pointer,
+ reference or OFFSET_TYPE BIT_IOR_EXPR, BIT_XOR_EXPR or, unless
+ the second argument is INTEGER_CST, BIT_AND_EXPR.
+ * match.pd ((type) X op CST -> (type) (X op ((type-x) CST)),
+ (type) (((type2) X) op Y) -> (X op (type) Y)): Punt for
+ POINTER_TYPE_P or OFFSET_TYPE.
+ * tree-ssa-reassoc.cc (optimize_range_tests_cmp_bitwise): For
+ pointers cast them to pointer sized integers first.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106934
+ * tree-ssa.cc (non_rewritable_mem_ref_base): Avoid BIT_FIELD_REFs
+ of bitfields.
+ (maybe_rewrite_mem_ref_base): Likewise.
+
+2022-09-14 liuhongt <hongtao.liu@intel.com>
+
+ PR tree-optimization/106905
+ * tree-vect-loop.cc (vectorizable_nonlinear_induction): Return
+ false when !vect_use_loop_mask_for_alignment_p (loop_vinfo) &&
+ LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0.
+
+2022-09-13 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR target/106877
+ * reg-stack.cc (move_for_stack_reg): Check for any_malformed_asm
+ in gcc_assert.
+
+2022-09-13 Max Filippov <jcmvbkbc@gmail.com>
+
+ Revert:
+ 2022-09-12 Takayuki 'January June' Suwa <jjsuwa_sys3175@yahoo.co.jp>
+
+ * config/xtensa/xtensa.cc (TARGET_CONSTANT_OK_FOR_CPROP_P):
+ New macro definition.
+ (xtensa_constant_ok_for_cprop_p):
+ Implement the hook as mentioned above.
+
+2022-09-13 Kewen Lin <linkw@linux.ibm.com>
+
+ PR target/104482
+ * config/rs6000/rs6000-c.cc (altivec_resolve_overloaded_builtin): Fix
+ the equality check for argument number, and move this hunk ahead.
+
+2022-09-13 Kewen.Lin <linkw@gcc.gnu.org>
+
+ PR target/105485
+ * config/rs6000/rs6000-builtin.cc (rs6000_gimple_fold_builtin): Add
+ the handling for unresolved overloaded builtin function.
+ (rs6000_expand_builtin): Likewise.
+
+2022-09-13 Kewen Lin <linkw@linux.ibm.com>
+
+ * config/rs6000/rs6000.cc (class rs6000_cost_data): Add new members
+ m_nstores, m_reduc_factor, m_gather_load and member function
+ determine_suggested_unroll_factor.
+ (rs6000_cost_data::update_target_cost_per_stmt): Update for m_nstores,
+ m_reduc_factor and m_gather_load.
+ (rs6000_cost_data::determine_suggested_unroll_factor): New function.
+ (rs6000_cost_data::finish_cost): Use determine_suggested_unroll_factor.
+ * config/rs6000/rs6000.opt (rs6000-vect-unroll-limit): New parameter.
+ (rs6000-vect-unroll-issue): Likewise.
+ (rs6000-vect-unroll-reduc-threshold): Likewise.
+ * doc/invoke.texi (rs6000-vect-unroll-limit): Document new parameter.
+
+2022-09-13 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/106909
+ * gimple-fold.cc (gimple_fold_call): Clear the ctrl-altering
+ flag of a unreachable call.
+
+2022-09-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106913
+ * tree-ssa-uninit.cc (warn_uninitialized_vars): Do not set
+ ft_reachable on EXIT_BLOCK.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc (aarch64_classify_vector_mode): Use
+ TARGET_FLOAT instead of TARGET_SIMD.
+ (aarch64_vectorize_related_mode): Restrict ADVSIMD handling to
+ TARGET_SIMD.
+ (aarch64_hard_regno_mode_ok): Don't allow tuples of 2 64-bit vectors
+ in GPRs.
+ (aarch64_classify_address): Treat little-endian structure moves
+ like big-endian for TARGET_FLOAT && !TARGET_SIMD.
+ (aarch64_secondary_memory_needed): New function.
+ (aarch64_secondary_reload): Handle 128-bit Advanced SIMD vectors
+ in the same way as TF, TI and TD.
+ (aarch64_rtx_mult_cost): Restrict ADVSIMD handling to TARGET_SIMD.
+ (aarch64_rtx_costs): Likewise.
+ (aarch64_register_move_cost): Treat a pair of 64-bit vectors
+ separately from a single 128-bit vector. Handle the cost implied
+ by aarch64_secondary_memory_needed.
+ (aarch64_simd_valid_immediate): Restrict ADVSIMD handling to
+ TARGET_SIMD.
+ (aarch64_expand_vec_perm_const_1): Likewise.
+ (TARGET_SECONDARY_MEMORY_NEEDED): New macro.
+ * config/aarch64/iterators.md (VTX): New iterator.
+ * config/aarch64/aarch64.md (arches): Add fp_q as a synonym of simd.
+ (arch_enabled): Adjust accordingly.
+ (@aarch64_reload_mov<TX:mode>): Extend to...
+ (@aarch64_reload_mov<VTX:mode>): ...this.
+ * config/aarch64/aarch64-simd.md (mov<mode>): Require TARGET_FLOAT
+ rather than TARGET_SIMD.
+ (movmisalign<mode>): Likewise.
+ (load_pair<DREG:mode><DREG2:mode>): Likewise.
+ (vec_store_pair<DREG:mode><DREG2:mode>): Likewise.
+ (load_pair<VQ:mode><VQ2:mode>): Likewise.
+ (vec_store_pair<VQ:mode><VQ2:mode>): Likewise.
+ (@aarch64_split_simd_mov<mode>): Likewise.
+ (aarch64_get_low<mode>): Likewise.
+ (aarch64_get_high<mode>): Likewise.
+ (aarch64_get_half<mode>): Likewise. Canonicalize to a move for
+ lowpart extracts.
+ (*aarch64_simd_mov<VDMOV:mode>): Require TARGET_FLOAT rather than
+ TARGET_SIMD. Use different w<-w and r<-w instructions for
+ !TARGET_SIMD. Disable immediate moves for !TARGET_SIMD but
+ add an alternative specifically for w<-Z.
+ (*aarch64_simd_mov<VQMOV:mode>): Require TARGET_FLOAT rather than
+ TARGET_SIMD. Likewise for the associated define_splits. Disable
+ FPR moves and immediate moves for !TARGET_SIMD but add an alternative
+ specifically for w<-Z.
+ (aarch64_simd_mov_from_<mode>high): Require TARGET_FLOAT rather than
+ TARGET_SIMD. Restrict the existing alternatives to TARGET_SIMD
+ but add a new r<-w one for !TARGET_SIMD.
+ (*aarch64_get_high<mode>): New pattern.
+ (load_pair_lanes<mode>): Require TARGET_FLOAT rather than TARGET_SIMD.
+ (store_pair_lanes<mode>): Likewise.
+ (*aarch64_combine_internal<mode>): Likewise. Restrict existing
+ w<-w, w<-r and w<-m alternatives to TARGET_SIMD but add a new w<-r
+ alternative for !TARGET_SIMD.
+ (*aarch64_combine_internal_be<mode>): Likewise.
+ (aarch64_combinez<mode>): Require TARGET_FLOAT rather than TARGET_SIMD.
+ Remove bogus arch attribute.
+ (*aarch64_combinez_be<mode>): Likewise.
+ (@aarch64_vec_concat<mode>): Require TARGET_FLOAT rather than
+ TARGET_SIMD.
+ (aarch64_combine<mode>): Likewise.
+ (aarch64_rev_reglist<mode>): Likewise.
+ (mov<mode>): Likewise.
+ (*aarch64_be_mov<VSTRUCT_2D:mode>): Extend to TARGET_FLOAT &&
+ !TARGET_SIMD, regardless of endianness. Extend associated
+ define_splits in the same way, both for this pattern and the
+ ones below.
+ (*aarch64_be_mov<VSTRUCT_2Qmode>): Likewise. Restrict w<-w
+ alternative to TARGET_SIMD.
+ (*aarch64_be_movoi): Likewise.
+ (*aarch64_be_movci): Likewise.
+ (*aarch64_be_movxi): Likewise.
+ (*aarch64_be_mov<VSTRUCT_4QD:mode>): Extend to TARGET_FLOAT
+ && !TARGET_SIMD, regardless of endianness. Restrict w<-w alternative
+ to TARGET_SIMD for tuples of 128-bit vectors.
+ (*aarch64_be_mov<VSTRUCT_4QD:mode>): Likewise.
+ * config/aarch64/aarch64-ldpstp.md: Remove TARGET_SIMD condition
+ from DREG STP peephole. Change TARGET_SIMD to TARGET_FLOAT in
+ the VQ and VP_2E LDP and STP peepholes.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-simd.md (movv8di): Remove TARGET_SIMD
+ condition. Likewise for the related define_split. Tweak formatting.
+
+2022-09-12 Takayuki 'January June' Suwa <jjsuwa_sys3175@yahoo.co.jp>
+
+ * config/xtensa/xtensa.cc (TARGET_CONSTANT_OK_FOR_CPROP_P):
+ New macro definition.
+ (xtensa_constant_ok_for_cprop_p):
+ Implement the hook as mentioned above.
+
+2022-09-12 Joseph Myers <joseph@codesourcery.com>
+
+ * ginclude/stdatomic.h [defined __STDC_VERSION__ &&
+ __STDC_VERSION__ > 201710L] (ATOMIC_VAR_INIT): Do not define.
+
+2022-09-12 Tobias Burnus <tobias@codesourcery.com>
+
+ * config/nvptx/mkoffload.cc (process): Replace a fatal_error by
+ a warning + not enabling offloading if -misa=sm_30 prevents
+ reverse offload.
+ (main): Use tool_name as progname for diagnostic.
+ * config/gcn/mkoffload.cc (main): Likewise.
+
+2022-09-12 Aldy Hernandez <aldyh@redhat.com>
+
+ * value-range.cc (frange::set_signbit): Avoid changing sign when
+ already in the correct sign.
+
+2022-09-12 Max Filippov <jcmvbkbc@gmail.com>
+
+ * config/xtensa/xtensa.cc (xtensa_function_value_regno_p):
+ Recognize all 4 return registers.
+ * config/xtensa/xtensa.h (GP_RETURN_REG_COUNT): New definition.
+ * config/xtensa/xtensa.md (untyped_call): New pattern.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ * doc/extend.texi (Floating Types): Fix "_float128" typo.
+
2022-09-10 Takayuki 'January June' Suwa <jjsuwa_sys3175@yahoo.co.jp>
* config/xtensa/xtensa.cc (xtensa_constantsynth):
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index eaf5982..54f97aa 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20220912
+20220920
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index b1fbd1e..a1c4375 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,505 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * gcc-interface/trans.cc (gigi): Do not initialize void_list_node.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.cc (gnat_to_gnu_entity): Relax assertion when
+ front-end unnesting is enabled.
+
+2022-09-12 Justin Squirek <squirek@adacore.com>
+
+ * sem_util.adb
+ (Innermost_Master_Scope_Depth): Detect and handle case where scope
+ depth is not set on an enclosing scope.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * bindgen.adb: When the binder is invoked for the host, generate a
+ "with CUDA.Internal;" with clause.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * doc/gnat_rm/implementation_defined_pragmas.rst
+ (Pragma Unreferenced): Sync description with
+ Sem_Warn.Has_Junk_Name routine.
+ * gnat_rm.texi: Regenerate.
+ * gnat_ugn.texi: Regenerate.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_attr.adb (Analyze_Attribute [Valid_Scalars]): Move check for
+ unchecked union before checks for private and public types.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * bindgen.adb: When the binder is invoked for the host, it
+ declares imported subprograms corresponding to the Adainit and
+ Adafinal routines on the device. Declare string constants and
+ expression functions for the Ada source names and the link names
+ of these routines. Generate these subprogram declarations (and
+ accompanying Import pragmas) in Gen_CUDA_Defs. Generate
+ CUDA_Execute pragmas to call these subprograms from the host in
+ Gen_Adafinal and Gen_CUDA_Init. When the binder is invoked for the
+ device, include a CUDA_Global aspect declaration in the
+ declarations of Adainit and Adafinal and use the aforementioned
+ link names in the Export pragmas generated for those two routines.
+ * debug.adb: Update comments about "d_c" and "d_d" switches.
+ * opt.ads: Declare new Boolean variable,
+ Enable_CUDA_Device_Expansion. This complements the existing
+ Enable_CUDA_Expansion variable, which is used to enable host-side
+ CUDA expansion. The new variable enables device-side CUDA
+ expansion. It is currently never set during compilation; it is
+ only set via a binder switch.
+ * switch-b.adb
+ (scan_debug_switches): Add new use of the "-d_d" binder switch.
+	The new switch and the variable Opt.Enable_CUDA_Device_Expansion
+	follow the existing pattern of the "-d_c" switch and the variable
+	Opt.Enable_CUDA_Expansion. Flag error if both "-d_c" and "-d_d"
+ are specified.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+	* contracts.adb (Build_Subprogram_Contract_Wrapper): Remove useless
+ local variable. In the case of a function, replace the extended
+ return statement by a block statement declaring a renaming of the
+ call to the local subprogram after removing side effects manually.
+ (Expand_Subprogram_Contract): Adjust description accordingly.
+ * exp_ch6.adb (Expand_Ctrl_Function_Call): Rewrite obsolete
+ comment and do not apply the transformation twice.
+ * sem_attr.adb (Analyze_Attribute_Old_Result): Now expect a block
+ statement instead of an extended return statement.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * erroutc.adb (Set_Msg_Insertion_Name): Special-case printing with
+ acronyms.
+
+2022-09-12 Yannick Moy <moy@adacore.com>
+
+ * libgnat/s-imagei.adb (Image_Integer): Add justification.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_prag.adb (Get_SPARK_Mode_Type): Fix header box; replace
+ chained IF with a CASE statement.
+
+2022-09-12 Yannick Moy <moy@adacore.com>
+
+ * sem_prag.adb (Analyze_Pragma): Accept SPARK_Mode=>Auto as
+ configuration pragma.
+ (Get_SPARK_Mode): Make the value for Auto explicit.
+ * snames.ads-tmpl (Name_Auto): Add name.
+
+2022-09-12 Joffrey Huguet <huguet@adacore.com>
+
+ * doc/gnat_rm/the_gnat_library.rst: Remove paragraphs about SPARK
+ containers.
+ * gnat_rm.texi, gnat_ugn.texi: Regenerate.
+
+2022-09-12 Yannick Moy <moy@adacore.com>
+
+ * libgnat/s-maccod.ads: Mark package as SPARK_Mode Off.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * fe.h (Unnest_Subprogram_Mode): Declare.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * contracts.adb
+ (Analyze_Package_Contract): Do not analyze the contract of a
+ temporary package created just to check conformance of an actual
+ package.
+
+2022-09-12 Joffrey Huguet <huguet@adacore.com>
+
+ * Makefile.rtl: Remove SPARK containers filenames.
+ * impunit.adb: Remove SPARK containers packages names.
+ * libgnat/a-cfdlli.adb, libgnat/a-cfdlli.ads: Remove content and
+ add pragma Compile_Time_Error with suitable message.
+ * libgnat/a-cfhama.adb, libgnat/a-cfhama.ads: Likewise.
+ * libgnat/a-cfhase.adb, libgnat/a-cfhase.ads: Likewise.
+ * libgnat/a-cfidll.adb, libgnat/a-cfidll.ads: Likewise.
+ * libgnat/a-cfinse.adb, libgnat/a-cfinse.ads: Likewise.
+ * libgnat/a-cfinve.adb, libgnat/a-cfinve.ads: Likewise.
+ * libgnat/a-cforma.adb, libgnat/a-cforma.ads: Likewise.
+ * libgnat/a-cforse.adb, libgnat/a-cforse.ads: Likewise.
+ * libgnat/a-cofove.adb, libgnat/a-cofove.ads: Likewise.
+ * libgnat/a-cofuma.adb, libgnat/a-cofuma.ads: Likewise.
+ * libgnat/a-cofuse.adb, libgnat/a-cofuse.ads: Likewise.
+ * libgnat/a-cofuve.adb, libgnat/a-cofuve.ads: Likewise.
+ * libgnat/a-cofuba.adb, libgnat/a-cofuba.ads: Remove package.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * exp_attr.adb (Expand_N_Attribute_Reference [Attribute_Old]):
+ Adapt to object declaration being rewritten into object renaming.
+
+2022-09-12 Justin Squirek <squirek@adacore.com>
+
+ * contracts.adb, contracts.ads
+ (Analyze_Pragmas_In_Declarations): Added to aid in the new
+ expansion model so that pragmas relating to contracts can get
+ processed early before the rest of the subprogram containing them.
+ (Build_Subprogram_Contract_Wrapper): Created to do the majority of
+ expansion for postconditions. It builds a local wrapper with the
+ statements and declarations within a given subprogram.
+ (Is_Prologue_Renaming): Moved out from Process_Preconditions to be
+ used generally within the contracts package.
+ (Build_Entry_Contract_Wrapper): Moved from exp_ch7.
+ (Expand_Subprogram_Contract): Add new local variable Decls to
+ store expanded declarations needed for evaluation of contracts.
+ Call new wrapper building procedure and modify comments to match
+ new expansion model.
+ (Get_Postcond_Enabled): Deleted.
+ (Get_Result_Object_For_Postcond): Deleted.
+ (Get_Return_Success_For_Postcond): Deleted.
+ (Process_Contract_Cases): Add new parameter to store declarations.
+ (Process_Postconditions): Add new parameter to store declarations.
+ (Process_Preconditions): Add new parameter to store declarations.
+ Add code to move entry-call prologue renamings
+ * einfo.ads: Document new field Wrapped_Statements and modify
+ comment for Postconditions_Proc.
+ * exp_attr.adb
+ (Analyze_Attribute): Modify expansion of the 'Old attribute to
+ recognize new expansion model and use Wrapped_Statements instead
+ of Postconditions_Proc.
+ * exp_ch6.adb
+ (Add_Return): Remove special expansion for postconditions.
+ (Expand_Call): Modify condition checking for calls to access
+ subprogram wrappers to handle new expansion models.
+ (Expand_Call_Helper): Remove special expansion for postconditions.
+ (Expand_Non_Function_Return): Remove special expansion for
+ postconditions.
+ (Expand_Simple_Function_Return): Remove special expansion for
+ postconditions.
+ * exp_ch7.adb
+ (Build_Finalizer): Deleted, but replaced by code in
+ Build_Finalizer_Helper
+ (Build_Finalizer_Helper): Renamed to Build_Finalizer, and special
+ handling of 'Old objects removed.
+ * exp_ch9.adb
+ (Build_Contract_Wrapper): Renamed and moved to contracts package.
+ * exp_prag.adb
+ (Expand_Pragma_Contract_Cases): Delay analysis of contracts since
+ they now instead get analyzed as part of the wrapper generation
+ instead of after analysis of their corresponding subprogram's
+ body.
+ (Expand_Pragma_Check): Label expanded if-statements which come
+ from the expansion of assertion statements as
+ Comes_From_Check_Or_Contract.
+ * freeze.adb
+ (Freeze_Entity): Add special case to avoid freezing when a freeze
+ node gets generated as part of the expansion of a postcondition
+ check.
+ * gen_il-gen-gen_nodes.adb: Add new flag
+ Comes_From_Check_Or_Contract.
+ * gen_il-fields.ads: Add new field Wrapped_Statements. Add new
+ flag Comes_From_Check_Or_Contract.
+ * gen_il-gen-gen_entities.adb: Add new field Wrapped_Statements.
+ * ghost.adb
+ (Is_OK_Declaration): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ (Is_OK_Statement): Simplify condition due to the loss of
+ Original_Node as a result of the new expansion model of contracts
+ and use new flag Comes_From_Check_Or_Contract in its place.
+ * inline.adb
+ (Declare_Postconditions_Result): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ (Expand_Inlined_Call): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ * lib.adb, lib.ads
+ (ipu): Created to aid in debugging.
+ * lib-xref.adb
+ (Generate_References): Remove special handling for postcondition
+ procedures.
+ * sem_attr.adb
+ (Analyze_Attribute_Old_Result): Add new context in which 'Old can
+ appear due to the changes in expansion. Replace
+ Name_uPostconditions with Name_uWrapped_Statements.
+ (Result): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ * sem_ch11.adb
+ (Analyze_Handled_Statements): Remove check to exclude warnings on
+ useless assignments within postcondition procedures since
+ postconditions no longer get isolated into separate subprograms.
+ * sem_ch6.adb
+ (Analyze_Generic_Subprogram_Body): Modify expansion of generic
+ subprogram bodies so that contracts (and their associated pragmas)
+ get analyzed first.
+ (Analyze_Subprogram_Body_Helper): Remove global HSS variable due
+ to the HSS of the body potentially changing during the expansion
+ of contracts. In cases where it was used instead directly call
+ Handled_Statement_Sequence. Modify expansion of subprogram bodies
+ so that contracts (and their associated pragmas) get analyzed
+ first.
+ (Check_Missing_Return): Create local HSS variable instead of using
+ a global one.
+ (Move_Pragmas): Use new pragma table instead of an explicit list.
+ * sem_elab.adb
+ (Is_Postconditions_Proc): Deleted since the new scheme of
+ expansion no longer divides postcondition checks to a separate
+ subprogram and so cannot be easily identified (similar to
+ pre-condition checks).
+ (Info_Call): Remove info printing for _Postconditions subprograms.
+ (Is_Assertion_Pragma_Target): Remove check for postconditions
+	procedure.
+ (Is_Bridge_Target): Remove check for postconditions procedure.
+ (Get_Invocation_Attributes): Remove unneeded local variables and
+ check for postconditions procedure.
+ (Output_Call): Remove info printing for _Postconditions
+ subprograms.
+ * sem_prag.adb, sem_prag.ads: Add new Pragma table for pragmas
+ significant to subprograms, along with tech-debt comment.
+ (Check_Arg_Is_Local_Name): Modified to recognize the new
+ _Wrapped_Statements internal subprogram and the new expansion
+ model.
+ (Relocate_Pragmas_To_Body): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ * sem_res.adb
+ (Resolve_Entry_Call): Add conditional to detect both contract
+ based wrappers of entries, but also wrappers generated as part of
+ general contract expansion (e.g. local postconditions
+ subprograms).
+ * sem_util.adb
+ (Accessibility_Level): Verify 'Access is not taken based on a
+ component of a function result.
+ (Has_Significant_Contracts): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ (Same_Or_Aliased_Subprogram): Add conditional to detect and obtain
+ the original subprogram based on the new concept of
+ "postcondition" wrappers.
+ * sinfo.ads: Add documentation for new flag
+ Comes_From_Check_Or_Contract.
+ * snames.ads-tmpl: Remove Name_uPostconditions and add
+	Name_uWrapped_Statements.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_unst.adb (Unnest_Subprograms.Search_Subprograms): Skip the
+ subprogram bodies that are not to be unnested.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_aggr.adb
+ (Resolve_Array_Aggregate): Generate an appropriate error message
+ in the case where an error in the source code leads to an
+ N_Iterated_Element_Association node in a bad context.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_ch4.adb
+ (Analyze_Selected_Component): Initialize the local variable Comp
+ to avoid having CodePeer generate an uninitialized variable
+ warning.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_ch4.adb
+ (Analyze_Selected_Component): Avoid initializing the local
+ variable Comp if the variable is not going to be subsequently
+ referenced. This is a correctness issue because the call to
+ First_Entity can fail.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_ch9.adb
+ (Satisfies_Lock_Free_Requirements): If Ceiling_Locking locking
+ policy has been specified, then either return False (if Lock_Free
+ was not explicitly specified) or generate a warning that ceiling
+ locking will not be implemented for this protected unit (if
+ Lock_Free was explicitly specified). Generate an error message (in
+ addition to returning False) if an explicit Lock_Free aspect
+ specification is rejected because atomic primitives are not
+ supported on the given target.
+ * doc/gnat_rm/implementation_defined_pragmas.rst: Clarify that the
+ Lock_Free aspect for a protected unit takes precedence over the
+ Ceiling_Locking locking policy in the case where both apply.
+ * gnat_rm.texi: Regenerate.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_ch9.adb (Build_Protected_Spec): Tidy up and propagate the
+ Comes_From_Source flag onto the new formal parameters.
+ * sem_ch6.adb (Analyze_Subprogram_Body_Helper): Do not check
+ references for subprograms generated for protected subprograms.
+
+2022-09-12 Gary Dismukes <dismukes@adacore.com>
+
+ * sem_res.adb
+ (Resolve_Equality_Op): Add handling for equality ops with
+ user-defined literal operands.
+ * sem_util.ads
+ (Is_User_Defined_Literal): Update spec comment to indicate
+ inclusion of named number cases.
+ * sem_util.adb
+ (Corresponding_Primitive_Op): Rather than following the chain of
+ ancestor subprograms via Alias and Overridden_Operation links, we
+ check for matching profiles between primitive subprograms of the
+ descendant type and the ancestor subprogram (by calling a new
+ nested function Profile_Matches_Ancestor). This prevents the
+ compiler from hanging due to circular linkages via those fields
+ that can occur between inherited and overriding subprograms
+ (which might indicate a latent bug, but one that may be rather
+ delicate to resolve).
+ (Profile_Matches_Ancestor): New nested subprogram to compare the
+ profile of a primitive subprogram with the profile of a candidate
+ ancestor subprogram.
+ (Is_User_Defined_Literal): Also return True in cases where the
+ node N denotes a named number (E_Name_Integer and E_Named_Real).
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+	* debug.adb: Remove a comment.
+
+2022-09-12 Bob Duff <duff@adacore.com>
+
+ * checks.adb
+ (Selected_Length_Checks): In the message for an aggregate that has
+ too few or too many elements, add "!!" to make sure the warning
+ gets printed in with'ed units. Note that we have to put "!!"
+ before the "??", because Compile_Time_Constraint_Error detects
+ warnings by comparing the last character of the message with '?'
+	(which is a bit dubious, but we're not changing that here).
+ (Length_Mismatch_Info_Message): Use Unat for some things that
+ can't be negative. Specify Decimal instead of Auto in calls to
+ UI_Image.
+ * sem_util.adb
+ (Compile_Time_Constraint_Error): Minor.
+ * uintp.adb
+ (Image_Uint): It's always better to initialize objects on their
+ declaration.
+
+2022-09-12 Patrick Bernardi <bernardi@adacore.com>
+
+ * libgnat/system-vxworks7-x86_64-kernel.ads: Set
+ Support_Atomic_Primitives to false.
+ * libgnat/system-vxworks7-x86_64-rtp-smp.ads: Ditto.
+
+2022-09-12 Patrick Bernardi <bernardi@adacore.com>
+
+ * libgnat/system-qnx-arm.ads: Set Support_Atomic_Primitives to
+ false.
+ * libgnat/system-vxworks7-aarch64.ads: Ditto.
+ * libgnat/system-vxworks7-aarch64-rtp-smp.ads: Ditto.
+ * libgnat/system-vxworks7-arm.ads: Ditto.
+ * libgnat/system-vxworks7-arm-rtp-smp.ads: Ditto.
+ * libgnat/system-vxworks7-x86-kernel.ads: Ditto.
+ * libgnat/system-vxworks7-x86-rtp-smp.ads: Ditto.
+
+2022-09-12 Bob Duff <duff@adacore.com>
+
+ * par-tchk.adb, par-util.adb, prep.adb, prepcomp.adb, scng.adb:
+ Use "in" instead of chains of "=" connected with "or else".
+ Likewise for "not in", "/=", "and then". Misc cleanup.
+ * par-ch10.adb, par-ch12.adb, par-ch13.adb, par-ch4.adb: Likewise.
+ * par-ch8.adb, par-ch9.adb, par-endh.adb, par-sync.adb: Likewise.
+ * par.adb
+ (Pf_Rec): Remove filler, which was added August 25, 1993 to get
+ around a compiler limitation that no longer exists. Minor cleanup.
+	Remove useless qualifications.
+ * par-ch3.adb: Remove redundant return statements.
+ (Component_Scan_Loop): Remove loop name; there are no nested
+ loops, so it's unnecessary and possibly misleading, and it causes
+ too-long lines.
+ * par-ch5.adb: DRY: Remove comments that repeat the comments in
+ par.adb.
+ (P_Sequence_Of_Statements): It is better to initialize things on
+ the declaration. And constants are better than variables.
+ (Test_Statement_Required): Remove unnecessary insertion of a null
+ statement.
+ * par-ch6.adb, par-ch7.adb: DRY: Remove comments that repeat the
+ comments in par.adb.
+
+2022-09-12 Javier Miranda <miranda@adacore.com>
+
+ Revert:
+ 2022-09-06 Javier Miranda <miranda@adacore.com>
+
+ * debug.adb
+ (Debug_Flag_Underscore_X): Switch added temporarily to allow
+ disabling extra formal checks.
+ * exp_attr.adb
+ (Expand_N_Attribute_Reference [access types]): Add extra formals
+ to the subprogram referenced in the prefix of 'Unchecked_Access,
+ 'Unrestricted_Access or 'Access; required to check that its extra
+ formals match the extra formals of the corresponding subprogram
+ type.
+ * exp_ch3.adb
+ (Stream_Operation_OK): Declaration moved to the public part of the
+ package.
+ (Validate_Tagged_Type_Extra_Formals): New subprogram.
+ (Expand_Freeze_Record_Type): Improve the code that takes care of
+ adding the extra formals of dispatching primitives; extended to
+ add also the extra formals to renamings of dispatching primitives.
+ * exp_ch3.ads
+ (Stream_Operation_OK): Declaration moved from the package body.
+ * exp_ch6.adb
+ (Has_BIP_Extra_Formal): Subprogram declaration moved to the public
+ part of the package. In addition, a parameter has been added to
+ disable an assertion that requires its use with frozen entities.
+ (Expand_Call_Helper): Enforce assertion checking extra formals on
+ thunks.
+ (Is_Build_In_Place_Function): Return False for entities with
+ foreign convention.
+ (Make_Build_In_Place_Call_In_Object_Declaration): Occurrences of
+ Is_Return_Object replaced by the local variable
+ Is_OK_Return_Object that evaluates to False for scopes with
+ foreign convention.
+ (Might_Have_Tasks): Fix check of class-wide limited record types.
+ (Needs_BIP_Task_Actuals): Remove assertion to allow calling this
+ function in more contexts; in addition it returns False for
+ functions returning objects with foreign convention.
+ (Needs_BIP_Finalization_Master): Likewise.
+ (Needs_BIP_Alloc_Form): Likewise.
+ * exp_ch6.ads
+ (Stream_Operation_OK): Declaration moved from the package body. In
+ addition, a parameter has been added to disable assertion that
+ requires its use with frozen entities.
+ * freeze.adb
+ (Check_Itype): Add extra formals to anonymous access subprogram
+ itypes.
+ (Freeze_Expression): Improve code that disables the addition of
+ extra formals to functions with foreign convention.
+ (Check_Extra_Formals): Moved to package Sem_Ch6 as
+ Extra_Formals_OK.
+ (Freeze_Subprogram): Add extra formals to non-dispatching
+ subprograms.
+ * sem_ch3.adb
+ (Access_Subprogram_Declaration): Defer the addition of extra
+ formals to the freezing point so that we know the convention.
+ (Check_Anonymous_Access_Component): Likewise.
+ (Derive_Subprogram): Fix documentation.
+ * sem_ch6.adb
+ (Check_Anonymous_Return): Fix check of access to class-wide
+ limited record types.
+ (Check_Untagged_Equality): Placed in alphabetical order.
+ (Extra_Formals_OK): Subprogram moved from freeze.adb.
+ (Extra_Formals_Match_OK): New subprogram.
+ (Has_BIP_Formals): New subprogram.
+ (Has_Extra_Formals): New subprograms.
+ (Needs_Accessibility_Check_Extra): New subprogram.
+ (Needs_Constrained_Extra): New subprogram.
+ (Parent_Subprogram): New subprogram.
+ (Add_Extra_Formal): Minor code cleanup.
+ (Create_Extra_Formals): Enforce matching extra formals on
+ overridden and aliased entities.
+ (Has_Reliable_Extra_Formals): New subprogram.
+ * sem_ch6.ads
+ (Extra_Formals_OK): Subprogram moved from freeze.adb.
+ (Extra_Formals_Match_OK): New subprogram.
+ * sem_eval.adb
+ (Compile_Time_Known_Value): Improve predicate to avoid assertion
+ failure; found working on this ticket; this change does not affect
+ the behavior of the compiler because this subprogram has an
+ exception handler that returns False when the assertion fails.
+ * sem_util.adb
+ (Needs_Result_Accessibility_Level): Do not return False for
+	dispatching operations compiled with Ada_Version < 2012 since
+	they may be overridden by primitives compiled with Ada_Version >=
+ Ada_2012.
+
2022-09-06 Eric Botcazou <ebotcazou@adacore.com>
* gcc-interface/decl.cc (gnat_to_gnu_param): Set DECL_ARTIFICIAL.
diff --git a/gcc/ada/gcc-interface/trans.cc b/gcc/ada/gcc-interface/trans.cc
index f2e0cb2..2d93947 100644
--- a/gcc/ada/gcc-interface/trans.cc
+++ b/gcc/ada/gcc-interface/trans.cc
@@ -413,7 +413,6 @@ gigi (Node_Id gnat_root,
save_gnu_tree (gnat_literal, t, false);
/* Declare the building blocks of function nodes. */
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
void_ftype = build_function_type_list (void_type_node, NULL_TREE);
ptr_void_ftype = build_pointer_type (void_ftype);
diff --git a/gcc/analyzer/ChangeLog b/gcc/analyzer/ChangeLog
index ea6d5ee..3af1a38 100644
--- a/gcc/analyzer/ChangeLog
+++ b/gcc/analyzer/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-12 Martin Liska <mliska@suse.cz>
+
+ * region-model.cc (region_model::maybe_complain_about_infoleak):
+ Remove unused fields.
+
2022-09-11 Tim Lange <mail@tim-lange.me>
PR analyzer/106845
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 4144df5..ba3d76d 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,9 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * c-common.h (build_void_list_node): Remove.
+ * c-common.cc (c_common_nodes_and_builtins): Do not initialize
+ void_list_node.
+
2022-09-09 Jan-Benedict Glaw <jbglaw@lug-owl.de>
* c-format.cc (convert_format_name_to_system_name): Fix warning.
diff --git a/gcc/c-family/c-common.cc b/gcc/c-family/c-common.cc
index 0a5b7e1..c0f15f4 100644
--- a/gcc/c-family/c-common.cc
+++ b/gcc/c-family/c-common.cc
@@ -4505,8 +4505,6 @@ c_common_nodes_and_builtins (void)
TYPE_NAME (void_type_node) = void_name;
}
- void_list_node = build_void_list_node ();
-
/* Make a type to be the domain of a few array types
whose domains don't really matter.
200 is small enough that it always fits in size_t
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index ce971a2..2f592f5 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -853,7 +853,6 @@ extern tree identifier_global_tag (tree);
extern bool names_builtin_p (const char *);
extern tree c_linkage_bindings (tree);
extern void record_builtin_type (enum rid, const char *, tree);
-extern tree build_void_list_node (void);
extern void start_fname_decls (void);
extern void finish_fname_decls (void);
extern const char *fname_as_string (int);
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 41dc86b..b7fe1a4 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,18 @@
+2022-09-19 Marek Polacek <polacek@redhat.com>
+
+ PR c/106947
+ * c-typeck.cc (maybe_warn_for_null_address): Don't emit stray
+ notes.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * c-decl.cc (build_void_list_node): Remove.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * c-typeck.cc (c_finish_omp_clauses): Remove whole mapping node group
+ on error.
+
2022-09-07 Joseph Myers <joseph@codesourcery.com>
* c-parser.cc (c_parser_static_assert_declaration_no_semi)
diff --git a/gcc/c/c-decl.cc b/gcc/c/c-decl.cc
index 34f8fed..b09c639 100644
--- a/gcc/c/c-decl.cc
+++ b/gcc/c/c-decl.cc
@@ -10676,14 +10676,6 @@ record_builtin_type (enum rid rid_index, const char *name, tree type)
debug_hooks->type_decl (decl, false);
}
-/* Build the void_list_node (void_type_node having been created). */
-tree
-build_void_list_node (void)
-{
- tree t = build_tree_list (NULL_TREE, void_type_node);
- return t;
-}
-
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */
struct c_parm *
diff --git a/gcc/c/c-typeck.cc b/gcc/c/c-typeck.cc
index 9ada5d2..33d1e84 100644
--- a/gcc/c/c-typeck.cc
+++ b/gcc/c/c-typeck.cc
@@ -11738,18 +11738,19 @@ maybe_warn_for_null_address (location_t loc, tree op, tree_code code)
|| from_macro_expansion_at (loc))
return;
+ bool w;
if (code == EQ_EXPR)
- warning_at (loc, OPT_Waddress,
- "the comparison will always evaluate as %<false%> "
- "for the address of %qE will never be NULL",
- op);
+ w = warning_at (loc, OPT_Waddress,
+ "the comparison will always evaluate as %<false%> "
+ "for the address of %qE will never be NULL",
+ op);
else
- warning_at (loc, OPT_Waddress,
- "the comparison will always evaluate as %<true%> "
- "for the address of %qE will never be NULL",
- op);
+ w = warning_at (loc, OPT_Waddress,
+ "the comparison will always evaluate as %<true%> "
+ "for the address of %qE will never be NULL",
+ op);
- if (DECL_P (op))
+ if (w && DECL_P (op))
inform (DECL_SOURCE_LOCATION (op), "%qD declared here", op);
}
@@ -14238,12 +14239,19 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
break;
}
+ tree *grp_start_p = NULL, grp_sentinel = NULL_TREE;
+
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool need_complete = false;
bool need_implicitly_determined = false;
+ /* We've reached the end of a list of expanded nodes. Reset the group
+ start pointer. */
+ if (c == grp_sentinel)
+ grp_start_p = NULL;
+
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
@@ -15001,6 +15009,9 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
+ grp_start_p = pc;
+ grp_sentinel = OMP_CLAUSE_CHAIN (c);
+
if (handle_omp_array_sections (c, ort))
remove = true;
else
@@ -15644,7 +15655,19 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
}
if (remove)
- *pc = OMP_CLAUSE_CHAIN (c);
+ {
+ if (grp_start_p)
+ {
+ /* If we found a clause to remove, we want to remove the whole
+ expanded group, otherwise gimplify
+ (omp_resolve_clause_dependencies) can get confused. */
+ *grp_start_p = grp_sentinel;
+ pc = grp_start_p;
+ grp_start_p = NULL;
+ }
+ else
+ *pc = OMP_CLAUSE_CHAIN (c);
+ }
else
pc = &OMP_CLAUSE_CHAIN (c);
}
diff --git a/gcc/config/aarch64/aarch64-ldpstp.md b/gcc/config/aarch64/aarch64-ldpstp.md
index ba76a1b..f8446e2 100644
--- a/gcc/config/aarch64/aarch64-ldpstp.md
+++ b/gcc/config/aarch64/aarch64-ldpstp.md
@@ -83,8 +83,7 @@
(match_operand:DREG 1 "register_operand" ""))
(set (match_operand:DREG2 2 "memory_operand" "")
(match_operand:DREG2 3 "register_operand" ""))]
- "TARGET_SIMD
- && aarch64_operands_ok_for_ldpstp (operands, false, <DREG:MODE>mode)"
+ "aarch64_operands_ok_for_ldpstp (operands, false, <DREG:MODE>mode)"
[(parallel [(set (match_dup 0) (match_dup 1))
(set (match_dup 2) (match_dup 3))])]
{
@@ -96,7 +95,7 @@
(match_operand:VQ 1 "memory_operand" ""))
(set (match_operand:VQ2 2 "register_operand" "")
(match_operand:VQ2 3 "memory_operand" ""))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_ok_for_ldpstp (operands, true, <VQ:MODE>mode)
&& (aarch64_tune_params.extra_tuning_flags
& AARCH64_EXTRA_TUNE_NO_LDP_STP_QREGS) == 0"
@@ -111,7 +110,7 @@
(match_operand:VQ 1 "register_operand" ""))
(set (match_operand:VQ2 2 "memory_operand" "")
(match_operand:VQ2 3 "register_operand" ""))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_ok_for_ldpstp (operands, false, <VQ:MODE>mode)
&& (aarch64_tune_params.extra_tuning_flags
& AARCH64_EXTRA_TUNE_NO_LDP_STP_QREGS) == 0"
@@ -306,7 +305,7 @@
(set (match_operand:VP_2E 6 "memory_operand" "")
(match_operand:VP_2E 7 "aarch64_reg_or_zero" ""))
(match_dup 8)]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_adjust_ok_for_ldpstp (operands, false, <MODE>mode)"
[(const_int 0)]
{
@@ -327,7 +326,7 @@
(set (match_operand:VP_2E 6 "register_operand" "")
(match_operand:VP_2E 7 "memory_operand" ""))
(match_dup 8)]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_adjust_ok_for_ldpstp (operands, true, <MODE>mode)"
[(const_int 0)]
{
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 587a45d..dc80f82 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -21,7 +21,7 @@
(define_expand "mov<mode>"
[(set (match_operand:VALL_F16 0 "nonimmediate_operand")
(match_operand:VALL_F16 1 "general_operand"))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
"
/* Force the operand into a register if it is not an
immediate whose use can be replaced with xzr.
@@ -52,7 +52,7 @@
(define_expand "movmisalign<mode>"
[(set (match_operand:VALL_F16 0 "nonimmediate_operand")
(match_operand:VALL_F16 1 "general_operand"))]
- "TARGET_SIMD && !STRICT_ALIGNMENT"
+ "TARGET_FLOAT && !STRICT_ALIGNMENT"
{
/* This pattern is not permitted to fail during expansion: if both arguments
are non-registers (e.g. memory := constant, which can be created by the
@@ -116,10 +116,10 @@
(define_insn "*aarch64_simd_mov<VDMOV:mode>"
[(set (match_operand:VDMOV 0 "nonimmediate_operand"
- "=w, m, m, w, ?r, ?w, ?r, w")
+ "=w, m, m, w, ?r, ?w, ?r, w, w")
(match_operand:VDMOV 1 "general_operand"
- "m, Dz, w, w, w, r, r, Dn"))]
- "TARGET_SIMD
+ "m, Dz, w, w, w, r, r, Dn, Dz"))]
+ "TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
{
@@ -128,26 +128,34 @@
case 0: return "ldr\t%d0, %1";
case 1: return "str\txzr, %0";
case 2: return "str\t%d1, %0";
- case 3: return "mov\t%0.<Vbtype>, %1.<Vbtype>";
- case 4: return "umov\t%0, %1.d[0]";
+ case 3:
+ if (TARGET_SIMD)
+ return "mov\t%0.<Vbtype>, %1.<Vbtype>";
+ return "fmov\t%d0, %d1";
+ case 4:
+ if (TARGET_SIMD)
+ return "umov\t%0, %1.d[0]";
+ return "fmov\t%x0, %d1";
case 5: return "fmov\t%d0, %1";
case 6: return "mov\t%0, %1";
case 7:
return aarch64_output_simd_mov_immediate (operands[1], 64);
+ case 8: return "fmov\t%d0, xzr";
default: gcc_unreachable ();
}
}
[(set_attr "type" "neon_load1_1reg<q>, store_8, neon_store1_1reg<q>,\
neon_logic<q>, neon_to_gp<q>, f_mcr,\
- mov_reg, neon_move<q>")]
+ mov_reg, neon_move<q>, f_mcr")
+ (set_attr "arch" "*,*,*,*,*,*,*,simd,*")]
)
(define_insn "*aarch64_simd_mov<VQMOV:mode>"
[(set (match_operand:VQMOV 0 "nonimmediate_operand"
- "=w, Umn, m, w, ?r, ?w, ?r, w")
+ "=w, Umn, m, w, ?r, ?w, ?r, w, w")
(match_operand:VQMOV 1 "general_operand"
- "m, Dz, w, w, w, r, r, Dn"))]
- "TARGET_SIMD
+ "m, Dz, w, w, w, r, r, Dn, Dz"))]
+ "TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
{
@@ -167,14 +175,17 @@
return "#";
case 7:
return aarch64_output_simd_mov_immediate (operands[1], 128);
+ case 8:
+ return "fmov\t%d0, xzr";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "neon_load1_1reg<q>, store_16, neon_store1_1reg<q>,\
neon_logic<q>, multiple, multiple,\
- multiple, neon_move<q>")
- (set_attr "length" "4,4,4,4,8,8,8,4")]
+ multiple, neon_move<q>, fmov")
+ (set_attr "length" "4,4,4,4,8,8,8,4,4")
+ (set_attr "arch" "*,*,*,simd,*,*,*,simd,*")]
)
;; When storing lane zero we can use the normal STR and its more permissive
@@ -195,7 +206,7 @@
(match_operand:DREG 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:DREG2 2 "register_operand" "=w")
(match_operand:DREG2 3 "memory_operand" "m"))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
@@ -209,7 +220,7 @@
(match_operand:DREG 1 "register_operand" "w"))
(set (match_operand:DREG2 2 "memory_operand" "=m")
(match_operand:DREG2 3 "register_operand" "w"))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
@@ -223,7 +234,7 @@
(match_operand:VQ 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:VQ2 2 "register_operand" "=w")
(match_operand:VQ2 3 "memory_operand" "m"))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
@@ -237,10 +248,11 @@
(match_operand:VQ 1 "register_operand" "w"))
(set (match_operand:VQ2 2 "memory_operand" "=m")
(match_operand:VQ2 3 "register_operand" "w"))]
- "TARGET_SIMD && rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (<VQ:MODE>mode)))"
+ "TARGET_FLOAT
+ && rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (<VQ:MODE>mode)))"
"stp\\t%q1, %q3, %z0"
[(set_attr "type" "neon_stp_q")]
)
@@ -248,8 +260,9 @@
(define_split
[(set (match_operand:VQMOV 0 "register_operand" "")
- (match_operand:VQMOV 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed
+ (match_operand:VQMOV 1 "register_operand" ""))]
+ "TARGET_FLOAT
+ && reload_completed
&& GP_REGNUM_P (REGNO (operands[0]))
&& GP_REGNUM_P (REGNO (operands[1]))"
[(const_int 0)]
@@ -261,7 +274,8 @@
(define_split
[(set (match_operand:VQMOV 0 "register_operand" "")
(match_operand:VQMOV 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed
+ "TARGET_FLOAT
+ && reload_completed
&& ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1])))
|| (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))"
[(const_int 0)]
@@ -273,7 +287,7 @@
(define_expand "@aarch64_split_simd_mov<mode>"
[(set (match_operand:VQMOV 0)
(match_operand:VQMOV 1))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
rtx dst = operands[0];
rtx src = operands[1];
@@ -306,13 +320,20 @@
(vec_select:<VHALF>
(match_operand:VQMOV 1 "register_operand")
(match_operand 2 "ascending_int_parallel")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
+ {
+ if (vect_par_cnst_lo_half (operands[2], <MODE>mode))
+ {
+ emit_move_insn (operands[0], gen_lowpart (<VHALF>mode, operands[1]));
+ DONE;
+ }
+ }
)
(define_expand "aarch64_get_low<mode>"
[(match_operand:<VHALF> 0 "register_operand")
(match_operand:VQMOV 1 "register_operand")]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
rtx lo = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, false);
emit_insn (gen_aarch64_get_half<mode> (operands[0], operands[1], lo));
@@ -323,7 +344,7 @@
(define_expand "aarch64_get_high<mode>"
[(match_operand:<VHALF> 0 "register_operand")
(match_operand:VQMOV 1 "register_operand")]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
rtx hi = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
emit_insn (gen_aarch64_get_half<mode> (operands[0], operands[1], hi));
@@ -350,15 +371,17 @@
)
(define_insn "aarch64_simd_mov_from_<mode>high"
- [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r")
+ [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r,?r")
(vec_select:<VHALF>
- (match_operand:VQMOV_NO2E 1 "register_operand" "w,w")
+ (match_operand:VQMOV_NO2E 1 "register_operand" "w,w,w")
(match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half" "")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
"@
- dup\\t%d0, %1.d[1]
- umov\t%0, %1.d[1]"
- [(set_attr "type" "neon_dup<q>,neon_to_gp<q>")
+ dup\t%d0, %1.d[1]
+ umov\t%0, %1.d[1]
+ fmov\t%0, %1.d[1]"
+ [(set_attr "type" "neon_dup<q>,neon_to_gp<q>,f_mrc")
+ (set_attr "arch" "simd,simd,*")
(set_attr "length" "4")]
)
@@ -4226,12 +4249,22 @@
[(set_attr "type" "neon_to_gp<q>, neon_dup<q>, neon_store1_one_lane<q>")]
)
+(define_insn "*aarch64_get_high<mode>"
+ [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r")
+ (vec_select:<VEL>
+ (match_operand:VQ_2E 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))]
+ "TARGET_FLOAT && ENDIAN_LANE_N (<nunits>, INTVAL (operands[2])) == 1"
+ "fmov\t%0, %1.d[1]"
+ [(set_attr "type" "f_mrc")]
+)
+
(define_insn "load_pair_lanes<mode>"
[(set (match_operand:<VDBL> 0 "register_operand" "=w")
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "memory_operand" "Utq")
(match_operand:VDCSIF 2 "memory_operand" "m")))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_mergeable_load_pair_p (<VDBL>mode, operands[1], operands[2])"
"ldr\\t%<single_dtype>0, %1"
[(set_attr "type" "neon_load1_1reg<dblq>")]
@@ -4261,7 +4294,7 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "register_operand" "w, r")
(match_operand:VDCSIF 2 "register_operand" "w, r")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
"@
stp\t%<single_type>1, %<single_type>2, %y0
stp\t%<single_wx>1, %<single_wx>2, %y0"
@@ -4276,39 +4309,44 @@
;; the register alternatives either don't accept or themselves disparage.
(define_insn "*aarch64_combine_internal<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, ?w, ?r")
- (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, Utv, w, ?r")))]
- "TARGET_SIMD
+ (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")
+ (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, w, ?r")))]
+ "TARGET_FLOAT
&& !BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
"@
ins\t%0.<single_type>[1], %2.<single_type>[0]
ins\t%0.<single_type>[1], %<single_wx>2
+ fmov\t%0.d[1], %2
ld1\t{%0.<single_type>}[1], %2
stp\t%<single_type>1, %<single_type>2, %y0
stp\t%<single_wx>1, %<single_wx>2, %y0"
- [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, neon_load1_one_lane<dblq>, neon_stp, store_16")]
+ [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr,
+ neon_load1_one_lane<dblq>, neon_stp, store_16")
+ (set_attr "arch" "simd,simd,*,simd,*,*")]
)
(define_insn "*aarch64_combine_internal_be<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, Utv, ?w, ?r")
- (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, ?w, ?r")))]
- "TARGET_SIMD
+ (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, ?w, ?r")
+ (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")))]
+ "TARGET_FLOAT
&& BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
"@
ins\t%0.<single_type>[1], %2.<single_type>[0]
ins\t%0.<single_type>[1], %<single_wx>2
+ fmov\t%0.d[1], %2
ld1\t{%0.<single_type>}[1], %2
stp\t%<single_type>2, %<single_type>1, %y0
stp\t%<single_wx>2, %<single_wx>1, %y0"
- [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, neon_load1_one_lane<dblq>, neon_stp, store_16")]
+ [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr, neon_load1_one_lane<dblq>, neon_stp, store_16")
+ (set_attr "arch" "simd,simd,*,simd,*,*")]
)
;; In this insn, operand 1 should be low, and operand 2 the high part of the
@@ -4319,13 +4357,12 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")
(match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")))]
- "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "TARGET_FLOAT && !BYTES_BIG_ENDIAN"
"@
fmov\\t%<single_type>0, %<single_type>1
fmov\t%<single_type>0, %<single_wx>1
ldr\\t%<single_type>0, %1"
- [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")
- (set_attr "arch" "simd,fp,simd")]
+ [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
)
(define_insn "*aarch64_combinez_be<mode>"
@@ -4333,13 +4370,12 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")
(match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "TARGET_FLOAT && BYTES_BIG_ENDIAN"
"@
fmov\\t%<single_type>0, %<single_type>1
fmov\t%<single_type>0, %<single_wx>1
ldr\\t%<single_type>0, %1"
- [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")
- (set_attr "arch" "simd,fp,simd")]
+ [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
)
;; Form a vector whose first half (in array order) comes from operand 1
@@ -4350,7 +4386,7 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "general_operand")
(match_operand:VDCSIF 2 "general_operand")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
int lo = BYTES_BIG_ENDIAN ? 2 : 1;
int hi = BYTES_BIG_ENDIAN ? 1 : 2;
@@ -4368,7 +4404,7 @@
}
else
{
- /* Use *aarch64_combine_general<mode>. */
+ /* Use *aarch64_combine_internal<mode>. */
operands[lo] = force_reg (<MODE>mode, operands[lo]);
if (!aarch64_simd_nonimmediate_operand (operands[hi], <MODE>mode))
{
@@ -4390,7 +4426,7 @@
[(match_operand:<VDBL> 0 "register_operand")
(match_operand:VDC 1 "general_operand")
(match_operand:VDC 2 "general_operand")]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
if (BYTES_BIG_ENDIAN)
std::swap (operands[1], operands[2]);
@@ -7063,7 +7099,7 @@
(define_expand "mov<mode>"
[(set (match_operand:VSTRUCT_QD 0 "nonimmediate_operand")
(match_operand:VSTRUCT_QD 1 "general_operand"))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
if (can_create_pseudo_p ())
{
@@ -7075,7 +7111,7 @@
(define_expand "mov<mode>"
[(set (match_operand:VSTRUCT 0 "nonimmediate_operand")
(match_operand:VSTRUCT 1 "general_operand"))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
if (can_create_pseudo_p ())
{
@@ -7087,7 +7123,7 @@
(define_expand "movv8di"
[(set (match_operand:V8DI 0 "nonimmediate_operand")
(match_operand:V8DI 1 "general_operand"))]
- "TARGET_SIMD"
+ ""
{
if (can_create_pseudo_p () && MEM_P (operands[0]))
operands[1] = force_reg (V8DImode, operands[1]);
@@ -7255,7 +7291,8 @@
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand" "=w,m,w")
(match_operand:VSTRUCT_2D 1 "general_operand" " w,w,m"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"@
@@ -7269,7 +7306,8 @@
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand" "=w,m,w")
(match_operand:VSTRUCT_2Q 1 "general_operand" " w,w,m"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"@
@@ -7277,13 +7315,15 @@
stp\\t%q1, %R1, %0
ldp\\t%q0, %R0, %1"
[(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
+ (set_attr "arch" "simd,*,*")
(set_attr "length" "8,4,4")]
)
(define_insn "*aarch64_be_movoi"
[(set (match_operand:OI 0 "nonimmediate_operand" "=w,m,w")
(match_operand:OI 1 "general_operand" " w,w,m"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], OImode)
|| register_operand (operands[1], OImode))"
"@
@@ -7291,57 +7331,66 @@
stp\\t%q1, %R1, %0
ldp\\t%q0, %R0, %1"
[(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
+ (set_attr "arch" "simd,*,*")
(set_attr "length" "8,4,4")]
)
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_3QD 0 "nonimmediate_operand" "=w,o,w")
(match_operand:VSTRUCT_3QD 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"#"
[(set_attr "type" "multiple")
+ (set_attr "arch" "fp<q>,*,*")
(set_attr "length" "12,8,8")]
)
(define_insn "*aarch64_be_movci"
[(set (match_operand:CI 0 "nonimmediate_operand" "=w,o,w")
(match_operand:CI 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], CImode)
|| register_operand (operands[1], CImode))"
"#"
[(set_attr "type" "multiple")
- (set_attr "length" "12,4,4")]
+ (set_attr "arch" "simd,*,*")
+ (set_attr "length" "12,8,8")]
)
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_4QD 0 "nonimmediate_operand" "=w,o,w")
(match_operand:VSTRUCT_4QD 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"#"
[(set_attr "type" "multiple")
+ (set_attr "arch" "fp<q>,*,*")
(set_attr "length" "16,8,8")]
)
(define_insn "*aarch64_be_movxi"
[(set (match_operand:XI 0 "nonimmediate_operand" "=w,o,w")
(match_operand:XI 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], XImode)
|| register_operand (operands[1], XImode))"
"#"
[(set_attr "type" "multiple")
- (set_attr "length" "16,4,4")]
+ (set_attr "arch" "simd,*,*")
+ (set_attr "length" "16,8,8")]
)
(define_split
[(set (match_operand:VSTRUCT_2QD 0 "register_operand")
(match_operand:VSTRUCT_2QD 1 "register_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
aarch64_simd_emit_reg_reg_move (operands, <VSTRUCT_ELT>mode, 2);
@@ -7351,7 +7400,7 @@
(define_split
[(set (match_operand:OI 0 "register_operand")
(match_operand:OI 1 "register_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
aarch64_simd_emit_reg_reg_move (operands, TImode, 2);
@@ -7361,7 +7410,7 @@
(define_split
[(set (match_operand:VSTRUCT_3QD 0 "nonimmediate_operand")
(match_operand:VSTRUCT_3QD 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], <MODE>mode)
@@ -7370,7 +7419,7 @@
aarch64_simd_emit_reg_reg_move (operands, <VSTRUCT_ELT>mode, 3);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
int elt_size = GET_MODE_SIZE (<MODE>mode).to_constant () / <nregs>;
machine_mode pair_mode = elt_size == 16 ? V2x16QImode : V2x8QImode;
@@ -7397,7 +7446,7 @@
(define_split
[(set (match_operand:CI 0 "nonimmediate_operand")
(match_operand:CI 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], CImode)
@@ -7406,7 +7455,7 @@
aarch64_simd_emit_reg_reg_move (operands, TImode, 3);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
emit_move_insn (simplify_gen_subreg (OImode, operands[0], CImode, 0),
simplify_gen_subreg (OImode, operands[1], CImode, 0));
@@ -7425,7 +7474,7 @@
(define_split
[(set (match_operand:VSTRUCT_4QD 0 "nonimmediate_operand")
(match_operand:VSTRUCT_4QD 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], <MODE>mode)
@@ -7434,7 +7483,7 @@
aarch64_simd_emit_reg_reg_move (operands, <VSTRUCT_ELT>mode, 4);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
int elt_size = GET_MODE_SIZE (<MODE>mode).to_constant () / <nregs>;
machine_mode pair_mode = elt_size == 16 ? V2x16QImode : V2x8QImode;
@@ -7455,7 +7504,7 @@
(define_split
[(set (match_operand:XI 0 "nonimmediate_operand")
(match_operand:XI 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], XImode)
@@ -7464,7 +7513,7 @@
aarch64_simd_emit_reg_reg_move (operands, TImode, 4);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
emit_move_insn (simplify_gen_subreg (OImode, operands[0], XImode, 0),
simplify_gen_subreg (OImode, operands[1], XImode, 0));
@@ -7479,7 +7528,7 @@
(define_split
[(set (match_operand:V8DI 0 "nonimmediate_operand")
(match_operand:V8DI 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], V8DImode)
@@ -7489,15 +7538,15 @@
DONE;
}
else if ((register_operand (operands[0], V8DImode)
- && memory_operand (operands[1], V8DImode))
- || (memory_operand (operands[0], V8DImode)
- && register_operand (operands[1], V8DImode)))
+ && memory_operand (operands[1], V8DImode))
+ || (memory_operand (operands[0], V8DImode)
+ && register_operand (operands[1], V8DImode)))
{
for (int offset = 0; offset < 64; offset += 16)
- emit_move_insn (simplify_gen_subreg (TImode, operands[0],
- V8DImode, offset),
- simplify_gen_subreg (TImode, operands[1],
- V8DImode, offset));
+ emit_move_insn (simplify_gen_subreg (TImode, operands[0],
+ V8DImode, offset),
+ simplify_gen_subreg (TImode, operands[1],
+ V8DImode, offset));
DONE;
}
else
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 786ede7..467979a 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -3492,7 +3492,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_OImode:
case E_CImode:
case E_XImode:
- return TARGET_SIMD ? VEC_ADVSIMD | VEC_STRUCT : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD | VEC_STRUCT : 0;
/* Structures of 64-bit Advanced SIMD vectors. */
case E_V2x8QImode:
@@ -3519,7 +3519,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_V4x4HFmode:
case E_V4x2SFmode:
case E_V4x1DFmode:
- return TARGET_SIMD ? VEC_ADVSIMD | VEC_STRUCT | VEC_PARTIAL : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD | VEC_STRUCT | VEC_PARTIAL : 0;
/* Structures of 128-bit Advanced SIMD vectors. */
case E_V2x16QImode:
@@ -3546,7 +3546,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_V4x8HFmode:
case E_V4x4SFmode:
case E_V4x2DFmode:
- return TARGET_SIMD ? VEC_ADVSIMD | VEC_STRUCT : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD | VEC_STRUCT : 0;
/* 64-bit Advanced SIMD vectors. */
case E_V8QImode:
@@ -3566,7 +3566,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_V8BFmode:
case E_V4SFmode:
case E_V2DFmode:
- return TARGET_SIMD ? VEC_ADVSIMD : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD : 0;
default:
return 0;
@@ -3854,7 +3854,8 @@ aarch64_vectorize_related_mode (machine_mode vector_mode,
}
/* Prefer to use 1 128-bit vector instead of 2 64-bit vectors. */
- if ((vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD
+ && (vec_flags & VEC_ADVSIMD)
&& known_eq (nunits, 0U)
&& known_eq (GET_MODE_BITSIZE (vector_mode), 64U)
&& maybe_ge (GET_MODE_BITSIZE (element_mode)
@@ -3952,7 +3953,7 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
if (GP_REGNUM_P (regno))
{
- if (vec_flags & VEC_ANY_SVE)
+ if (vec_flags & (VEC_ANY_SVE | VEC_STRUCT))
return false;
if (known_le (GET_MODE_SIZE (mode), 8))
return true;
@@ -10602,7 +10603,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
|| mode == TImode
|| mode == TFmode
|| mode == TDmode
- || (BYTES_BIG_ENDIAN && advsimd_struct_p));
+ || ((!TARGET_SIMD || BYTES_BIG_ENDIAN)
+ && advsimd_struct_p));
/* If we are dealing with ADDR_QUERY_LDP_STP_N that means the incoming mode
corresponds to the actual size of the memory being loaded/stored and the
mode of the corresponding addressing mode is half of that. */
@@ -10632,6 +10634,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
/* On LE, for AdvSIMD, don't support anything other than POST_INC or
REG addressing. */
if (advsimd_struct_p
+ && TARGET_SIMD
&& !BYTES_BIG_ENDIAN
&& (code != POST_INC && code != REG))
return false;
@@ -10694,7 +10697,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
&& aarch64_offset_7bit_signed_scaled_p (DImode, offset + 48));
/* A 7bit offset check because OImode will emit a ldp/stp
- instruction (only big endian will get here).
+ instruction (only !TARGET_SIMD or big endian will get here).
For ldp/stp instructions, the offset is scaled for the size of a
single element of the pair. */
if (aarch64_advsimd_partial_struct_mode_p (mode)
@@ -10705,7 +10708,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
return aarch64_offset_7bit_signed_scaled_p (TImode, offset);
/* Three 9/12 bit offsets checks because CImode will emit three
- ldr/str instructions (only big endian will get here). */
+ ldr/str instructions (only !TARGET_SIMD or big endian will
+ get here). */
if (aarch64_advsimd_partial_struct_mode_p (mode)
&& known_eq (GET_MODE_SIZE (mode), 24))
return (aarch64_offset_7bit_signed_scaled_p (DImode, offset)
@@ -12428,18 +12432,16 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
/* Use aarch64_sve_reload_mem for SVE memory reloads that cannot use
LDR and STR. See the comment at the head of aarch64-sve.md for
more details about the big-endian handling. */
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
if (reg_class_subset_p (rclass, FP_REGS)
&& !((REG_P (x) && HARD_REGISTER_P (x))
|| aarch64_simd_valid_immediate (x, NULL))
- && mode != VNx16QImode)
+ && mode != VNx16QImode
+ && (vec_flags & VEC_SVE_DATA)
+ && ((vec_flags & VEC_PARTIAL) || BYTES_BIG_ENDIAN))
{
- unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if ((vec_flags & VEC_SVE_DATA)
- && ((vec_flags & VEC_PARTIAL) || BYTES_BIG_ENDIAN))
- {
- sri->icode = CODE_FOR_aarch64_sve_reload_mem;
- return NO_REGS;
- }
+ sri->icode = CODE_FOR_aarch64_sve_reload_mem;
+ return NO_REGS;
}
/* If we have to disable direct literal pool loads and stores because the
@@ -12456,9 +12458,13 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
/* Without the TARGET_SIMD instructions we cannot move a Q register
to a Q register directly. We need a scratch. */
if (REG_P (x)
- && (mode == TFmode || mode == TImode || mode == TDmode)
+ && (mode == TFmode
+ || mode == TImode
+ || mode == TDmode
+ || (vec_flags == VEC_ADVSIMD && known_eq (GET_MODE_SIZE (mode), 16)))
&& mode == GET_MODE (x)
- && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
+ && !TARGET_SIMD
+ && FP_REGNUM_P (REGNO (x))
&& reg_class_subset_p (rclass, FP_REGS))
{
sri->icode = code_for_aarch64_reload_mov (mode);
@@ -12480,6 +12486,28 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
return NO_REGS;
}
+/* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
+
+static bool
+aarch64_secondary_memory_needed (machine_mode mode, reg_class_t class1,
+ reg_class_t class2)
+{
+ if (!TARGET_SIMD
+ && reg_classes_intersect_p (class1, FP_REGS)
+ && reg_classes_intersect_p (class2, FP_REGS))
+ {
+ /* We can't do a 128-bit FPR-to-FPR move without TARGET_SIMD,
+ so we can't easily split a move involving tuples of 128-bit
+ vectors. Force the copy through memory instead.
+
+ (Tuples of 64-bit vectors are fine.) */
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ if (vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
+ return true;
+ }
+ return false;
+}
+
static bool
aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
@@ -13023,7 +13051,7 @@ aarch64_rtx_mult_cost (rtx x, enum rtx_code code, int outer, bool speed)
if (VECTOR_MODE_P (mode))
{
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD && (vec_flags & VEC_ADVSIMD))
{
/* The select-operand-high-half versions of the instruction have the
same cost as the three vector version - don't add the costs of the
@@ -13969,7 +13997,7 @@ cost_minus:
{
/* SUBL2 and SUBW2. */
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD && (vec_flags & VEC_ADVSIMD))
{
/* The select-operand-high-half versions of the sub instruction
have the same cost as the regular three vector version -
@@ -14056,7 +14084,7 @@ cost_plus:
{
/* ADDL2 and ADDW2. */
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD && (vec_flags & VEC_ADVSIMD))
{
/* The select-operand-high-half versions of the add instruction
have the same cost as the regular three vector version -
@@ -14981,7 +15009,9 @@ aarch64_register_move_cost (machine_mode mode,
return aarch64_register_move_cost (mode, from, GENERAL_REGS)
+ aarch64_register_move_cost (mode, GENERAL_REGS, to);
- if (known_eq (GET_MODE_SIZE (mode), 16))
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ if (vec_flags != (VEC_ADVSIMD | VEC_STRUCT | VEC_PARTIAL)
+ && known_eq (GET_MODE_SIZE (mode), 16))
{
/* 128-bit operations on general registers require 2 instructions. */
if (from == GENERAL_REGS && to == GENERAL_REGS)
@@ -15009,6 +15039,16 @@ aarch64_register_move_cost (machine_mode mode,
else if (to == GENERAL_REGS)
return regmove_cost->FP2GP;
+ if (!TARGET_SIMD && vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
+ {
+ /* Needs a round-trip through memory, which can use LDP/STP for pairs.
+ The cost must be greater than 2 units to indicate that direct
+ moves aren't possible. */
+ auto per_vector = (aarch64_tune_params.memmov_cost.load_fp
+ + aarch64_tune_params.memmov_cost.store_fp);
+ return MIN (CEIL (per_vector, 2), 4);
+ }
+
return regmove_cost->FP2FP;
}
@@ -21115,6 +21155,9 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
if (vec_flags == 0 || vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
return false;
+ if ((vec_flags & VEC_ADVSIMD) && !TARGET_SIMD)
+ return false;
+
if (vec_flags & VEC_SVE_PRED)
return aarch64_sve_pred_valid_immediate (op, info);
@@ -24048,7 +24091,7 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
std::swap (d->op0, d->op1);
}
- if ((d->vec_flags == VEC_ADVSIMD
+ if (((d->vec_flags == VEC_ADVSIMD && TARGET_SIMD)
|| d->vec_flags == VEC_SVE_DATA
|| d->vec_flags == (VEC_SVE_DATA | VEC_PARTIAL)
|| d->vec_flags == VEC_SVE_PRED)
@@ -27482,6 +27525,9 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
+#undef TARGET_SECONDARY_MEMORY_NEEDED
+#define TARGET_SECONDARY_MEMORY_NEEDED aarch64_secondary_memory_needed
+
#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index efcbecb..3f8e40a 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -370,8 +370,11 @@
;; Attributes of the architecture required to support the instruction (or
;; alternative). This attribute is used to compute attribute "enabled", use type
;; "any" to enable an alternative in all cases.
+;;
+;; As a convenience, "fp_q" means "fp" + the ability to move between
+;; Q registers and is equivalent to "simd".
-(define_enum "arches" [ any rcpc8_4 fp simd sve fp16])
+(define_enum "arches" [ any rcpc8_4 fp fp_q simd sve fp16])
(define_enum_attr "arch" "arches" (const_string "any"))
@@ -399,7 +402,7 @@
(and (eq_attr "arch" "fp")
(match_test "TARGET_FLOAT"))
- (and (eq_attr "arch" "simd")
+ (and (eq_attr "arch" "fp_q, simd")
(match_test "TARGET_SIMD"))
(and (eq_attr "arch" "fp16")
@@ -6819,8 +6822,8 @@
)
(define_expand "@aarch64_reload_mov<mode>"
- [(set (match_operand:TX 0 "register_operand" "=w")
- (match_operand:TX 1 "register_operand" "w"))
+ [(set (match_operand:VTX 0 "register_operand" "=w")
+ (match_operand:VTX 1 "register_operand" "w"))
(clobber (match_operand:DI 2 "register_operand" "=&r"))
]
"TARGET_FLOAT"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 0dd9dc6..9354dbe 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -313,6 +313,8 @@
(define_mode_iterator TX [TI TF TD])
+(define_mode_iterator VTX [TI TF TD V16QI V8HI V4SI V2DI V8HF V4SF V2DF V8BF])
+
;; Advanced SIMD opaque structure modes.
(define_mode_iterator VSTRUCT [OI CI XI])
diff --git a/gcc/config/csky/csky.h b/gcc/config/csky/csky.h
index f786ad5..a9d1369 100644
--- a/gcc/config/csky/csky.h
+++ b/gcc/config/csky/csky.h
@@ -422,7 +422,7 @@ typedef struct
The int cast is to prevent a complaint about unsigned comparison to
zero, since CSKY_FIRST_PARM_REGNUM is zero. */
#define FUNCTION_ARG_REGNO_P(REGNO) \
- (((REGNO) >= CSKY_FIRST_PARM_REGNUM \
+ (((int)(REGNO) >= CSKY_FIRST_PARM_REGNUM \
&& (REGNO) < (CSKY_NPARM_REGS + CSKY_FIRST_PARM_REGNUM)) \
|| FUNCTION_VARG_REGNO_P(REGNO))
diff --git a/gcc/config/gcn/mkoffload.cc b/gcc/config/gcn/mkoffload.cc
index 24d3273..6403780 100644
--- a/gcc/config/gcn/mkoffload.cc
+++ b/gcc/config/gcn/mkoffload.cc
@@ -805,7 +805,7 @@ main (int argc, char **argv)
FILE *cfile = stdout;
const char *outname = 0;
- progname = "mkoffload";
+ progname = tool_name;
diagnostic_initialize (global_dc, 0);
obstack_init (&files_to_cleanup);
diff --git a/gcc/config/i386/i386-builtins.cc b/gcc/config/i386/i386-builtins.cc
index 6a04fb5..af2faee 100644
--- a/gcc/config/i386/i386-builtins.cc
+++ b/gcc/config/i386/i386-builtins.cc
@@ -1540,21 +1540,16 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
switch (fn)
{
- CASE_CFN_EXP2:
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_EXP2PS);
- }
- break;
-
CASE_CFN_IFLOOR:
CASE_CFN_LFLOOR:
- CASE_CFN_LLFLOOR:
/* The round insn does not trap on denormals. */
if (flag_trapping_math || !TARGET_SSE4_1)
break;
+ /* PR106910, currently vectorizer doesn't go direct internal fn way
+ when out_n != in_n, so let's still keep this.
+ Otherwise, it relies on expander of
+ lceilmn2/lfloormn2/lroundmn2/lrintmn2. */
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
@@ -1564,20 +1559,10 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX512);
- }
break;
CASE_CFN_ICEIL:
CASE_CFN_LCEIL:
- CASE_CFN_LLCEIL:
/* The round insn does not trap on denormals. */
if (flag_trapping_math || !TARGET_SSE4_1)
break;
@@ -1591,20 +1576,10 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_CEILPD_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX512);
- }
break;
CASE_CFN_IRINT:
CASE_CFN_LRINT:
- CASE_CFN_LLRINT:
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
@@ -1614,20 +1589,10 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ512);
- }
break;
CASE_CFN_IROUND:
CASE_CFN_LROUND:
- CASE_CFN_LLROUND:
/* The round insn does not trap on denormals. */
if (flag_trapping_math || !TARGET_SSE4_1)
break;
@@ -1641,150 +1606,8 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX512);
- }
break;
- CASE_CFN_FLOOR:
- /* The round insn does not trap on denormals. */
- if (flag_trapping_math || !TARGET_SSE4_1)
- break;
-
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPD);
- else if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPD256);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPD512);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS512);
- }
- if (out_mode == HFmode && in_mode == HFmode)
- {
- /* V8HF/V16HF is supported in ix86_vector_mode_supported_p
- under TARGET_AVX512FP16, TARGET_AVX512VL is needed here. */
- if (out_n < 32 && !TARGET_AVX512VL)
- break;
-
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPH);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPH256);
- else if (out_n == 32 && in_n == 32)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPH512);
- }
- break;
-
- CASE_CFN_CEIL:
- /* The round insn does not trap on denormals. */
- if (flag_trapping_math || !TARGET_SSE4_1)
- break;
-
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_CEILPD);
- else if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CEILPD256);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPD512);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS512);
- }
- if (out_mode == HFmode && in_mode == HFmode)
- {
- /* V8HF/V16HF is supported in ix86_vector_mode_supported_p
- under TARGET_AVX512FP16, TARGET_AVX512VL is needed here. */
- if (out_n < 32 && !TARGET_AVX512VL)
- break;
-
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPH);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CEILPH256);
- else if (out_n == 32 && in_n == 32)
- return ix86_get_builtin (IX86_BUILTIN_CEILPH512);
- }
- break;
-
- CASE_CFN_TRUNC:
- /* The round insn does not trap on denormals. */
- if (flag_trapping_math || !TARGET_SSE4_1)
- break;
-
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPD);
- else if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPD256);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPD512);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPS);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPS256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPS512);
- }
- if (out_mode == HFmode && in_mode == HFmode)
- {
- /* V8HF/V16HF is supported in ix86_vector_mode_supported_p
- under TARGET_AVX512FP16, TARGET_AVX512VL is needed here. */
- if (out_n < 32 && !TARGET_AVX512VL)
- break;
-
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPH);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPH256);
- else if (out_n == 32 && in_n == 32)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPH512);
- }
- break;
-
- CASE_CFN_FMA:
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPD);
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPD256);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPS);
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPS256);
- }
- break;
default:
break;
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index d7b49c9..5334363 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -15109,9 +15109,24 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
return ix86_vector_duplicate_value (mode, target, val);
else
{
- machine_mode hvmode = (mode == V16HImode ? V8HImode
- : mode == V16HFmode ? V8HFmode
- : V16QImode);
+ machine_mode hvmode;
+ switch (mode)
+ {
+ case V16HImode:
+ hvmode = V8HImode;
+ break;
+ case V16HFmode:
+ hvmode = V8HFmode;
+ break;
+ case V16BFmode:
+ hvmode = V8BFmode;
+ break;
+ case V32QImode:
+ hvmode = V16QImode;
+ break;
+ default:
+ gcc_unreachable ();
+ }
rtx x = gen_reg_rtx (hvmode);
ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
@@ -15130,10 +15145,24 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
return ix86_vector_duplicate_value (mode, target, val);
else
{
- machine_mode hvmode = (mode == V32HImode ? V16HImode
- : mode == V32HFmode ? V16HFmode
- : mode == V32BFmode ? V16BFmode
- : V32QImode);
+ machine_mode hvmode;
+ switch (mode)
+ {
+ case V32HImode:
+ hvmode = V16HImode;
+ break;
+ case V32HFmode:
+ hvmode = V16HFmode;
+ break;
+ case V32BFmode:
+ hvmode = V16BFmode;
+ break;
+ case V64QImode:
+ hvmode = V32QImode;
+ break;
+ default:
+ gcc_unreachable ();
+ }
rtx x = gen_reg_rtx (hvmode);
ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index dda4b43..222a041 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -1629,6 +1629,160 @@
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
+;; Parallel single-precision floating point rounding operations.
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "nearbyintv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_MXCSR | ROUND_NO_EXC);")
+
+(define_expand "rintv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_MXCSR);")
+
+(define_expand "ceilv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_CEIL | ROUND_NO_EXC);")
+
+(define_expand "lceilv2sfv2si2"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_ceilv2sf2 (tmp, operands[1]));
+ emit_insn (gen_fix_truncv2sfv2si2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "floorv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_FLOOR | ROUND_NO_EXC);")
+
+(define_expand "lfloorv2sfv2si2"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_floorv2sf2 (tmp, operands[1]));
+ emit_insn (gen_fix_truncv2sfv2si2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "btruncv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_TRUNC | ROUND_NO_EXC);")
+
+(define_insn "*mmx_roundv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand" "=Yr,*x,v")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand" "Yr,x,v")
+ (match_operand:SI 2 "const_0_to_15_operand")]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ "%vroundps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "type" "ssecvt")
+ (set_attr "prefix_data16" "1,1,*")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "orig,orig,vex")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "lrintv2sfv2si2"
+ [(set (match_operand:V2SI 0 "register_operand" "=v")
+ (unspec:V2SI
+ [(match_operand:V2SF 1 "register_operand" "v")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_MMX_WITH_SSE"
+ "%vcvtps2dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (match_test "TARGET_AVX")
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "TI")])
+
+(define_expand "roundv2sf2"
+ [(set (match_dup 3)
+ (plus:V2SF
+ (match_operand:V2SF 1 "register_operand")
+ (match_dup 2)))
+ (set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_dup 3) (match_dup 4)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ const struct real_format *fmt;
+ REAL_VALUE_TYPE pred_half, half_minus_pred_half;
+ rtx half, vec_half;
+
+ /* load nextafter (0.5, 0.0) */
+ fmt = REAL_MODE_FORMAT (SFmode);
+ real_2expN (&half_minus_pred_half, -(fmt->p) - 1, SFmode);
+ real_arithmetic (&pred_half, MINUS_EXPR, &dconsthalf, &half_minus_pred_half);
+ half = const_double_from_real_value (pred_half, SFmode);
+
+ vec_half = ix86_build_const_vector (V2SFmode, true, half);
+ vec_half = force_reg (V2SFmode, vec_half);
+
+ operands[2] = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_copysignv2sf3 (operands[2], vec_half, operands[1]));
+
+ operands[3] = gen_reg_rtx (V2SFmode);
+ operands[4] = GEN_INT (ROUND_TRUNC);
+})
+
+(define_expand "lroundv2sfv2si2"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_roundv2sf2 (tmp, operands[1]));
+ emit_insn (gen_fix_truncv2sfv2si2 (operands[0], tmp));
+ DONE;
+})
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
;; Parallel half-precision floating point arithmetic
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index d535c0a..b60c0d3 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -321,6 +321,11 @@
[(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+(define_mode_iterator VF1_VF2_AVX512DQ
+ [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512DQ") (V4DF "TARGET_AVX512DQ && TARGET_AVX512VL")
+ (V2DF "TARGET_AVX512DQ && TARGET_AVX512VL")])
+
(define_mode_iterator VFH
[(V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
@@ -23177,6 +23182,14 @@
"TARGET_SSE4_1"
"operands[2] = GEN_INT (ROUND_MXCSR);")
+;; Note vcvtpd2qq require avx512dq for all vector lengths.
+(define_expand "lrint<mode><sseintvecmodelower>2"
+ [(set (match_operand:<sseintvecmode> 0 "register_operand")
+ (unspec:<sseintvecmode>
+ [(match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE2")
+
(define_insn "<sse4_1>_round<ssemodesuffix><avxsizesuffix>"
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(unspec:VF_128_256
@@ -23316,6 +23329,55 @@
(set_attr "prefix" "orig,orig,vex,evex")
(set_attr "mode" "<MODE>")])
+(define_expand "floor<mode>2"
+ [(set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
+ [(match_operand:VFH 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_FLOOR | ROUND_NO_EXC);")
+
+(define_expand "lfloor<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_floor<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "ceil<mode>2"
+ [(set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
+ [(match_operand:VFH 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_CEIL | ROUND_NO_EXC);")
+
+(define_expand "lceil<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_ceil<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "btrunc<mode>2"
+ [(set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
+ [(match_operand:VFH 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_TRUNC | ROUND_NO_EXC);")
+
(define_expand "round<mode>2"
[(set (match_dup 3)
(plus:VF
@@ -23350,6 +23412,17 @@
operands[4] = GEN_INT (ROUND_TRUNC);
})
+(define_expand "lround<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_round<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
(define_expand "round<mode>2_sfix"
[(match_operand:<sseintvecmode> 0 "register_operand")
(match_operand:VF1 1 "register_operand")]
@@ -23868,6 +23941,13 @@
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
+(define_expand "exp2<mode>2"
+ [(set (match_operand:VF_512 0 "register_operand")
+ (unspec:VF_512
+ [(match_operand:VF_512 1 "vector_operand")]
+ UNSPEC_EXP2))]
+ "TARGET_AVX512ER")
+
(define_insn "avx512er_exp2<mode><mask_name><round_saeonly_name>"
[(set (match_operand:VF_512 0 "register_operand" "=v")
(unspec:VF_512
diff --git a/gcc/config/i386/x86-tune-sched.cc b/gcc/config/i386/x86-tune-sched.cc
index 1ffaeef..e2765f8 100644
--- a/gcc/config/i386/x86-tune-sched.cc
+++ b/gcc/config/i386/x86-tune-sched.cc
@@ -73,10 +73,24 @@ ix86_issue_rate (void)
case PROCESSOR_SANDYBRIDGE:
case PROCESSOR_HASWELL:
case PROCESSOR_TREMONT:
+ case PROCESSOR_SKYLAKE:
+ case PROCESSOR_SKYLAKE_AVX512:
+ case PROCESSOR_CASCADELAKE:
+ case PROCESSOR_CANNONLAKE:
case PROCESSOR_ALDERLAKE:
case PROCESSOR_GENERIC:
return 4;
+ case PROCESSOR_ICELAKE_CLIENT:
+ case PROCESSOR_ICELAKE_SERVER:
+ case PROCESSOR_TIGERLAKE:
+ case PROCESSOR_COOPERLAKE:
+ case PROCESSOR_ROCKETLAKE:
+ return 5;
+
+ case PROCESSOR_SAPPHIRERAPIDS:
+ return 6;
+
default:
return 1;
}
diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h
index 664dc92..c5b1afe 100644
--- a/gcc/config/loongarch/gnu-user.h
+++ b/gcc/config/loongarch/gnu-user.h
@@ -40,8 +40,10 @@ along with GCC; see the file COPYING3. If not see
#undef GNU_USER_TARGET_LINK_SPEC
#define GNU_USER_TARGET_LINK_SPEC \
"%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \
- "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \
- "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}"
+ "%{!shared: %{static} " \
+ "%{!static: %{!static-pie: %{rdynamic:-export-dynamic} " \
+ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} " \
+ "%{static-pie: -static -pie --no-dynamic-linker -z text}}"
/* Similar to standard Linux, but adding -ffast-math support. */
diff --git a/gcc/config/mips/mips.cc b/gcc/config/mips/mips.cc
index 4772495..387376b 100644
--- a/gcc/config/mips/mips.cc
+++ b/gcc/config/mips/mips.cc
@@ -20018,7 +20018,7 @@ mips_set_tune (const struct mips_cpu_info *info)
static void
mips_option_override (void)
{
- int i, start, regno, mode;
+ int i, regno, mode;
if (OPTION_SET_P (mips_isa_option))
mips_isa_option_info = &mips_cpu_info_table[mips_isa_option];
diff --git a/gcc/config/nvptx/mkoffload.cc b/gcc/config/nvptx/mkoffload.cc
index 834b205..854cd72 100644
--- a/gcc/config/nvptx/mkoffload.cc
+++ b/gcc/config/nvptx/mkoffload.cc
@@ -324,9 +324,19 @@ process (FILE *in, FILE *out, uint32_t omp_requires)
{
if (sm_ver && sm_ver[0] == '3' && sm_ver[1] == '0'
&& sm_ver[2] == '\n')
- fatal_error (input_location,
- "%<omp requires reverse_offload%> requires at least "
- "%<sm_35%> for %<-misa=%>");
+ {
+ warning_at (input_location, 0,
+ "%<omp requires reverse_offload%> requires at "
+ "least %<sm_35%> for "
+ "%<-foffload-options=nvptx-none=-march=%> - disabling"
+ " offload-code generation for this device type");
+ /* As now an empty file is compiled and there is no call to
+ GOMP_offload_register_ver, this device type is effectively
+ disabled. */
+ fflush (out);
+ ftruncate (fileno (out), 0);
+ return;
+ }
sm_ver2 = sm_ver;
version2 = version;
}
@@ -526,7 +536,7 @@ main (int argc, char **argv)
FILE *out = stdout;
const char *outname = 0;
- progname = "mkoffload";
+ progname = tool_name;
diagnostic_initialize (global_dc, 0);
if (atexit (mkoffload_cleanup) != 0)
diff --git a/gcc/config/rs6000/rs6000-builtin.cc b/gcc/config/rs6000/rs6000-builtin.cc
index 6dfb0db..3ce729c 100644
--- a/gcc/config/rs6000/rs6000-builtin.cc
+++ b/gcc/config/rs6000/rs6000-builtin.cc
@@ -1260,6 +1260,11 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
enum tree_code bcode;
gimple *g;
+ /* For an unresolved overloaded builtin, return early here since there
+ is no builtin info for it and we are unable to fold it. */
+ if (fn_code > RS6000_OVLD_NONE)
+ return false;
+
size_t uns_fncode = (size_t) fn_code;
enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
const char *fn_name1 = rs6000_builtin_info[uns_fncode].bifname;
@@ -3256,6 +3261,14 @@ rs6000_expand_builtin (tree exp, rtx target, rtx /* subtarget */,
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
enum rs6000_gen_builtins fcode
= (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
+
+ /* Emit error message if it's an unresolved overloaded builtin. */
+ if (fcode > RS6000_OVLD_NONE)
+ {
+ error ("unresolved overload for builtin %qF", fndecl);
+ return const0_rtx;
+ }
+
size_t uns_fcode = (size_t)fcode;
enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
diff --git a/gcc/config/rs6000/rs6000-c.cc b/gcc/config/rs6000/rs6000-c.cc
index 4d051b9..ca9cc42 100644
--- a/gcc/config/rs6000/rs6000-c.cc
+++ b/gcc/config/rs6000/rs6000-c.cc
@@ -1749,6 +1749,36 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
unsigned int nargs = vec_safe_length (arglist);
+ /* If the number of arguments did not match the prototype, return NULL
+ and the generic code will issue the appropriate error message. Skip
+ this test for functions where we don't fully describe all the possible
+ overload signatures in rs6000-overload.def (because they aren't relevant
+ to the expansion here). If we don't, we get confusing error messages. */
+ /* As an example, for vec_splats we have:
+
+; There are no actual builtins for vec_splats. There is special handling for
+; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call
+; is replaced by a constructor. The single overload here causes
+; __builtin_vec_splats to be registered with the front end so that can happen.
+[VEC_SPLATS, vec_splats, __builtin_vec_splats]
+ vsi __builtin_vec_splats (vsi);
+ ABS_V4SI SPLATS_FAKERY
+
+ So even though __builtin_vec_splats accepts all vector types, the
+ infrastructure cheats and just records one prototype. We end up getting
+ an error message that refers to this specific prototype even when we
+ are handling a different argument type. That is completely confusing
+ to the user, so it's best to let these cases be handled individually
+ in the resolve_vec_splats, etc., helper functions. */
+
+ if (expected_args != nargs
+ && !(fcode == RS6000_OVLD_VEC_PROMOTE
+ || fcode == RS6000_OVLD_VEC_SPLATS
+ || fcode == RS6000_OVLD_VEC_EXTRACT
+ || fcode == RS6000_OVLD_VEC_INSERT
+ || fcode == RS6000_OVLD_VEC_STEP))
+ return NULL;
+
for (n = 0;
!VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
fnargs = TREE_CHAIN (fnargs), n++)
@@ -1809,36 +1839,6 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
types[n] = type;
}
- /* If the number of arguments did not match the prototype, return NULL
- and the generic code will issue the appropriate error message. Skip
- this test for functions where we don't fully describe all the possible
- overload signatures in rs6000-overload.def (because they aren't relevant
- to the expansion here). If we don't, we get confusing error messages. */
- /* As an example, for vec_splats we have:
-
-; There are no actual builtins for vec_splats. There is special handling for
-; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call
-; is replaced by a constructor. The single overload here causes
-; __builtin_vec_splats to be registered with the front end so that can happen.
-[VEC_SPLATS, vec_splats, __builtin_vec_splats]
- vsi __builtin_vec_splats (vsi);
- ABS_V4SI SPLATS_FAKERY
-
- So even though __builtin_vec_splats accepts all vector types, the
- infrastructure cheats and just records one prototype. We end up getting
- an error message that refers to this specific prototype even when we
- are handling a different argument type. That is completely confusing
- to the user, so it's best to let these cases be handled individually
- in the resolve_vec_splats, etc., helper functions. */
-
- if (n != expected_args
- && !(fcode == RS6000_OVLD_VEC_PROMOTE
- || fcode == RS6000_OVLD_VEC_SPLATS
- || fcode == RS6000_OVLD_VEC_EXTRACT
- || fcode == RS6000_OVLD_VEC_INSERT
- || fcode == RS6000_OVLD_VEC_STEP))
- return NULL;
-
/* Some overloads require special handling. */
tree returned_expr = NULL;
resolution res = unresolved;
diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
index bcf634a..5f347e9 100644
--- a/gcc/config/rs6000/rs6000.cc
+++ b/gcc/config/rs6000/rs6000.cc
@@ -5141,16 +5141,23 @@ protected:
vect_cost_model_location, unsigned int);
void density_test (loop_vec_info);
void adjust_vect_cost_per_loop (loop_vec_info);
+ unsigned int determine_suggested_unroll_factor (loop_vec_info);
/* Total number of vectorized stmts (loop only). */
unsigned m_nstmts = 0;
/* Total number of loads (loop only). */
unsigned m_nloads = 0;
+ /* Total number of stores (loop only). */
+ unsigned m_nstores = 0;
+ /* Reduction factor for suggesting unroll factor (loop only). */
+ unsigned m_reduc_factor = 0;
/* Possible extra penalized cost on vector construction (loop only). */
unsigned m_extra_ctor_cost = 0;
/* For each vectorized loop, this var holds TRUE iff a non-memory vector
instruction is needed by the vectorization. */
bool m_vect_nonmem = false;
+ /* If this loop gets vectorized with emulated gather load. */
+ bool m_gather_load = false;
};
/* Test for likely overcommitment of vector hardware resources. If a
@@ -5301,9 +5308,34 @@ rs6000_cost_data::update_target_cost_per_stmt (vect_cost_for_stmt kind,
{
m_nstmts += orig_count;
- if (kind == scalar_load || kind == vector_load
- || kind == unaligned_load || kind == vector_gather_load)
- m_nloads += orig_count;
+ if (kind == scalar_load
+ || kind == vector_load
+ || kind == unaligned_load
+ || kind == vector_gather_load)
+ {
+ m_nloads += orig_count;
+ if (stmt_info && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ m_gather_load = true;
+ }
+ else if (kind == scalar_store
+ || kind == vector_store
+ || kind == unaligned_store
+ || kind == vector_scatter_store)
+ m_nstores += orig_count;
+ else if ((kind == scalar_stmt
+ || kind == vector_stmt
+ || kind == vec_to_scalar)
+ && stmt_info
+ && vect_is_reduction (stmt_info))
+ {
+ /* Loop body contains normal int or fp operations and epilogue
+ contains vector reduction. For simplicity, we assume int
+ operation takes one cycle and fp operation takes one more. */
+ tree lhs = gimple_get_lhs (stmt_info->stmt);
+ bool is_float = FLOAT_TYPE_P (TREE_TYPE (lhs));
+ unsigned int basic_cost = is_float ? 2 : 1;
+ m_reduc_factor = MAX (basic_cost * orig_count, m_reduc_factor);
+ }
/* Power processors do not currently have instructions for strided
and elementwise loads, and instead we must generate multiple
@@ -5395,6 +5427,90 @@ rs6000_cost_data::adjust_vect_cost_per_loop (loop_vec_info loop_vinfo)
}
}
+/* Determine suggested unroll factor by considering some below factors:
+
+ - unroll option/pragma which can disable unrolling for this loop;
+ - simple hardware resource model for non memory vector insns;
+ - aggressive heuristics when iteration count is unknown:
+ - reduction case to break cross iteration dependency;
+ - emulated gather load;
+ - estimated iteration count when iteration count is unknown;
+*/
+
+
+unsigned int
+rs6000_cost_data::determine_suggested_unroll_factor (loop_vec_info loop_vinfo)
+{
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+
+ /* Don't unroll if it's specified explicitly not to be unrolled. */
+ if (loop->unroll == 1
+ || (OPTION_SET_P (flag_unroll_loops) && !flag_unroll_loops)
+ || (OPTION_SET_P (flag_unroll_all_loops) && !flag_unroll_all_loops))
+ return 1;
+
+ unsigned int nstmts_nonldst = m_nstmts - m_nloads - m_nstores;
+ /* Don't unroll if no vector instructions excepting for memory access. */
+ if (nstmts_nonldst == 0)
+ return 1;
+
+ /* Consider breaking cross iteration dependency for reduction. */
+ unsigned int reduc_factor = m_reduc_factor > 1 ? m_reduc_factor : 1;
+
+ /* Use this simple hardware resource model that how many non ld/st
+ vector instructions can be issued per cycle. */
+ unsigned int issue_width = rs6000_vect_unroll_issue;
+ unsigned int uf = CEIL (reduc_factor * issue_width, nstmts_nonldst);
+ uf = MIN ((unsigned int) rs6000_vect_unroll_limit, uf);
+ /* Make sure it is power of 2. */
+ uf = 1 << ceil_log2 (uf);
+
+ /* If the iteration count is known, the costing would be exact enough,
+ don't worry it could be worse. */
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ return uf;
+
+ /* Inspired by SPEC2017 parest_r, we want to aggressively unroll the
+ loop if either condition is satisfied:
+ - reduction factor exceeds the threshold;
+ - emulated gather load adopted. */
+ if (reduc_factor > (unsigned int) rs6000_vect_unroll_reduc_threshold
+ || m_gather_load)
+ return uf;
+
+ /* Check if we can conclude it's good to unroll from the estimated
+ iteration count. */
+ HOST_WIDE_INT est_niter = get_estimated_loop_iterations_int (loop);
+ unsigned int vf = vect_vf_for_cost (loop_vinfo);
+ unsigned int unrolled_vf = vf * uf;
+ if (est_niter == -1 || est_niter < unrolled_vf)
+ /* When the estimated iteration of this loop is unknown, it's possible
+ that we are able to vectorize this loop with the original VF but fail
+ to vectorize it with the unrolled VF any more if the actual iteration
+ count is in between. */
+ return 1;
+ else
+ {
+ unsigned int epil_niter_unr = est_niter % unrolled_vf;
+ unsigned int epil_niter = est_niter % vf;
+ /* Even if we have partial vector support, it can be still inefficent
+ to calculate the length when the iteration count is unknown, so
+ only expect it's good to unroll when the epilogue iteration count
+ is not bigger than VF (only one time length calculation). */
+ if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
+ && epil_niter_unr <= vf)
+ return uf;
+ /* Without partial vector support, conservatively unroll this when
+ the epilogue iteration count is less than the original one
+ (epilogue execution time wouldn't be longer than before). */
+ else if (!LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
+ && epil_niter_unr <= epil_niter)
+ return uf;
+ }
+
+ return 1;
+}
+
void
rs6000_cost_data::finish_cost (const vector_costs *scalar_costs)
{
@@ -5411,6 +5527,9 @@ rs6000_cost_data::finish_cost (const vector_costs *scalar_costs)
&& LOOP_VINFO_VECT_FACTOR (loop_vinfo) == 2
&& LOOP_REQUIRES_VERSIONING (loop_vinfo))
m_costs[vect_body] += 10000;
+
+ m_suggested_unroll_factor
+ = determine_suggested_unroll_factor (loop_vinfo);
}
vector_costs::finish_cost (scalar_costs);
@@ -10178,6 +10297,41 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
gen_rtx_IOR (DImode, copy_rtx (temp),
GEN_INT (ud1)));
}
+ else if (TARGET_PREFIXED)
+ {
+ if (can_create_pseudo_p ())
+ {
+ /* pli A,L + pli B,H + rldimi A,B,32,0. */
+ temp = gen_reg_rtx (DImode);
+ rtx temp1 = gen_reg_rtx (DImode);
+ emit_move_insn (temp, GEN_INT ((ud4 << 16) | ud3));
+ emit_move_insn (temp1, GEN_INT ((ud2 << 16) | ud1));
+
+ emit_insn (gen_rotldi3_insert_3 (dest, temp, GEN_INT (32), temp1,
+ GEN_INT (0xffffffff)));
+ }
+ else
+ {
+ /* pli A,H + sldi A,32 + paddi A,A,L. */
+ emit_move_insn (dest, GEN_INT ((ud4 << 16) | ud3));
+
+ emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
+
+ bool can_use_paddi = REGNO (dest) != FIRST_GPR_REGNO;
+
+ /* Use paddi for the low 32 bits. */
+ if (ud2 != 0 && ud1 != 0 && can_use_paddi)
+ emit_move_insn (dest, gen_rtx_PLUS (DImode, dest,
+ GEN_INT ((ud2 << 16) | ud1)));
+
+ /* Use oris, ori for low 32 bits. */
+ if (ud2 != 0 && (ud1 == 0 || !can_use_paddi))
+ emit_move_insn (dest,
+ gen_rtx_IOR (DImode, dest, GEN_INT (ud2 << 16)));
+ if (ud1 != 0 && (ud2 == 0 || !can_use_paddi))
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ }
else
{
temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt
index b227bf9..b63a5d4 100644
--- a/gcc/config/rs6000/rs6000.opt
+++ b/gcc/config/rs6000/rs6000.opt
@@ -620,6 +620,14 @@ mieee128-constant
Target Var(TARGET_IEEE128_CONSTANT) Init(1) Save
Generate (do not generate) code that uses the LXVKQ instruction.
+; Documented parameters
+
+-param=rs6000-vect-unroll-limit=
+Target Joined UInteger Var(rs6000_vect_unroll_limit) Init(4) IntegerRange(1, 64) Param
+Used to limit unroll factor which indicates how much the autovectorizer may
+unroll a loop. The default value is 4.
+
+; Undocumented parameters
-param=rs6000-density-pct-threshold=
Target Undocumented Joined UInteger Var(rs6000_density_pct_threshold) Init(85) IntegerRange(0, 100) Param
When costing for loop vectorization, we probably need to penalize the loop body
@@ -657,3 +665,13 @@ Like parameter rs6000-density-load-pct-threshold, we also check if the total
number of load statements exceeds the threshold specified by this parameter,
and penalize only if it's satisfied. The default value is 20.
+-param=rs6000-vect-unroll-issue=
+Target Undocumented Joined UInteger Var(rs6000_vect_unroll_issue) Init(4) IntegerRange(1, 128) Param
+Indicate how many non memory access vector instructions can be issued per
+cycle, it's used in unroll factor determination for autovectorizer. The
+default value is 4.
+
+-param=rs6000-vect-unroll-reduc-threshold=
+Target Undocumented Joined UInteger Var(rs6000_vect_unroll_reduc_threshold) Init(1) Param
+When reduction factor computed for a loop exceeds the threshold specified by
+this parameter, prefer to unroll this loop. The default value is 1.
diff --git a/gcc/config/xtensa/xtensa.cc b/gcc/config/xtensa/xtensa.cc
index ac52c01..f1b3331 100644
--- a/gcc/config/xtensa/xtensa.cc
+++ b/gcc/config/xtensa/xtensa.cc
@@ -4472,7 +4472,7 @@ xtensa_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
static bool
xtensa_function_value_regno_p (const unsigned int regno)
{
- return (regno == GP_RETURN);
+ return (regno >= GP_RETURN && regno < GP_RETURN + GP_RETURN_REG_COUNT);
}
/* The static chain is passed in memory. Provide rtx giving 'mem'
diff --git a/gcc/config/xtensa/xtensa.h b/gcc/config/xtensa/xtensa.h
index 0f3006d..16e3d55 100644
--- a/gcc/config/xtensa/xtensa.h
+++ b/gcc/config/xtensa/xtensa.h
@@ -488,6 +488,7 @@ enum reg_class
point, and values of coprocessor and user-defined modes. */
#define GP_RETURN (GP_REG_FIRST + 2 + WINDOW_SIZE)
#define GP_OUTGOING_RETURN (GP_REG_FIRST + 2)
+#define GP_RETURN_REG_COUNT 4
/* Symbolic macros for the first/last argument registers. */
#define GP_ARG_FIRST (GP_REG_FIRST + 2)
diff --git a/gcc/config/xtensa/xtensa.md b/gcc/config/xtensa/xtensa.md
index f722ea5..608110c 100644
--- a/gcc/config/xtensa/xtensa.md
+++ b/gcc/config/xtensa/xtensa.md
@@ -2305,6 +2305,27 @@
(set_attr "mode" "none")
(set_attr "length" "3")])
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "")
+ (const_int 0))
+ (match_operand 1 "")
+ (match_operand 2 "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ emit_insn (gen_blockage ());
+ DONE;
+})
+
(define_insn "entry"
[(set (reg:SI A1_REG)
(unspec_volatile:SI [(match_operand:SI 0 "const_int_operand" "i")]
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 0f37423..dc4ce202 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,156 @@
+2022-09-17 Patrick Palka <ppalka@redhat.com>
+
+ * module.cc (friend_from_decl_list): Don't consider
+ CLASSTYPE_TEMPLATE_INFO for a TYPENAME_TYPE friend.
+ (trees_in::read_class_def): Don't add to
+ CLASSTYPE_BEFRIENDING_CLASSES for a TYPENAME_TYPE friend.
+
+2022-09-16 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/92505
+ * constexpr.cc (cxx_eval_component_reference): Check non_constant_p
+ sooner. In C++14 or later, reject a DECL_MUTABLE_P member access
+ only if CONSTRUCTOR_MUTABLE_POISON is also set.
+
+2022-09-16 Jason Merrill <jason@redhat.com>
+
+ PR c++/106858
+ * parser.cc (cp_parser_omp_var_list_no_open): Pass the
+ initial token location down.
+ * semantics.cc (finish_omp_clauses): Check
+ invalid_nonstatic_memfn_p.
+ * typeck.cc (invalid_nonstatic_memfn_p): Handle null TREE_TYPE.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * decl.cc (cxx_init_decl_processing): Inline last
+ build_void_list_node call.
+ (build_void_list_node): Remove.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * semantics.cc (finish_omp_clauses): Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * cp-tree.h (mark_used): Remove single-parameter overload. Add
+ default argument to the two-parameter overload.
+ * decl2.cc (mark_used): Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * cp-tree.h (cxx_constant_value): Define two-parameter version
+ that omits the object parameter.
+ * decl.cc (build_explicit_specifier): Omit NULL_TREE object
+ argument to cxx_constant_value.
+ * except.cc (build_noexcept_spec): Likewise.
+ * pt.cc (expand_integer_pack): Likewise.
+ (fold_targs_r): Likewise.
+ * semantics.cc (finish_if_stmt_cond): Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * decl.cc (build_explicit_specifier): Pass complain to
+ cxx_constant_value.
+ * except.cc (build_noexcept_spec): Likewise.
+ * pt.cc (expand_integer_pack): Likewise.
+ (tsubst_function_decl): Propagate error_mark_node returned
+ from build_explicit_specifier.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ * call.cc (build_conditional_expr): Adjust calls to
+ '_sfinae'-suffixed functions.
+ (build_temp): Likewise.
+ (convert_like_internal): Likewise.
+ (convert_arg_to_ellipsis): Likewise.
+ (build_over_call): Likewise.
+ (build_cxx_call): Likewise.
+ (build_new_method_call): Likewise.
+ * constexpr.cc (cxx_eval_outermost_constant_expr): Likewise.
+ (cxx_constant_value_sfinae): Rename to ...
+ (cxx_constant_value): ... this. Document its default arguments.
+ (fold_non_dependent_expr): Adjust function comment.
+ * cp-tree.h (instantiate_non_dependent_expr_sfinae): Rename to ...
+ (instantiate_non_dependent_expr): ... this. Give its 'complain'
+ parameter a default argument.
+ (get_target_expr_sfinae, get_target_expr): Likewise.
+ (require_complete_type_sfinae, require_complete_type): Likewise.
+ (abstract_virtuals_error_sfinae, abstract_virtuals_error):
+ Likewise.
+ (cxx_constant_value_sfinae, cxx_constant_value): Likewise.
+ * cvt.cc (build_up_reference): Adjust calls to '_sfinae'-suffixed
+ functions.
+ (ocp_convert): Likewise.
+ * decl.cc (build_explicit_specifier): Likewise.
+ * except.cc (build_noexcept_spec): Likewise.
+ * init.cc (build_new_1): Likewise.
+ * pt.cc (expand_integer_pack): Likewise.
+ (instantiate_non_dependent_expr_internal): Adjust function
+ comment.
+ (instantiate_non_dependent_expr): Rename to ...
+ (instantiate_non_dependent_expr_sfinae): ... this. Document its
+ default argument.
+ (tsubst_init): Adjust calls to '_sfinae'-suffixed functions.
+ (fold_targs_r): Likewise.
+ * semantics.cc (finish_compound_literal): Likewise.
+ (finish_decltype_type): Likewise.
+ (cp_build_bit_cast): Likewise.
+ * tree.cc (build_cplus_new): Likewise.
+ (get_target_expr): Rename to ...
+ (get_target_expr_sfinae): ... this. Document its default
+ argument.
+ * typeck.cc (require_complete_type): Rename to ...
+ (require_complete_type_sfinae): ... this. Document its default
+ argument.
+ (cp_build_array_ref): Adjust calls to '_sfinae'-suffixed
+ functions.
+ (convert_arguments): Likewise.
+ (cp_build_binary_op): Likewise.
+ (build_static_cast_1): Likewise.
+ (cp_build_modify_expr): Likewise.
+ (convert_for_initialization): Likewise.
+ * typeck2.cc (abstract_virtuals_error): Rename to ...
+ (abstract_virtuals_error_sfinae): ... this. Document its default
+ argument.
+ (build_functional_cast_1): Adjust calls to '_sfinae'-suffixed
+ functions.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/101906
+ * pt.cc (tsubst_template_args): Set cp_evaluated here.
+ (tsubst_aggr_type): Not here.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106893
+ PR c++/90451
+ * decl.cc (cp_finish_decl): Call mark_single_function.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/93259
+ * pt.cc (type_dependent_expression_p): Treat a compound
+ literal of array-of-unknown-bound type like a variable.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106567
+ * lambda.cc (type_deducible_expression_p): Check
+ array_of_unknown_bound_p.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ PR c++/86491
+ * decl2.cc (constrain_class_visibility): Adjust wording of
+ -Wsubobject-linkage for cases where anonymous
+ namespaces aren't used.
+ * tree.cc (decl_anon_ns_mem_p): Now only true for actual anonymous
+ namespace members, rename old semantics to...
+ (decl_internal_context_p): ...this.
+ * cp-tree.h, name-lookup.cc, pt.cc: Adjust.
+
2022-09-08 Jonathan Wakely <jwakely@redhat.com>
PR c++/106838
diff --git a/gcc/cp/call.cc b/gcc/cp/call.cc
index d107a28..7e9289f 100644
--- a/gcc/cp/call.cc
+++ b/gcc/cp/call.cc
@@ -5976,7 +5976,7 @@ build_conditional_expr (const op_location_t &loc,
but now we sometimes wrap them in NOP_EXPRs so the test would
fail. */
if (CLASS_TYPE_P (TREE_TYPE (result)))
- result = get_target_expr_sfinae (result, complain);
+ result = get_target_expr (result, complain);
/* If this expression is an rvalue, but might be mistaken for an
lvalue, we must add a NON_LVALUE_EXPR. */
result = rvalue (result);
@@ -7672,7 +7672,7 @@ build_temp (tree expr, tree type, int flags,
if ((lvalue_kind (expr) & clk_packed)
&& CLASS_TYPE_P (TREE_TYPE (expr))
&& !type_has_nontrivial_copy_init (TREE_TYPE (expr)))
- return get_target_expr_sfinae (expr, complain);
+ return get_target_expr (expr, complain);
/* In decltype, we might have decided not to wrap this call in a TARGET_EXPR.
But it turns out to be a subexpression, so perform temporary
@@ -8008,10 +8008,10 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
&& !processing_template_decl)
{
bool direct = CONSTRUCTOR_IS_DIRECT_INIT (expr);
- if (abstract_virtuals_error_sfinae (NULL_TREE, totype, complain))
+ if (abstract_virtuals_error (NULL_TREE, totype, complain))
return error_mark_node;
expr = build_value_init (totype, complain);
- expr = get_target_expr_sfinae (expr, complain);
+ expr = get_target_expr (expr, complain);
if (expr != error_mark_node)
{
TARGET_EXPR_LIST_INIT_P (expr) = true;
@@ -8137,7 +8137,7 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
field = next_aggregate_field (DECL_CHAIN (field));
CONSTRUCTOR_APPEND_ELT (vec, field, size_int (len));
tree new_ctor = build_constructor (totype, vec);
- return get_target_expr_sfinae (new_ctor, complain);
+ return get_target_expr (new_ctor, complain);
}
case ck_aggr:
@@ -8153,7 +8153,7 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
return expr;
}
expr = reshape_init (totype, expr, complain);
- expr = get_target_expr_sfinae (digest_init (totype, expr, complain),
+ expr = get_target_expr (digest_init (totype, expr, complain),
complain);
if (expr != error_mark_node)
TARGET_EXPR_LIST_INIT_P (expr) = true;
@@ -8580,12 +8580,12 @@ convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
standard conversions are performed. */
arg = decay_conversion (arg, complain);
- arg = require_complete_type_sfinae (arg, complain);
+ arg = require_complete_type (arg, complain);
arg_type = TREE_TYPE (arg);
if (arg != error_mark_node
/* In a template (or ill-formed code), we can have an incomplete type
- even after require_complete_type_sfinae, in which case we don't know
+ even after require_complete_type, in which case we don't know
whether it has trivial copy or not. */
&& COMPLETE_TYPE_P (arg_type)
&& !cp_unevaluated_operand)
@@ -10000,7 +10000,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
obj_arg = TREE_OPERAND (addr, 0);
}
}
- call = cxx_constant_value_sfinae (call, obj_arg, complain);
+ call = cxx_constant_value (call, obj_arg, complain);
if (obj_arg && !error_operand_p (call))
call = build2 (INIT_EXPR, void_type_node, obj_arg, call);
call = convert_from_reference (call);
@@ -10505,7 +10505,7 @@ build_cxx_call (tree fn, int nargs, tree *argarray,
prvalue. The type of the prvalue may be incomplete. */
if (!(complain & tf_decltype))
{
- fn = require_complete_type_sfinae (fn, complain);
+ fn = require_complete_type (fn, complain);
if (fn == error_mark_node)
return error_mark_node;
@@ -11084,7 +11084,7 @@ build_new_method_call (tree instance, tree fns, vec<tree, va_gc> **args,
if (init)
{
if (is_dummy_object (instance))
- return get_target_expr_sfinae (init, complain);
+ return get_target_expr (init, complain);
init = build2 (INIT_EXPR, TREE_TYPE (instance), instance, init);
TREE_SIDE_EFFECTS (init) = true;
return init;
diff --git a/gcc/cp/constexpr.cc b/gcc/cp/constexpr.cc
index c047fe4..1063987 100644
--- a/gcc/cp/constexpr.cc
+++ b/gcc/cp/constexpr.cc
@@ -4088,6 +4088,8 @@ cxx_eval_component_reference (const constexpr_ctx *ctx, tree t,
tree whole = cxx_eval_constant_expression (ctx, orig_whole,
lval,
non_constant_p, overflow_p);
+ if (*non_constant_p)
+ return t;
if (INDIRECT_REF_P (whole)
&& integer_zerop (TREE_OPERAND (whole, 0)))
{
@@ -4108,20 +4110,21 @@ cxx_eval_component_reference (const constexpr_ctx *ctx, tree t,
whole, part, NULL_TREE);
/* Don't VERIFY_CONSTANT here; we only want to check that we got a
CONSTRUCTOR. */
- if (!*non_constant_p && TREE_CODE (whole) != CONSTRUCTOR)
+ if (TREE_CODE (whole) != CONSTRUCTOR)
{
if (!ctx->quiet)
error ("%qE is not a constant expression", orig_whole);
*non_constant_p = true;
+ return t;
}
- if (DECL_MUTABLE_P (part))
+ if ((cxx_dialect < cxx14 || CONSTRUCTOR_MUTABLE_POISON (whole))
+ && DECL_MUTABLE_P (part))
{
if (!ctx->quiet)
error ("mutable %qD is not usable in a constant expression", part);
*non_constant_p = true;
+ return t;
}
- if (*non_constant_p)
- return t;
bool pmf = TYPE_PTRMEMFUNC_P (TREE_TYPE (whole));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (whole), i, field, value)
{
@@ -8068,7 +8071,7 @@ cxx_eval_outermost_constant_expr (tree t, bool allow_non_constant,
r = get_target_expr (r);
else
{
- r = get_target_expr_sfinae (r, tf_warning_or_error | tf_no_cleanup);
+ r = get_target_expr (r, tf_warning_or_error | tf_no_cleanup);
TREE_CONSTANT (r) = true;
}
}
@@ -8081,19 +8084,11 @@ cxx_eval_outermost_constant_expr (tree t, bool allow_non_constant,
}
/* If T represents a constant expression returns its reduced value.
- Otherwise return error_mark_node. If T is dependent, then
- return NULL. */
-
-tree
-cxx_constant_value (tree t, tree decl)
-{
- return cxx_eval_outermost_constant_expr (t, false, true, true, false, decl);
-}
-
-/* As above, but respect SFINAE. */
+ Otherwise return error_mark_node. */
tree
-cxx_constant_value_sfinae (tree t, tree decl, tsubst_flags_t complain)
+cxx_constant_value (tree t, tree decl /* = NULL_TREE */,
+ tsubst_flags_t complain /* = tf_error */)
{
bool sfinae = !(complain & tf_error);
tree r = cxx_eval_outermost_constant_expr (t, sfinae, true, true, false, decl);
@@ -8316,8 +8311,8 @@ fold_non_dependent_expr_template (tree t, tsubst_flags_t complain,
/* Like maybe_constant_value but first fully instantiate the argument.
- Note: this is equivalent to instantiate_non_dependent_expr_sfinae
- (t, complain) followed by maybe_constant_value but is more efficient,
+ Note: this is equivalent to instantiate_non_dependent_expr (t, complain)
+ followed by maybe_constant_value but is more efficient,
because it calls instantiation_dependent_expression_p and
potential_constant_expression at most once.
The manifestly_const_eval argument is passed to maybe_constant_value.
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 7b28405..f19ecaf 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -6945,8 +6945,8 @@ extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
-extern bool mark_used (tree);
-extern bool mark_used (tree, tsubst_flags_t);
+extern bool mark_used (tree,
+ tsubst_flags_t = tf_warning_or_error);
extern bool mark_single_function (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree, tree);
@@ -7391,8 +7391,7 @@ extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
-extern tree instantiate_non_dependent_expr (tree);
-extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
+extern tree instantiate_non_dependent_expr (tree, tsubst_flags_t = tf_error);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
@@ -7824,8 +7823,8 @@ extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_local_temp (tree);
extern bool is_local_temp (tree);
extern tree build_aggr_init_expr (tree, tree);
-extern tree get_target_expr (tree);
-extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
+extern tree get_target_expr (tree,
+ tsubst_flags_t = tf_warning_or_error);
extern tree build_cplus_array_type (tree, tree, int is_dep = -1);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
@@ -7874,7 +7873,8 @@ extern tree replace_placeholders (tree, tree, bool * = NULL);
extern bool find_placeholders (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
-extern bool decl_anon_ns_mem_p (const_tree);
+extern bool decl_anon_ns_mem_p (tree);
+extern bool decl_internal_context_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
@@ -7937,8 +7937,8 @@ extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree, tsubst_flags_t);
extern tree contextual_conv_bool (tree, tsubst_flags_t);
extern tree condition_conversion (tree);
-extern tree require_complete_type (tree);
-extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
+extern tree require_complete_type (tree,
+ tsubst_flags_t = tf_warning_or_error);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
@@ -8152,10 +8152,10 @@ extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (location_t, tree,
enum lvalue_use);
extern void complete_type_check_abstract (tree);
-extern int abstract_virtuals_error (tree, tree);
-extern int abstract_virtuals_error (abstract_class_use, tree);
-extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
-extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
+extern int abstract_virtuals_error (tree, tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern int abstract_virtuals_error (abstract_class_use, tree,
+ tsubst_flags_t = tf_warning_or_error);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
@@ -8412,8 +8412,10 @@ extern bool require_potential_constant_expression (tree);
extern bool require_constant_expression (tree);
extern bool require_rvalue_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
-extern tree cxx_constant_value (tree, tree = NULL_TREE);
-extern tree cxx_constant_value_sfinae (tree, tree, tsubst_flags_t);
+extern tree cxx_constant_value (tree, tree = NULL_TREE,
+ tsubst_flags_t = tf_error);
+inline tree cxx_constant_value (tree t, tsubst_flags_t complain)
+{ return cxx_constant_value (t, NULL_TREE, complain); }
extern void cxx_constant_dtor (tree, tree);
extern tree cxx_constant_init (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE, bool = false);
diff --git a/gcc/cp/cvt.cc b/gcc/cp/cvt.cc
index 30a9806..434d306 100644
--- a/gcc/cp/cvt.cc
+++ b/gcc/cp/cvt.cc
@@ -339,7 +339,7 @@ build_up_reference (tree type, tree arg, int flags, tree decl,
LOOKUP_ONLYCONVERTING|DIRECT_BIND);
}
else if (!(flags & DIRECT_BIND) && ! obvalue_p (arg))
- return get_target_expr_sfinae (arg, complain);
+ return get_target_expr (arg, complain);
/* If we had a way to wrap this up, and say, if we ever needed its
address, transform all occurrences of the register, into a memory
@@ -939,7 +939,7 @@ ocp_convert (tree type, tree expr, int convtype, int flags,
ctor = e;
- if (abstract_virtuals_error_sfinae (NULL_TREE, type, complain))
+ if (abstract_virtuals_error (NULL_TREE, type, complain))
return error_mark_node;
if (BRACE_ENCLOSED_INITIALIZER_P (ctor))
diff --git a/gcc/cp/decl.cc b/gcc/cp/decl.cc
index 4665a29..070f673 100644
--- a/gcc/cp/decl.cc
+++ b/gcc/cp/decl.cc
@@ -4623,7 +4623,7 @@ cxx_init_decl_processing (void)
record_unknown_type (init_list_type_node, "init list");
/* Used when parsing to distinguish parameter-lists () and (void). */
- explicit_void_list_node = build_void_list_node ();
+ explicit_void_list_node = build_tree_list (NULL_TREE, void_type_node);
{
/* Make sure we get a unique function type, so we can give
@@ -8140,6 +8140,9 @@ cp_finish_decl (tree decl, tree init, bool init_const_expr_p,
d_init = build_x_compound_expr_from_list (d_init, ELK_INIT,
tf_warning_or_error);
d_init = resolve_nondeduced_context (d_init, tf_warning_or_error);
+ /* Force auto deduction now. Use tf_none to avoid redundant warnings
+ on deprecated-14.C. */
+ mark_single_function (d_init, tf_none);
}
enum auto_deduction_context adc = adc_variable_type;
if (DECL_DECOMPOSITION_P (decl))
@@ -18447,14 +18450,6 @@ cp_tree_node_structure (union lang_tree_node * t)
}
}
-/* Build the void_list_node (void_type_node having been created). */
-tree
-build_void_list_node (void)
-{
- tree t = build_tree_list (NULL_TREE, void_type_node);
- return t;
-}
-
bool
cp_missing_noreturn_ok_p (tree decl)
{
@@ -18553,8 +18548,8 @@ build_explicit_specifier (tree expr, tsubst_flags_t complain)
return expr;
expr = build_converted_constant_bool_expr (expr, complain);
- expr = instantiate_non_dependent_expr_sfinae (expr, complain);
- expr = cxx_constant_value (expr);
+ expr = instantiate_non_dependent_expr (expr, complain);
+ expr = cxx_constant_value (expr, complain);
return expr;
}
diff --git a/gcc/cp/decl2.cc b/gcc/cp/decl2.cc
index cd18881..9f18466 100644
--- a/gcc/cp/decl2.cc
+++ b/gcc/cp/decl2.cc
@@ -2851,7 +2851,7 @@ determine_visibility (tree decl)
if (class_type)
determine_visibility_from_class (decl, class_type);
- if (decl_anon_ns_mem_p (decl))
+ if (decl_internal_context_p (decl))
/* Names in an anonymous namespace get internal linkage. */
constrain_visibility (decl, VISIBILITY_ANON, false);
else if (TREE_CODE (decl) != TYPE_DECL)
@@ -2965,16 +2965,21 @@ constrain_class_visibility (tree type)
{
if (same_type_p (TREE_TYPE (t), nlt))
warning (OPT_Wsubobject_linkage, "\
-%qT has a field %qD whose type has no linkage",
+%qT has a field %q#D whose type has no linkage",
type, t);
else
warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type depends on the type %qT which has no linkage",
type, t, nlt);
}
- else
+ else if (cxx_dialect > cxx98
+ && !decl_anon_ns_mem_p (ftype))
warning (OPT_Wsubobject_linkage, "\
-%qT has a field %qD whose type uses the anonymous namespace",
+%qT has a field %q#D whose type has internal linkage",
+ type, t);
+ else // In C++98 this can only happen with unnamed namespaces.
+ warning (OPT_Wsubobject_linkage, "\
+%qT has a field %q#D whose type uses the anonymous namespace",
type, t);
}
}
@@ -2989,28 +2994,34 @@ constrain_class_visibility (tree type)
binfo = TYPE_BINFO (type);
for (i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
{
- int subvis = type_visibility (TREE_TYPE (t));
+ tree btype = BINFO_TYPE (t);
+ int subvis = type_visibility (btype);
if (subvis == VISIBILITY_ANON)
{
if (!in_main_input_context())
{
- tree nlt = no_linkage_check (TREE_TYPE (t), /*relaxed_p=*/false);
+ tree nlt = no_linkage_check (btype, /*relaxed_p=*/false);
if (nlt)
{
- if (same_type_p (TREE_TYPE (t), nlt))
+ if (same_type_p (btype, nlt))
warning (OPT_Wsubobject_linkage, "\
-%qT has a base %qT whose type has no linkage",
- type, TREE_TYPE (t));
+%qT has a base %qT which has no linkage",
+ type, btype);
else
warning (OPT_Wsubobject_linkage, "\
-%qT has a base %qT whose type depends on the type %qT which has no linkage",
- type, TREE_TYPE (t), nlt);
+%qT has a base %qT which depends on the type %qT which has no linkage",
+ type, btype, nlt);
}
- else
+ else if (cxx_dialect > cxx98
+ && !decl_anon_ns_mem_p (btype))
+ warning (OPT_Wsubobject_linkage, "\
+%qT has a base %qT which has internal linkage",
+ type, btype);
+ else // In C++98 this can only happen with unnamed namespaces.
warning (OPT_Wsubobject_linkage, "\
-%qT has a base %qT whose type uses the anonymous namespace",
- type, TREE_TYPE (t));
+%qT has a base %qT which uses the anonymous namespace",
+ type, btype);
}
}
else if (vis < VISIBILITY_HIDDEN
@@ -5564,7 +5575,7 @@ mark_single_function (tree expr, tsubst_flags_t complain)
wrong, true otherwise. */
bool
-mark_used (tree decl, tsubst_flags_t complain)
+mark_used (tree decl, tsubst_flags_t complain /* = tf_warning_or_error */)
{
/* If we're just testing conversions or resolving overloads, we
don't want any permanent effects like forcing functions to be
@@ -5806,12 +5817,6 @@ mark_used (tree decl, tsubst_flags_t complain)
return true;
}
-bool
-mark_used (tree decl)
-{
- return mark_used (decl, tf_warning_or_error);
-}
-
tree
vtv_start_verification_constructor_init_function (void)
{
diff --git a/gcc/cp/except.cc b/gcc/cp/except.cc
index da0a65c..048612d 100644
--- a/gcc/cp/except.cc
+++ b/gcc/cp/except.cc
@@ -1256,8 +1256,8 @@ build_noexcept_spec (tree expr, tsubst_flags_t complain)
&& !instantiation_dependent_expression_p (expr))
{
expr = build_converted_constant_bool_expr (expr, complain);
- expr = instantiate_non_dependent_expr_sfinae (expr, complain);
- expr = cxx_constant_value (expr);
+ expr = instantiate_non_dependent_expr (expr, complain);
+ expr = cxx_constant_value (expr, complain);
}
if (TREE_CODE (expr) == INTEGER_CST)
{
diff --git a/gcc/cp/init.cc b/gcc/cp/init.cc
index edca843..a85c303 100644
--- a/gcc/cp/init.cc
+++ b/gcc/cp/init.cc
@@ -3158,7 +3158,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
"%<new%> of %<initializer_list%> does not "
"extend the lifetime of the underlying array");
- if (abstract_virtuals_error_sfinae (ACU_NEW, elt_type, complain))
+ if (abstract_virtuals_error (ACU_NEW, elt_type, complain))
return error_mark_node;
is_initialized = (type_build_ctor_call (elt_type) || *init != NULL);
diff --git a/gcc/cp/lambda.cc b/gcc/cp/lambda.cc
index 3fb98a9..3ee1fe9 100644
--- a/gcc/cp/lambda.cc
+++ b/gcc/cp/lambda.cc
@@ -198,6 +198,7 @@ type_deducible_expression_p (tree expr)
tree t = non_reference (TREE_TYPE (expr));
return (t && TREE_CODE (t) != TYPE_PACK_EXPANSION
&& !WILDCARD_TYPE_P (t) && !LAMBDA_TYPE_P (t)
+ && !array_of_unknown_bound_p (t)
&& !type_uses_auto (t));
}
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index f27f4d0..1a1ff5b 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -4734,7 +4734,8 @@ friend_from_decl_list (tree frnd)
if (TYPE_P (frnd))
{
res = TYPE_NAME (frnd);
- if (CLASSTYPE_TEMPLATE_INFO (frnd))
+ if (CLASS_TYPE_P (frnd)
+ && CLASSTYPE_TEMPLATE_INFO (frnd))
tmpl = CLASSTYPE_TI_TEMPLATE (frnd);
}
else if (DECL_TEMPLATE_INFO (frnd))
@@ -12121,7 +12122,7 @@ trees_in::read_class_def (tree defn, tree maybe_template)
{
tree f = TREE_VALUE (friend_classes);
- if (TYPE_P (f))
+ if (CLASS_TYPE_P (f))
{
CLASSTYPE_BEFRIENDING_CLASSES (f)
= tree_cons (NULL_TREE, type,
diff --git a/gcc/cp/name-lookup.cc b/gcc/cp/name-lookup.cc
index f89a1dc..69d555d 100644
--- a/gcc/cp/name-lookup.cc
+++ b/gcc/cp/name-lookup.cc
@@ -402,7 +402,7 @@ add_decl_to_level (cp_binding_level *b, tree decl)
&& ((VAR_P (decl) && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
|| (TREE_CODE (decl) == FUNCTION_DECL
&& (!TREE_PUBLIC (decl)
- || decl_anon_ns_mem_p (decl)
+ || decl_internal_context_p (decl)
|| DECL_DECLARED_INLINE_P (decl)))))
vec_safe_push (static_decls, decl);
}
diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc
index 841ba6e..3cbe0d6 100644
--- a/gcc/cp/parser.cc
+++ b/gcc/cp/parser.cc
@@ -36938,10 +36938,9 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
cp_id_kind idk = CP_ID_KIND_NONE;
cp_lexer_consume_token (parser->lexer);
decl = convert_from_reference (decl);
- decl
- = cp_parser_postfix_dot_deref_expression (parser, ttype,
- decl, false,
- &idk, loc);
+ decl = (cp_parser_postfix_dot_deref_expression
+ (parser, ttype, cp_expr (decl, token->location),
+ false, &idk, loc));
}
/* FALLTHROUGH. */
case OMP_CLAUSE_AFFINITY:
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index c5fc0f1..db4e808 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -3868,8 +3868,8 @@ expand_integer_pack (tree call, tree args, tsubst_flags_t complain,
}
else
{
- hi = instantiate_non_dependent_expr_sfinae (hi, complain);
- hi = cxx_constant_value (hi);
+ hi = instantiate_non_dependent_expr (hi, complain);
+ hi = cxx_constant_value (hi, complain);
int len = valid_constant_size_p (hi) ? tree_to_shwi (hi) : -1;
/* Calculate the largest value of len that won't make the size of the vec
@@ -6428,7 +6428,7 @@ redeclare_class_template (tree type, tree parms, tree cons)
return true;
}
-/* The actual substitution part of instantiate_non_dependent_expr_sfinae,
+/* The actual substitution part of instantiate_non_dependent_expr,
to be used when the caller has already checked
!instantiation_dependent_uneval_expression_p (expr)
and cleared processing_template_decl. */
@@ -6447,7 +6447,8 @@ instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain)
/* Instantiate the non-dependent expression EXPR. */
tree
-instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
+instantiate_non_dependent_expr (tree expr,
+ tsubst_flags_t complain /* = tf_error */)
{
if (expr == NULL_TREE)
return NULL_TREE;
@@ -6462,12 +6463,6 @@ instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
return expr;
}
-tree
-instantiate_non_dependent_expr (tree expr)
-{
- return instantiate_non_dependent_expr_sfinae (expr, tf_error);
-}
-
/* Like instantiate_non_dependent_expr, but return NULL_TREE if the
expression is dependent or non-constant. */
@@ -13616,6 +13611,9 @@ tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl)
if (t == error_mark_node)
return error_mark_node;
+ /* In "sizeof(X<I>)" we need to evaluate "I". */
+ cp_evaluated ev;
+
const int len = TREE_VEC_LENGTH (t);
tree *elts = XALLOCAVEC (tree, len);
int expanded_len_adjust = 0;
@@ -13888,9 +13886,6 @@ tsubst_aggr_type (tree t,
tree argvec;
tree r;
- /* In "sizeof(X<I>)" we need to evaluate "I". */
- cp_evaluated ev;
-
/* Figure out what arguments are appropriate for the
type we are trying to find. For example, given:
@@ -14317,6 +14312,8 @@ tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
/*function_p=*/false,
/*i_c_e_p=*/true);
spec = build_explicit_specifier (spec, complain);
+ if (spec == error_mark_node)
+ return error_mark_node;
if (instantiation_dependent_expression_p (spec))
store_explicit_specifier (r, spec);
else
@@ -16970,7 +16967,7 @@ tsubst_init (tree init, tree decl, tree args,
zero. */
init = build_value_init (type, complain);
if (TREE_CODE (init) == AGGR_INIT_EXPR)
- init = get_target_expr_sfinae (init, complain);
+ init = get_target_expr (init, complain);
if (TREE_CODE (init) == TARGET_EXPR)
TARGET_EXPR_DIRECT_INIT_P (init) = true;
}
@@ -20124,7 +20121,7 @@ fold_targs_r (tree targs, tsubst_flags_t complain)
&& !glvalue_p (elt)
&& !TREE_CONSTANT (elt))
{
- elt = cxx_constant_value_sfinae (elt, NULL_TREE, complain);
+ elt = cxx_constant_value (elt, complain);
if (elt == error_mark_node)
return false;
}
@@ -25025,7 +25022,7 @@ mark_decl_instantiated (tree result, int extern_p)
return;
/* For anonymous namespace we don't need to do anything. */
- if (decl_anon_ns_mem_p (result))
+ if (decl_internal_context_p (result))
{
gcc_assert (!TREE_PUBLIC (result));
return;
@@ -28082,11 +28079,11 @@ type_dependent_expression_p (tree expression)
If the array has no length and has an initializer, it must be that
we couldn't determine its length in cp_complete_array_type because
it is dependent. */
- if (VAR_P (expression)
+ if (((VAR_P (expression) && DECL_INITIAL (expression))
+ || COMPOUND_LITERAL_P (expression))
&& TREE_TYPE (expression) != NULL_TREE
&& TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE
- && !TYPE_DOMAIN (TREE_TYPE (expression))
- && DECL_INITIAL (expression))
+ && !TYPE_DOMAIN (TREE_TYPE (expression)))
return true;
/* Pull a FUNCTION_DECL out of a BASELINK if we can. */
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index 6bda30e..8656207 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -1042,7 +1042,7 @@ finish_if_stmt_cond (tree orig_cond, tree if_stmt)
{
maybe_warn_for_constant_evaluated (cond, /*constexpr_if=*/true);
cond = instantiate_non_dependent_expr (cond);
- cond = cxx_constant_value (cond, NULL_TREE);
+ cond = cxx_constant_value (cond);
}
else
{
@@ -3318,7 +3318,7 @@ finish_compound_literal (tree type, tree compound_literal,
/* The CONSTRUCTOR is now an initializer, not a compound literal. */
if (TREE_CODE (compound_literal) == CONSTRUCTOR)
TREE_HAS_CONSTRUCTOR (compound_literal) = false;
- compound_literal = get_target_expr_sfinae (compound_literal, complain);
+ compound_literal = get_target_expr (compound_literal, complain);
}
else
/* For e.g. int{42} just make sure it's a prvalue. */
@@ -6755,11 +6755,18 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
break;
}
+ tree *grp_start_p = NULL, grp_sentinel = NULL_TREE;
+
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool field_ok = false;
+ /* We've reached the end of a list of expanded nodes. Reset the group
+ start pointer. */
+ if (c == grp_sentinel)
+ grp_start_p = NULL;
+
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
@@ -7982,6 +7989,9 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
+ grp_start_p = pc;
+ grp_sentinel = OMP_CLAUSE_CHAIN (c);
+
if (handle_omp_array_sections (c, ort))
remove = true;
else
@@ -8109,6 +8119,10 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = TREE_OPERAND (t, 1);
STRIP_NOPS (t);
}
+ if (TREE_CODE (t) == COMPONENT_REF
+ && invalid_nonstatic_memfn_p (EXPR_LOCATION (t), t,
+ tf_warning_or_error))
+ remove = true;
indir_component_ref_p = false;
if (TREE_CODE (t) == COMPONENT_REF
&& (TREE_CODE (TREE_OPERAND (t, 0)) == INDIRECT_REF
@@ -8353,6 +8367,9 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_ATTACH_DETACH))
{
+ grp_start_p = pc;
+ grp_sentinel = OMP_CLAUSE_CHAIN (c);
+
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
if (TREE_CODE (t) == COMPONENT_REF)
@@ -8763,7 +8780,18 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
}
if (remove)
- *pc = OMP_CLAUSE_CHAIN (c);
+ {
+ if (grp_start_p)
+ {
+ /* If we found a clause to remove, we want to remove the whole
+ expanded group, otherwise gimplify can get confused. */
+ *grp_start_p = grp_sentinel;
+ pc = grp_start_p;
+ grp_start_p = NULL;
+ }
+ else
+ *pc = OMP_CLAUSE_CHAIN (c);
+ }
else
pc = &OMP_CLAUSE_CHAIN (c);
}
@@ -11329,7 +11357,7 @@ finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
}
else if (processing_template_decl)
{
- expr = instantiate_non_dependent_expr_sfinae (expr, complain|tf_decltype);
+ expr = instantiate_non_dependent_expr (expr, complain|tf_decltype);
if (expr == error_mark_node)
return error_mark_node;
/* Keep processing_template_decl cleared for the rest of the function
@@ -12500,7 +12528,7 @@ cp_build_bit_cast (location_t loc, tree type, tree arg,
SET_EXPR_LOCATION (ret, loc);
if (!processing_template_decl && CLASS_TYPE_P (type))
- ret = get_target_expr_sfinae (ret, complain);
+ ret = get_target_expr (ret, complain);
return ret;
}
diff --git a/gcc/cp/tree.cc b/gcc/cp/tree.cc
index c678e3b..d0bd41a 100644
--- a/gcc/cp/tree.cc
+++ b/gcc/cp/tree.cc
@@ -713,7 +713,7 @@ build_cplus_new (tree type, tree init, tsubst_flags_t complain)
/* Make sure that we're not trying to create an instance of an
abstract class. */
- if (abstract_virtuals_error_sfinae (NULL_TREE, type, complain))
+ if (abstract_virtuals_error (NULL_TREE, type, complain))
return error_mark_node;
if (TREE_CODE (rval) == AGGR_INIT_EXPR)
@@ -922,7 +922,7 @@ force_target_expr (tree type, tree init, tsubst_flags_t complain)
/* Like build_target_expr_with_type, but use the type of INIT. */
tree
-get_target_expr_sfinae (tree init, tsubst_flags_t complain)
+get_target_expr (tree init, tsubst_flags_t complain /* = tf_warning_or_error */)
{
if (TREE_CODE (init) == AGGR_INIT_EXPR)
return build_target_expr (AGGR_INIT_EXPR_SLOT (init), init, complain);
@@ -935,12 +935,6 @@ get_target_expr_sfinae (tree init, tsubst_flags_t complain)
}
}
-tree
-get_target_expr (tree init)
-{
- return get_target_expr_sfinae (init, tf_warning_or_error);
-}
-
/* If EXPR is a bitfield reference, convert it to the declared type of
the bitfield, and return the resulting expression. Otherwise,
return EXPR itself. */
@@ -2968,7 +2962,7 @@ verify_stmt_tree (tree t)
/* Check if the type T depends on a type with no linkage and if so,
return it. If RELAXED_P then do not consider a class type declared
within a vague-linkage function to have no linkage. Remember:
- no-linkage is not the same as internal-linkage*/
+ no-linkage is not the same as internal-linkage. */
tree
no_linkage_check (tree t, bool relaxed_p)
@@ -3817,7 +3811,15 @@ decl_namespace_context (tree decl)
nested, or false otherwise. */
bool
-decl_anon_ns_mem_p (const_tree decl)
+decl_anon_ns_mem_p (tree decl)
+{
+ return !TREE_PUBLIC (decl_namespace_context (decl));
+}
+
+/* Returns true if the enclosing scope of DECL has internal or no linkage. */
+
+bool
+decl_internal_context_p (const_tree decl)
{
while (TREE_CODE (decl) != NAMESPACE_DECL)
{
diff --git a/gcc/cp/typeck.cc b/gcc/cp/typeck.cc
index b99947c..22d834d 100644
--- a/gcc/cp/typeck.cc
+++ b/gcc/cp/typeck.cc
@@ -71,7 +71,8 @@ static bool is_std_forward_p (tree);
complete type when this function returns. */
tree
-require_complete_type_sfinae (tree value, tsubst_flags_t complain)
+require_complete_type (tree value,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
tree type;
@@ -96,12 +97,6 @@ require_complete_type_sfinae (tree value, tsubst_flags_t complain)
return error_mark_node;
}
-tree
-require_complete_type (tree value)
-{
- return require_complete_type_sfinae (value, tf_warning_or_error);
-}
-
/* Try to complete TYPE, if it is incomplete. For example, if TYPE is
a template instantiation, do the instantiation. Returns TYPE,
whether or not it could be completed, unless something goes
@@ -2201,7 +2196,8 @@ invalid_nonstatic_memfn_p (location_t loc, tree expr, tsubst_flags_t complain)
return false;
if (is_overloaded_fn (expr) && !really_overloaded_fn (expr))
expr = get_first_fn (expr);
- if (DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
+ if (TREE_TYPE (expr)
+ && DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
{
if (complain & tf_error)
{
@@ -3899,7 +3895,7 @@ cp_build_array_ref (location_t loc, tree array, tree idx,
|= (CP_TYPE_VOLATILE_P (type) | TREE_SIDE_EFFECTS (array));
TREE_THIS_VOLATILE (rval)
|= (CP_TYPE_VOLATILE_P (type) | TREE_THIS_VOLATILE (array));
- ret = require_complete_type_sfinae (rval, complain);
+ ret = require_complete_type (rval, complain);
protected_set_expr_location (ret, loc);
if (non_lvalue)
ret = non_lvalue_loc (loc, ret);
@@ -4464,7 +4460,7 @@ convert_arguments (tree typelist, vec<tree, va_gc> **values, tree fndecl,
/* Don't do ellipsis conversion for __built_in_constant_p
as this will result in spurious errors for non-trivial
types. */
- val = require_complete_type_sfinae (val, complain);
+ val = require_complete_type (val, complain);
else
val = convert_arg_to_ellipsis (val, complain);
@@ -6264,7 +6260,7 @@ cp_build_binary_op (const op_location_t &location,
instrument_expr, result);
if (resultcode == SPACESHIP_EXPR && !processing_template_decl)
- result = get_target_expr_sfinae (result, complain);
+ result = get_target_expr (result, complain);
if (!c_inhibit_evaluation_warnings)
{
@@ -8016,7 +8012,7 @@ build_static_cast_1 (location_t loc, tree type, tree expr, bool c_cast_p,
/* [class.abstract]
An abstract class shall not be used ... as the type of an explicit
conversion. */
- if (abstract_virtuals_error_sfinae (ACU_CAST, type, complain))
+ if (abstract_virtuals_error (ACU_CAST, type, complain))
return error_mark_node;
/* [expr.static.cast]
@@ -9144,7 +9140,7 @@ cp_build_modify_expr (location_t loc, tree lhs, enum tree_code modifycode,
}
else
{
- lhs = require_complete_type_sfinae (lhs, complain);
+ lhs = require_complete_type (lhs, complain);
if (lhs == error_mark_node)
return error_mark_node;
@@ -10123,7 +10119,7 @@ convert_for_initialization (tree exp, tree type, tree rhs, int flags,
}
if (exp != 0)
- exp = require_complete_type_sfinae (exp, complain);
+ exp = require_complete_type (exp, complain);
if (exp == error_mark_node)
return error_mark_node;
diff --git a/gcc/cp/typeck2.cc b/gcc/cp/typeck2.cc
index 1a96be3..688e9c1 100644
--- a/gcc/cp/typeck2.cc
+++ b/gcc/cp/typeck2.cc
@@ -130,8 +130,8 @@ cxx_readonly_error (location_t loc, tree arg, enum lvalue_use errstring)
all was well. */
static int
-abstract_virtuals_error_sfinae (tree decl, tree type, abstract_class_use use,
- tsubst_flags_t complain)
+abstract_virtuals_error (tree decl, tree type, abstract_class_use use,
+ tsubst_flags_t complain)
{
vec<tree, va_gc> *pure;
@@ -251,32 +251,19 @@ abstract_virtuals_error_sfinae (tree decl, tree type, abstract_class_use use,
}
int
-abstract_virtuals_error_sfinae (tree decl, tree type, tsubst_flags_t complain)
-{
- return abstract_virtuals_error_sfinae (decl, type, ACU_UNKNOWN, complain);
-}
-
-int
-abstract_virtuals_error_sfinae (abstract_class_use use, tree type,
- tsubst_flags_t complain)
+abstract_virtuals_error (tree decl, tree type,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
- return abstract_virtuals_error_sfinae (NULL_TREE, type, use, complain);
+ return abstract_virtuals_error (decl, type, ACU_UNKNOWN, complain);
}
-
-/* Wrapper for the above function in the common case of wanting errors. */
-
int
-abstract_virtuals_error (tree decl, tree type)
+abstract_virtuals_error (abstract_class_use use, tree type,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
- return abstract_virtuals_error_sfinae (decl, type, tf_warning_or_error);
+ return abstract_virtuals_error (NULL_TREE, type, use, complain);
}
-int
-abstract_virtuals_error (abstract_class_use use, tree type)
-{
- return abstract_virtuals_error_sfinae (use, type, tf_warning_or_error);
-}
/* Print an inform about the declaration of the incomplete type TYPE. */
@@ -2502,7 +2489,7 @@ build_functional_cast_1 (location_t loc, tree exp, tree parms,
if (!complete_type_or_maybe_complain (type, NULL_TREE, complain))
return error_mark_node;
- if (abstract_virtuals_error_sfinae (ACU_CAST, type, complain))
+ if (abstract_virtuals_error (ACU_CAST, type, complain))
return error_mark_node;
/* [expr.type.conv]
@@ -2523,7 +2510,7 @@ build_functional_cast_1 (location_t loc, tree exp, tree parms,
if (parms == NULL_TREE)
{
exp = build_value_init (type, complain);
- exp = get_target_expr_sfinae (exp, complain);
+ exp = get_target_expr (exp, complain);
return exp;
}
diff --git a/gcc/d/ChangeLog b/gcc/d/ChangeLog
index eae3ac1..9bb29d3 100644
--- a/gcc/d/ChangeLog
+++ b/gcc/d/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * d-builtins.cc (d_build_c_type_nodes): Do not initialize
+ void_list_node.
+
2022-08-27 Iain Buclaw <ibuclaw@gdcproject.org>
* dmd/MERGE: Merge upstream dmd 817610b16d.
diff --git a/gcc/d/d-builtins.cc b/gcc/d/d-builtins.cc
index c2ef0c8..5997e5d 100644
--- a/gcc/d/d-builtins.cc
+++ b/gcc/d/d-builtins.cc
@@ -889,7 +889,6 @@ static GTY(()) tree signed_size_type_node;
static void
d_build_c_type_nodes (void)
{
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
string_type_node = build_pointer_type (char_type_node);
const_string_type_node
= build_pointer_type (build_qualified_type (char_type_node,
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 8a34690..46eeb98 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -1104,7 +1104,7 @@ add, subtract, multiply, divide; unary arithmetic operators;
relational operators; equality operators; and conversions to and from
integer and other floating types. Use a suffix @samp{w} or @samp{W}
in a literal constant of type @code{__float80} or type
-@code{__ibm128}. Use a suffix @samp{q} or @samp{Q} for @code{_float128}.
+@code{__ibm128}. Use a suffix @samp{q} or @samp{Q} for @code{__float128}.
In order to use @code{_Float128}, @code{__float128}, and @code{__ibm128}
on PowerPC Linux systems, you must use the @option{-mfloat128} option. It is
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index a28d3a0..a134df7 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -29840,6 +29840,13 @@ Generate (do not generate) code that will run in privileged state.
@opindex no-block-ops-unaligned-vsx
Generate (do not generate) unaligned vsx loads and stores for
inline expansion of @code{memcpy} and @code{memmove}.
+
+@item --param rs6000-vect-unroll-limit=
+The vectorizer will check with target information to determine whether it
+would be beneficial to unroll the main vectorized loop and by how much. This
+parameter sets the upper bound of how much the vectorizer will unroll the main
+loop. The default value is four.
+
@end table
@node RX Options
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 676f89f..f5f8ac0 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,31 @@
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * libgfortran.h: Declare GFC_FPE_AWAY.
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/106857
+ * simplify.cc (gfc_simplify_pack): Check for NULL pointer dereferences
+ while walking through constructors (error recovery).
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+ Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/104314
+ * resolve.cc (deferred_op_assign): Do not try to generate temporary
+ for deferred character length assignment if types do not agree.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * f95-lang.cc (gfc_init_decl_processing): Do not initialize
+ void_list_node.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * trans-openmp.cc (gfc_trans_omp_clauses): Don't create
+ GOMP_MAP_TO_PSET mappings for class metadata, nor GOMP_MAP_POINTER
+ mappings for POINTER_TYPE_P decls.
+
2022-09-10 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
PR fortran/95644
diff --git a/gcc/fortran/f95-lang.cc b/gcc/fortran/f95-lang.cc
index ff4bf80..a6750be 100644
--- a/gcc/fortran/f95-lang.cc
+++ b/gcc/fortran/f95-lang.cc
@@ -530,8 +530,6 @@ gfc_init_decl_processing (void)
only use it for actual characters, not for INTEGER(1). */
build_common_tree_nodes (false);
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
-
/* Set up F95 type nodes. */
gfc_init_kinds ();
gfc_init_types ();
diff --git a/gcc/fortran/libgfortran.h b/gcc/fortran/libgfortran.h
index ef06194..79a8c2f 100644
--- a/gcc/fortran/libgfortran.h
+++ b/gcc/fortran/libgfortran.h
@@ -60,6 +60,7 @@ along with GCC; see the file COPYING3. If not see
#define GFC_FPE_TONEAREST 2
#define GFC_FPE_TOWARDZERO 3
#define GFC_FPE_UPWARD 4
+#define GFC_FPE_AWAY 5
/* Size of the buffer required to store FPU state for any target.
In particular, this has to be larger than fenv_t on all glibc targets.
diff --git a/gcc/fortran/resolve.cc b/gcc/fortran/resolve.cc
index ca11475..ae7ebb6 100644
--- a/gcc/fortran/resolve.cc
+++ b/gcc/fortran/resolve.cc
@@ -11803,6 +11803,7 @@ deferred_op_assign (gfc_code **code, gfc_namespace *ns)
if (!((*code)->expr1->ts.type == BT_CHARACTER
&& (*code)->expr1->ts.deferred && (*code)->expr1->rank
+ && (*code)->expr2->ts.type == BT_CHARACTER
&& (*code)->expr2->expr_type == EXPR_OP))
return false;
diff --git a/gcc/fortran/simplify.cc b/gcc/fortran/simplify.cc
index bc178d5..140c177 100644
--- a/gcc/fortran/simplify.cc
+++ b/gcc/fortran/simplify.cc
@@ -6431,7 +6431,7 @@ gfc_simplify_pack (gfc_expr *array, gfc_expr *mask, gfc_expr *vector)
/* Copy only those elements of ARRAY to RESULT whose
MASK equals .TRUE.. */
mask_ctor = gfc_constructor_first (mask->value.constructor);
- while (mask_ctor)
+ while (mask_ctor && array_ctor)
{
if (mask_ctor->expr->value.logical)
{
diff --git a/gcc/fortran/trans-openmp.cc b/gcc/fortran/trans-openmp.cc
index 1be7d23..8e9d534 100644
--- a/gcc/fortran/trans-openmp.cc
+++ b/gcc/fortran/trans-openmp.cc
@@ -3125,30 +3125,16 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
tree present = gfc_omp_check_optional_argument (decl, true);
if (openacc && n->sym->ts.type == BT_CLASS)
{
- tree type = TREE_TYPE (decl);
if (n->sym->attr.optional)
sorry ("optional class parameter");
- if (POINTER_TYPE_P (type))
- {
- node4 = build_omp_clause (input_location,
- OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (node4, GOMP_MAP_POINTER);
- OMP_CLAUSE_DECL (node4) = decl;
- OMP_CLAUSE_SIZE (node4) = size_int (0);
- decl = build_fold_indirect_ref (decl);
- }
tree ptr = gfc_class_data_get (decl);
ptr = build_fold_indirect_ref (ptr);
OMP_CLAUSE_DECL (node) = ptr;
OMP_CLAUSE_SIZE (node) = gfc_class_vtab_size_get (decl);
node2 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (node2, GOMP_MAP_TO_PSET);
- OMP_CLAUSE_DECL (node2) = decl;
- OMP_CLAUSE_SIZE (node2) = TYPE_SIZE_UNIT (type);
- node3 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_ATTACH_DETACH);
- OMP_CLAUSE_DECL (node3) = gfc_class_data_get (decl);
- OMP_CLAUSE_SIZE (node3) = size_int (0);
+ OMP_CLAUSE_SET_MAP_KIND (node2, GOMP_MAP_ATTACH_DETACH);
+ OMP_CLAUSE_DECL (node2) = gfc_class_data_get (decl);
+ OMP_CLAUSE_SIZE (node2) = size_int (0);
goto finalize_map_clause;
}
else if (POINTER_TYPE_P (TREE_TYPE (decl))
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index a170478..9055cd8 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -5512,6 +5512,7 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace)
{
location_t loc = gimple_location (stmt);
gimple *new_stmt = gimple_build_builtin_unreachable (loc);
+ gimple_call_set_ctrl_altering (new_stmt, false);
/* If the call had a SSA name as lhs morph that into
an uninitialized value. */
if (lhs && TREE_CODE (lhs) == SSA_NAME)
diff --git a/gcc/gimple-range-fold.cc b/gcc/gimple-range-fold.cc
index 85ed6f9..a45fc7a 100644
--- a/gcc/gimple-range-fold.cc
+++ b/gcc/gimple-range-fold.cc
@@ -1030,7 +1030,7 @@ fold_using_range::range_of_builtin_int_call (irange &r, gcall *call,
if (src.get_operand (tmp, arg))
{
bool signbit;
- if (tmp.known_signbit (signbit))
+ if (tmp.signbit_p (signbit))
{
if (signbit)
r.set_nonzero (type);
diff --git a/gcc/gimplify.cc b/gcc/gimplify.cc
index dcdc852..4d032c6 100644
--- a/gcc/gimplify.cc
+++ b/gcc/gimplify.cc
@@ -125,12 +125,8 @@ enum gimplify_omp_var_data
/* Flag for GOVD_REDUCTION: inscan seen in {in,ex}clusive clause. */
GOVD_REDUCTION_INSCAN = 0x2000000,
- /* Flag for GOVD_MAP: (struct) vars that have pointer attachments for
- fields. */
- GOVD_MAP_HAS_ATTACHMENTS = 0x4000000,
-
/* Flag for GOVD_FIRSTPRIVATE: OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT. */
- GOVD_FIRSTPRIVATE_IMPLICIT = 0x8000000,
+ GOVD_FIRSTPRIVATE_IMPLICIT = 0x4000000,
GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
| GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR
@@ -8795,73 +8791,66 @@ gimplify_omp_depend (tree *list_p, gimple_seq *pre_p)
return 1;
}
-/* Insert a GOMP_MAP_ALLOC or GOMP_MAP_RELEASE node following a
- GOMP_MAP_STRUCT mapping. C is an always_pointer mapping. STRUCT_NODE is
- the struct node to insert the new mapping after (when the struct node is
- initially created). PREV_NODE is the first of two or three mappings for a
- pointer, and is either:
- - the node before C, when a pair of mappings is used, e.g. for a C/C++
- array section.
- - not the node before C. This is true when we have a reference-to-pointer
- type (with a mapping for the reference and for the pointer), or for
- Fortran derived-type mappings with a GOMP_MAP_TO_PSET.
- If SCP is non-null, the new node is inserted before *SCP.
- if SCP is null, the new node is inserted before PREV_NODE.
- The return type is:
- - PREV_NODE, if SCP is non-null.
- - The newly-created ALLOC or RELEASE node, if SCP is null.
- - The second newly-created ALLOC or RELEASE node, if we are mapping a
- reference to a pointer. */
+/* For a set of mappings describing an array section pointed to by a struct
+ (or derived type, etc.) component, create an "alloc" or "release" node to
+ insert into a list following a GOMP_MAP_STRUCT node. For some types of
+ mapping (e.g. Fortran arrays with descriptors), an additional mapping may
+ be created that is inserted into the list of mapping nodes attached to the
+ directive being processed -- not part of the sorted list of nodes after
+ GOMP_MAP_STRUCT.
+
+ CODE is the code of the directive being processed. GRP_START and GRP_END
+ are the first and last of two or three nodes representing this array section
+ mapping (e.g. a data movement node like GOMP_MAP_{TO,FROM}, optionally a
+ GOMP_MAP_TO_PSET, and finally a GOMP_MAP_ALWAYS_POINTER). EXTRA_NODE is
+ filled with the additional node described above, if needed.
+
+ This function does not add the new nodes to any lists itself. It is the
+ responsibility of the caller to do that. */
static tree
-insert_struct_comp_map (enum tree_code code, tree c, tree struct_node,
- tree prev_node, tree *scp)
+build_omp_struct_comp_nodes (enum tree_code code, tree grp_start, tree grp_end,
+ tree *extra_node)
{
enum gomp_map_kind mkind
= (code == OMP_TARGET_EXIT_DATA || code == OACC_EXIT_DATA)
? GOMP_MAP_RELEASE : GOMP_MAP_ALLOC;
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
- tree cl = scp ? prev_node : c2;
+ gcc_assert (grp_start != grp_end);
+
+ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end), OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
- OMP_CLAUSE_DECL (c2) = unshare_expr (OMP_CLAUSE_DECL (c));
- OMP_CLAUSE_CHAIN (c2) = scp ? *scp : prev_node;
- if (OMP_CLAUSE_CHAIN (prev_node) != c
- && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
- && (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
- == GOMP_MAP_TO_PSET))
- OMP_CLAUSE_SIZE (c2) = OMP_CLAUSE_SIZE (OMP_CLAUSE_CHAIN (prev_node));
+ OMP_CLAUSE_DECL (c2) = unshare_expr (OMP_CLAUSE_DECL (grp_end));
+ OMP_CLAUSE_CHAIN (c2) = NULL_TREE;
+ tree grp_mid = NULL_TREE;
+ if (OMP_CLAUSE_CHAIN (grp_start) != grp_end)
+ grp_mid = OMP_CLAUSE_CHAIN (grp_start);
+
+ if (grp_mid
+ && OMP_CLAUSE_CODE (grp_mid) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (grp_mid) == GOMP_MAP_TO_PSET)
+ OMP_CLAUSE_SIZE (c2) = OMP_CLAUSE_SIZE (grp_mid);
else
OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (ptr_type_node);
- if (struct_node)
- OMP_CLAUSE_CHAIN (struct_node) = c2;
-
- /* We might need to create an additional mapping if we have a reference to a
- pointer (in C++). Don't do this if we have something other than a
- GOMP_MAP_ALWAYS_POINTER though, i.e. a GOMP_MAP_TO_PSET. */
- if (OMP_CLAUSE_CHAIN (prev_node) != c
- && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
- && ((OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
- == GOMP_MAP_ALWAYS_POINTER)
- || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
- == GOMP_MAP_ATTACH_DETACH)))
- {
- tree c4 = OMP_CLAUSE_CHAIN (prev_node);
- tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
+
+ if (grp_mid
+ && OMP_CLAUSE_CODE (grp_mid) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (grp_mid) == GOMP_MAP_ALWAYS_POINTER
+ || OMP_CLAUSE_MAP_KIND (grp_mid) == GOMP_MAP_ATTACH_DETACH))
+ {
+ tree c3
+ = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end), OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c3, mkind);
- OMP_CLAUSE_DECL (c3) = unshare_expr (OMP_CLAUSE_DECL (c4));
+ OMP_CLAUSE_DECL (c3) = unshare_expr (OMP_CLAUSE_DECL (grp_mid));
OMP_CLAUSE_SIZE (c3) = TYPE_SIZE_UNIT (ptr_type_node);
- OMP_CLAUSE_CHAIN (c3) = prev_node;
- if (!scp)
- OMP_CLAUSE_CHAIN (c2) = c3;
- else
- cl = c3;
- }
+ OMP_CLAUSE_CHAIN (c3) = NULL_TREE;
- if (scp)
- *scp = c2;
+ *extra_node = c3;
+ }
+ else
+ *extra_node = NULL_TREE;
- return cl;
+ return c2;
}
/* Strip ARRAY_REFS or an indirect ref off BASE, find the containing object,
@@ -8872,8 +8861,8 @@ insert_struct_comp_map (enum tree_code code, tree c, tree struct_node,
has array type, else return NULL. */
static tree
-extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
- poly_offset_int *poffsetp, tree *offsetp)
+extract_base_bit_offset (tree base, poly_int64 *bitposp,
+ poly_offset_int *poffsetp)
{
tree offset;
poly_int64 bitsize, bitpos;
@@ -8881,44 +8870,12 @@ extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
int unsignedp, reversep, volatilep = 0;
poly_offset_int poffset;
- if (base_ref)
- {
- *base_ref = NULL_TREE;
-
- while (TREE_CODE (base) == ARRAY_REF)
- base = TREE_OPERAND (base, 0);
-
- if (TREE_CODE (base) == INDIRECT_REF)
- base = TREE_OPERAND (base, 0);
- }
- else
- {
- if (TREE_CODE (base) == ARRAY_REF)
- {
- while (TREE_CODE (base) == ARRAY_REF)
- base = TREE_OPERAND (base, 0);
- if (TREE_CODE (base) != COMPONENT_REF
- || TREE_CODE (TREE_TYPE (base)) != ARRAY_TYPE)
- return NULL_TREE;
- }
- else if (TREE_CODE (base) == INDIRECT_REF
- && TREE_CODE (TREE_OPERAND (base, 0)) == COMPONENT_REF
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
- == REFERENCE_TYPE))
- base = TREE_OPERAND (base, 0);
- }
+ STRIP_NOPS (base);
base = get_inner_reference (base, &bitsize, &bitpos, &offset, &mode,
&unsignedp, &reversep, &volatilep);
- tree orig_base = base;
-
- if ((TREE_CODE (base) == INDIRECT_REF
- || (TREE_CODE (base) == MEM_REF
- && integer_zerop (TREE_OPERAND (base, 1))))
- && DECL_P (TREE_OPERAND (base, 0))
- && TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0))) == REFERENCE_TYPE)
- base = TREE_OPERAND (base, 0);
+ STRIP_NOPS (base);
if (offset && poly_int_tree_p (offset))
{
@@ -8933,216 +8890,897 @@ extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
*bitposp = bitpos;
*poffsetp = poffset;
- *offsetp = offset;
-
- /* Set *BASE_REF if BASE was a dereferenced reference variable. */
- if (base_ref && orig_base != base)
- *base_ref = orig_base;
return base;
}
-/* Returns true if EXPR is or contains (as a sub-component) BASE_PTR. */
+/* Used for topological sorting of mapping groups. UNVISITED means we haven't
+ started processing the group yet. The TEMPORARY mark is used when we first
+ encounter a group on a depth-first traversal, and the PERMANENT mark is used
+ when we have processed all the group's children (i.e. all the base pointers
+ referred to by the group's mapping nodes, recursively). */
-static bool
-is_or_contains_p (tree expr, tree base_ptr)
+enum omp_tsort_mark {
+ UNVISITED,
+ TEMPORARY,
+ PERMANENT
+};
+
+/* A group of OMP_CLAUSE_MAP nodes that correspond to a single "map"
+ clause. */
+
+struct omp_mapping_group {
+ tree *grp_start;
+ tree grp_end;
+ omp_tsort_mark mark;
+ /* If we've removed the group but need to reindex, mark the group as
+ deleted. */
+ bool deleted;
+ struct omp_mapping_group *sibling;
+ struct omp_mapping_group *next;
+};
+
+DEBUG_FUNCTION void
+debug_mapping_group (omp_mapping_group *grp)
{
- if ((TREE_CODE (expr) == INDIRECT_REF && TREE_CODE (base_ptr) == MEM_REF)
- || (TREE_CODE (expr) == MEM_REF && TREE_CODE (base_ptr) == INDIRECT_REF))
- return operand_equal_p (TREE_OPERAND (expr, 0),
- TREE_OPERAND (base_ptr, 0));
- while (!operand_equal_p (expr, base_ptr))
- {
- if (TREE_CODE (base_ptr) == COMPOUND_EXPR)
- base_ptr = TREE_OPERAND (base_ptr, 1);
- if (TREE_CODE (base_ptr) == COMPONENT_REF
- || TREE_CODE (base_ptr) == POINTER_PLUS_EXPR
- || TREE_CODE (base_ptr) == SAVE_EXPR)
- base_ptr = TREE_OPERAND (base_ptr, 0);
- else
- break;
+ tree tmp = OMP_CLAUSE_CHAIN (grp->grp_end);
+ OMP_CLAUSE_CHAIN (grp->grp_end) = NULL;
+ debug_generic_expr (*grp->grp_start);
+ OMP_CLAUSE_CHAIN (grp->grp_end) = tmp;
+}
+
+/* Return the OpenMP "base pointer" of an expression EXPR, or NULL if there
+ isn't one. */
+
+static tree
+omp_get_base_pointer (tree expr)
+{
+ while (TREE_CODE (expr) == ARRAY_REF
+ || TREE_CODE (expr) == COMPONENT_REF)
+ expr = TREE_OPERAND (expr, 0);
+
+ if (TREE_CODE (expr) == INDIRECT_REF
+ || (TREE_CODE (expr) == MEM_REF
+ && integer_zerop (TREE_OPERAND (expr, 1))))
+ {
+ expr = TREE_OPERAND (expr, 0);
+ while (TREE_CODE (expr) == COMPOUND_EXPR)
+ expr = TREE_OPERAND (expr, 1);
+ if (TREE_CODE (expr) == POINTER_PLUS_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+ if (TREE_CODE (expr) == SAVE_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+ STRIP_NOPS (expr);
+ return expr;
}
- return operand_equal_p (expr, base_ptr);
+
+ return NULL_TREE;
}
-/* Implement OpenMP 5.x map ordering rules for target directives. There are
- several rules, and with some level of ambiguity, hopefully we can at least
- collect the complexity here in one place. */
+/* Remove COMPONENT_REFS and indirections from EXPR. */
-static void
-omp_target_reorder_clauses (tree *list_p)
+static tree
+omp_strip_components_and_deref (tree expr)
{
- /* Collect refs to alloc/release/delete maps. */
- auto_vec<tree, 32> ard;
- tree *cp = list_p;
- while (*cp != NULL_TREE)
- if (OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP
- && (OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ALLOC
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_RELEASE
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_DELETE))
- {
- /* Unlink cp and push to ard. */
- tree c = *cp;
- tree nc = OMP_CLAUSE_CHAIN (c);
- *cp = nc;
- ard.safe_push (c);
-
- /* Any associated pointer type maps should also move along. */
- while (*cp != NULL_TREE
- && OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP
- && (OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_FIRSTPRIVATE_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ATTACH_DETACH
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ALWAYS_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_TO_PSET))
+ while (TREE_CODE (expr) == COMPONENT_REF
+ || TREE_CODE (expr) == INDIRECT_REF
+ || (TREE_CODE (expr) == MEM_REF
+ && integer_zerop (TREE_OPERAND (expr, 1)))
+ || TREE_CODE (expr) == POINTER_PLUS_EXPR
+ || TREE_CODE (expr) == COMPOUND_EXPR)
+ if (TREE_CODE (expr) == COMPOUND_EXPR)
+ expr = TREE_OPERAND (expr, 1);
+ else
+ expr = TREE_OPERAND (expr, 0);
+
+ STRIP_NOPS (expr);
+
+ return expr;
+}
+
+static tree
+omp_strip_indirections (tree expr)
+{
+ while (TREE_CODE (expr) == INDIRECT_REF
+ || (TREE_CODE (expr) == MEM_REF
+ && integer_zerop (TREE_OPERAND (expr, 1))))
+ expr = TREE_OPERAND (expr, 0);
+
+ return expr;
+}
+
+/* An attach or detach operation depends directly on the address being
+ attached/detached. Return that address, or none if there are no
+ attachments/detachments. */
+
+static tree
+omp_get_attachment (omp_mapping_group *grp)
+{
+ tree node = *grp->grp_start;
+
+ switch (OMP_CLAUSE_MAP_KIND (node))
+ {
+ case GOMP_MAP_TO:
+ case GOMP_MAP_FROM:
+ case GOMP_MAP_TOFROM:
+ case GOMP_MAP_ALWAYS_FROM:
+ case GOMP_MAP_ALWAYS_TO:
+ case GOMP_MAP_ALWAYS_TOFROM:
+ case GOMP_MAP_FORCE_FROM:
+ case GOMP_MAP_FORCE_TO:
+ case GOMP_MAP_FORCE_TOFROM:
+ case GOMP_MAP_FORCE_PRESENT:
+ case GOMP_MAP_ALLOC:
+ case GOMP_MAP_RELEASE:
+ case GOMP_MAP_DELETE:
+ case GOMP_MAP_FORCE_ALLOC:
+ if (node == grp->grp_end)
+ return NULL_TREE;
+
+ node = OMP_CLAUSE_CHAIN (node);
+ if (node && OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_TO_PSET)
+ {
+ gcc_assert (node != grp->grp_end);
+ node = OMP_CLAUSE_CHAIN (node);
+ }
+ if (node)
+ switch (OMP_CLAUSE_MAP_KIND (node))
{
- c = *cp;
- nc = OMP_CLAUSE_CHAIN (c);
- *cp = nc;
- ard.safe_push (c);
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_ALWAYS_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ return NULL_TREE;
+
+ case GOMP_MAP_ATTACH_DETACH:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return OMP_CLAUSE_DECL (node);
+
+ default:
+ internal_error ("unexpected mapping node");
}
- }
- else
- cp = &OMP_CLAUSE_CHAIN (*cp);
+ return error_mark_node;
+
+ case GOMP_MAP_TO_PSET:
+ gcc_assert (node != grp->grp_end);
+ node = OMP_CLAUSE_CHAIN (node);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_DETACH)
+ return OMP_CLAUSE_DECL (node);
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_ATTACH:
+ case GOMP_MAP_DETACH:
+ node = OMP_CLAUSE_CHAIN (node);
+ if (!node || *grp->grp_start == grp->grp_end)
+ return OMP_CLAUSE_DECL (*grp->grp_start);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
+ return OMP_CLAUSE_DECL (*grp->grp_start);
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_STRUCT:
+ case GOMP_MAP_FORCE_DEVICEPTR:
+ case GOMP_MAP_DEVICE_RESIDENT:
+ case GOMP_MAP_LINK:
+ case GOMP_MAP_IF_PRESENT:
+ case GOMP_MAP_FIRSTPRIVATE:
+ case GOMP_MAP_FIRSTPRIVATE_INT:
+ case GOMP_MAP_USE_DEVICE_PTR:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return NULL_TREE;
- /* Link alloc/release/delete maps to the end of list. */
- for (unsigned int i = 0; i < ard.length (); i++)
- {
- *cp = ard[i];
- cp = &OMP_CLAUSE_CHAIN (ard[i]);
+ default:
+ internal_error ("unexpected mapping node");
}
- *cp = NULL_TREE;
- /* OpenMP 5.0 requires that pointer variables are mapped before
- its use as a base-pointer. */
- auto_vec<tree *, 32> atf;
- for (tree *cp = list_p; *cp; cp = &OMP_CLAUSE_CHAIN (*cp))
- if (OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP)
+ return error_mark_node;
+}
+
+/* Given a pointer START_P to the start of a group of related (e.g. pointer)
+ mappings, return the chain pointer to the end of that group in the list. */
+
+static tree *
+omp_group_last (tree *start_p)
+{
+ tree c = *start_p, nc, *grp_last_p = start_p;
+
+ gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP);
+
+ nc = OMP_CLAUSE_CHAIN (c);
+
+ if (!nc || OMP_CLAUSE_CODE (nc) != OMP_CLAUSE_MAP)
+ return grp_last_p;
+
+ switch (OMP_CLAUSE_MAP_KIND (c))
+ {
+ default:
+ while (nc
+ && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ATTACH_DETACH
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
+ || (OMP_CLAUSE_MAP_KIND (nc)
+ == GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION)
+ || (OMP_CLAUSE_MAP_KIND (nc)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION)
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ALWAYS_POINTER
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_TO_PSET))
+ {
+ grp_last_p = &OMP_CLAUSE_CHAIN (c);
+ c = nc;
+ tree nc2 = OMP_CLAUSE_CHAIN (nc);
+ if (nc2
+ && OMP_CLAUSE_CODE (nc2) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc)
+ == GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION)
+ && OMP_CLAUSE_MAP_KIND (nc2) == GOMP_MAP_ATTACH)
+ {
+ grp_last_p = &OMP_CLAUSE_CHAIN (nc);
+ c = nc2;
+ nc2 = OMP_CLAUSE_CHAIN (nc2);
+ }
+ nc = nc2;
+ }
+ break;
+
+ case GOMP_MAP_ATTACH:
+ case GOMP_MAP_DETACH:
+ /* This is a weird artifact of how directives are parsed: bare attach or
+ detach clauses get a subsequent (meaningless) FIRSTPRIVATE_POINTER or
+ FIRSTPRIVATE_REFERENCE node. FIXME. */
+ if (nc
+ && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_POINTER))
+ grp_last_p = &OMP_CLAUSE_CHAIN (c);
+ break;
+
+ case GOMP_MAP_TO_PSET:
+ if (OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_DETACH))
+ grp_last_p = &OMP_CLAUSE_CHAIN (c);
+ break;
+
+ case GOMP_MAP_STRUCT:
{
- /* Collect alloc, to, from, to/from clause tree pointers. */
- gomp_map_kind k = OMP_CLAUSE_MAP_KIND (*cp);
- if (k == GOMP_MAP_ALLOC
- || k == GOMP_MAP_TO
- || k == GOMP_MAP_FROM
- || k == GOMP_MAP_TOFROM
- || k == GOMP_MAP_ALWAYS_TO
- || k == GOMP_MAP_ALWAYS_FROM
- || k == GOMP_MAP_ALWAYS_TOFROM)
- atf.safe_push (cp);
+ unsigned HOST_WIDE_INT num_mappings
+ = tree_to_uhwi (OMP_CLAUSE_SIZE (c));
+ if (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ATTACH_DETACH)
+ grp_last_p = &OMP_CLAUSE_CHAIN (*grp_last_p);
+ for (unsigned i = 0; i < num_mappings; i++)
+ grp_last_p = &OMP_CLAUSE_CHAIN (*grp_last_p);
}
+ break;
+ }
- for (unsigned int i = 0; i < atf.length (); i++)
- if (atf[i])
- {
- tree *cp = atf[i];
- tree decl = OMP_CLAUSE_DECL (*cp);
- if (TREE_CODE (decl) == INDIRECT_REF || TREE_CODE (decl) == MEM_REF)
- {
- tree base_ptr = TREE_OPERAND (decl, 0);
- STRIP_TYPE_NOPS (base_ptr);
- for (unsigned int j = i + 1; j < atf.length (); j++)
- if (atf[j])
- {
- tree *cp2 = atf[j];
- tree decl2 = OMP_CLAUSE_DECL (*cp2);
+ return grp_last_p;
+}
- decl2 = OMP_CLAUSE_DECL (*cp2);
- if (is_or_contains_p (decl2, base_ptr))
- {
- /* Move *cp2 to before *cp. */
- tree c = *cp2;
- *cp2 = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *cp;
- *cp = c;
-
- if (*cp2 != NULL_TREE
- && OMP_CLAUSE_CODE (*cp2) == OMP_CLAUSE_MAP
- && OMP_CLAUSE_MAP_KIND (*cp2) == GOMP_MAP_ALWAYS_POINTER)
- {
- tree c2 = *cp2;
- *cp2 = OMP_CLAUSE_CHAIN (c2);
- OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = c2;
- }
+/* Walk through LIST_P, and return a list of groups of mappings found (e.g.
+   OMP_CLAUSE_MAP with GOMP_MAP_{TO/FROM/TOFROM} followed by one or two
+   associated GOMP_MAP_POINTER mappings). Return a vector of omp_mapping_group
+   if we have at least one such group, else return NULL. */
- atf[j] = NULL;
- }
- }
+static void
+omp_gather_mapping_groups_1 (tree *list_p, vec<omp_mapping_group> *groups,
+ tree gather_sentinel)
+{
+ for (tree *cp = list_p;
+ *cp && *cp != gather_sentinel;
+ cp = &OMP_CLAUSE_CHAIN (*cp))
+ {
+ if (OMP_CLAUSE_CODE (*cp) != OMP_CLAUSE_MAP)
+ continue;
+
+ tree *grp_last_p = omp_group_last (cp);
+ omp_mapping_group grp;
+
+ grp.grp_start = cp;
+ grp.grp_end = *grp_last_p;
+ grp.mark = UNVISITED;
+ grp.sibling = NULL;
+ grp.deleted = false;
+ grp.next = NULL;
+ groups->safe_push (grp);
+
+ cp = grp_last_p;
+ }
+}
+
+static vec<omp_mapping_group> *
+omp_gather_mapping_groups (tree *list_p)
+{
+ vec<omp_mapping_group> *groups = new vec<omp_mapping_group> ();
+
+ omp_gather_mapping_groups_1 (list_p, groups, NULL_TREE);
+
+ if (groups->length () > 0)
+ return groups;
+ else
+ {
+ delete groups;
+ return NULL;
+ }
+}
+
+/* A pointer mapping group GRP may define a block of memory starting at some
+ base address, and maybe also define a firstprivate pointer or firstprivate
+ reference that points to that block. The return value is a node containing
+ the former, and the *FIRSTPRIVATE pointer is set if we have the latter.
+ If we define several base pointers, i.e. for a GOMP_MAP_STRUCT mapping,
+ return the number of consecutive chained nodes in CHAINED. */
+
+static tree
+omp_group_base (omp_mapping_group *grp, unsigned int *chained,
+ tree *firstprivate)
+{
+ tree node = *grp->grp_start;
+
+ *firstprivate = NULL_TREE;
+ *chained = 1;
+
+ switch (OMP_CLAUSE_MAP_KIND (node))
+ {
+ case GOMP_MAP_TO:
+ case GOMP_MAP_FROM:
+ case GOMP_MAP_TOFROM:
+ case GOMP_MAP_ALWAYS_FROM:
+ case GOMP_MAP_ALWAYS_TO:
+ case GOMP_MAP_ALWAYS_TOFROM:
+ case GOMP_MAP_FORCE_FROM:
+ case GOMP_MAP_FORCE_TO:
+ case GOMP_MAP_FORCE_TOFROM:
+ case GOMP_MAP_FORCE_PRESENT:
+ case GOMP_MAP_ALLOC:
+ case GOMP_MAP_RELEASE:
+ case GOMP_MAP_DELETE:
+ case GOMP_MAP_FORCE_ALLOC:
+ if (node == grp->grp_end)
+ return node;
+
+ node = OMP_CLAUSE_CHAIN (node);
+ if (node && OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_TO_PSET)
+ {
+ if (node == grp->grp_end)
+ return *grp->grp_start;
+ node = OMP_CLAUSE_CHAIN (node);
+ }
+ if (node)
+ switch (OMP_CLAUSE_MAP_KIND (node))
+ {
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ *firstprivate = OMP_CLAUSE_DECL (node);
+ return *grp->grp_start;
+
+ case GOMP_MAP_ALWAYS_POINTER:
+ case GOMP_MAP_ATTACH_DETACH:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return *grp->grp_start;
+
+ default:
+ internal_error ("unexpected mapping node");
}
- }
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_TO_PSET:
+ gcc_assert (node != grp->grp_end);
+ node = OMP_CLAUSE_CHAIN (node);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_DETACH)
+ return NULL_TREE;
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_ATTACH:
+ case GOMP_MAP_DETACH:
+ node = OMP_CLAUSE_CHAIN (node);
+ if (!node || *grp->grp_start == grp->grp_end)
+ return NULL_TREE;
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
+ {
+ /* We're mapping the base pointer itself in a bare attach or detach
+ node. This is a side effect of how parsing works, and the mapping
+ will be removed anyway (at least for enter/exit data directives).
+ We should ignore the mapping here. FIXME. */
+ return NULL_TREE;
+ }
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
- /* For attach_detach map clauses, if there is another map that maps the
- attached/detached pointer, make sure that map is ordered before the
- attach_detach. */
- atf.truncate (0);
- for (tree *cp = list_p; *cp; cp = &OMP_CLAUSE_CHAIN (*cp))
- if (OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP)
+ case GOMP_MAP_STRUCT:
{
- /* Collect alloc, to, from, to/from clauses, and
- always_pointer/attach_detach clauses. */
- gomp_map_kind k = OMP_CLAUSE_MAP_KIND (*cp);
- if (k == GOMP_MAP_ALLOC
- || k == GOMP_MAP_TO
- || k == GOMP_MAP_FROM
- || k == GOMP_MAP_TOFROM
- || k == GOMP_MAP_ALWAYS_TO
- || k == GOMP_MAP_ALWAYS_FROM
- || k == GOMP_MAP_ALWAYS_TOFROM
- || k == GOMP_MAP_ATTACH_DETACH
- || k == GOMP_MAP_ALWAYS_POINTER)
- atf.safe_push (cp);
+ unsigned HOST_WIDE_INT num_mappings
+ = tree_to_uhwi (OMP_CLAUSE_SIZE (node));
+ node = OMP_CLAUSE_CHAIN (node);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
+ {
+ *firstprivate = OMP_CLAUSE_DECL (node);
+ node = OMP_CLAUSE_CHAIN (node);
+ }
+ *chained = num_mappings;
+ return node;
}
- for (unsigned int i = 0; i < atf.length (); i++)
- if (atf[i])
- {
- tree *cp = atf[i];
- tree ptr = OMP_CLAUSE_DECL (*cp);
- STRIP_TYPE_NOPS (ptr);
- if (OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ATTACH_DETACH)
- for (unsigned int j = i + 1; j < atf.length (); j++)
+ case GOMP_MAP_FORCE_DEVICEPTR:
+ case GOMP_MAP_DEVICE_RESIDENT:
+ case GOMP_MAP_LINK:
+ case GOMP_MAP_IF_PRESENT:
+ case GOMP_MAP_FIRSTPRIVATE:
+ case GOMP_MAP_FIRSTPRIVATE_INT:
+ case GOMP_MAP_USE_DEVICE_PTR:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return NULL_TREE;
+
+ case GOMP_MAP_FIRSTPRIVATE_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_ALWAYS_POINTER:
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ /* These shouldn't appear by themselves. */
+ if (!seen_error ())
+ internal_error ("unexpected pointer mapping node");
+ return error_mark_node;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return error_mark_node;
+}
+
+/* Given a vector of omp_mapping_groups, build a hash table so we can look up
+ nodes by tree_operand_hash. */
+
+static void
+omp_index_mapping_groups_1 (hash_map<tree_operand_hash,
+ omp_mapping_group *> *grpmap,
+ vec<omp_mapping_group> *groups,
+ tree reindex_sentinel)
+{
+ omp_mapping_group *grp;
+ unsigned int i;
+ bool reindexing = reindex_sentinel != NULL_TREE, above_hwm = false;
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ if (reindexing && *grp->grp_start == reindex_sentinel)
+ above_hwm = true;
+
+ if (reindexing && !above_hwm)
+ continue;
+
+ tree fpp;
+ unsigned int chained;
+ tree node = omp_group_base (grp, &chained, &fpp);
+
+ if (node == error_mark_node || (!node && !fpp))
+ continue;
+
+ for (unsigned j = 0;
+ node && j < chained;
+ node = OMP_CLAUSE_CHAIN (node), j++)
+ {
+ tree decl = OMP_CLAUSE_DECL (node);
+
+ /* Sometimes we see zero-offset MEM_REF instead of INDIRECT_REF,
+ meaning node-hash lookups don't work. This is a workaround for
+ that, but ideally we should just create the INDIRECT_REF at
+ source instead. FIXME. */
+ if (TREE_CODE (decl) == MEM_REF
+ && integer_zerop (TREE_OPERAND (decl, 1)))
+ decl = build_fold_indirect_ref (TREE_OPERAND (decl, 0));
+
+ omp_mapping_group **prev = grpmap->get (decl);
+
+ if (prev && *prev == grp)
+ /* Empty. */;
+ else if (prev)
{
- tree *cp2 = atf[j];
- tree decl2 = OMP_CLAUSE_DECL (*cp2);
- if (OMP_CLAUSE_MAP_KIND (*cp2) != GOMP_MAP_ATTACH_DETACH
- && OMP_CLAUSE_MAP_KIND (*cp2) != GOMP_MAP_ALWAYS_POINTER
- && is_or_contains_p (decl2, ptr))
- {
- /* Move *cp2 to before *cp. */
- tree c = *cp2;
- *cp2 = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *cp;
- *cp = c;
- atf[j] = NULL;
-
- /* If decl2 is of the form '*decl2_opnd0', and followed by an
- ALWAYS_POINTER or ATTACH_DETACH of 'decl2_opnd0', move the
- pointer operation along with *cp2. This can happen for C++
- reference sequences. */
- if (j + 1 < atf.length ()
- && (TREE_CODE (decl2) == INDIRECT_REF
- || TREE_CODE (decl2) == MEM_REF))
- {
- tree *cp3 = atf[j + 1];
- tree decl3 = OMP_CLAUSE_DECL (*cp3);
- tree decl2_opnd0 = TREE_OPERAND (decl2, 0);
- if ((OMP_CLAUSE_MAP_KIND (*cp3) == GOMP_MAP_ALWAYS_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp3) == GOMP_MAP_ATTACH_DETACH)
- && operand_equal_p (decl3, decl2_opnd0))
- {
- /* Also move *cp3 to before *cp. */
- c = *cp3;
- *cp2 = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *cp;
- *cp = c;
- atf[j + 1] = NULL;
- j += 1;
- }
- }
- }
+ /* Mapping the same thing twice is normally diagnosed as an error,
+ but can happen under some circumstances, e.g. in pr99928-16.c,
+ the directive:
+
+ #pragma omp target simd reduction(+:a[:3]) \
+ map(always, tofrom: a[:6])
+ ...
+
+ will result in two "a[0]" mappings (of different sizes). */
+
+ grp->sibling = (*prev)->sibling;
+ (*prev)->sibling = grp;
}
- }
+ else
+ grpmap->put (decl, grp);
+ }
+
+ if (!fpp)
+ continue;
+
+ omp_mapping_group **prev = grpmap->get (fpp);
+ if (prev && *prev != grp)
+ {
+ grp->sibling = (*prev)->sibling;
+ (*prev)->sibling = grp;
+ }
+ else
+ grpmap->put (fpp, grp);
+ }
+}
+
+static hash_map<tree_operand_hash, omp_mapping_group *> *
+omp_index_mapping_groups (vec<omp_mapping_group> *groups)
+{
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap
+ = new hash_map<tree_operand_hash, omp_mapping_group *>;
+
+ omp_index_mapping_groups_1 (grpmap, groups, NULL_TREE);
+
+ return grpmap;
+}
+
+/* Rebuild group map from partially-processed clause list (during
+ omp_build_struct_sibling_lists). We have already processed nodes up until
+ a high-water mark (HWM). This is a bit tricky because the list is being
+ reordered as it is scanned, but we know:
+
+ 1. The list after HWM has not been touched yet, so we can reindex it safely.
+
+ 2. The list before and including HWM has been altered, but remains
+ well-formed throughout the sibling-list building operation.
+
+ so, we can do the reindex operation in two parts, on the processed and
+ then the unprocessed halves of the list. */
+
+static hash_map<tree_operand_hash, omp_mapping_group *> *
+omp_reindex_mapping_groups (tree *list_p,
+ vec<omp_mapping_group> *groups,
+ vec<omp_mapping_group> *processed_groups,
+ tree sentinel)
+{
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap
+ = new hash_map<tree_operand_hash, omp_mapping_group *>;
+
+ processed_groups->truncate (0);
+
+ omp_gather_mapping_groups_1 (list_p, processed_groups, sentinel);
+ omp_index_mapping_groups_1 (grpmap, processed_groups, NULL_TREE);
+ if (sentinel)
+ omp_index_mapping_groups_1 (grpmap, groups, sentinel);
+
+ return grpmap;
+}
+
+/* Find the immediately-containing struct for a component ref (etc.)
+ expression EXPR. */
+
+static tree
+omp_containing_struct (tree expr)
+{
+ tree expr0 = expr;
+
+ STRIP_NOPS (expr);
+
+ /* Note: don't strip NOPs unless we're also stripping off array refs or a
+ component ref. */
+ if (TREE_CODE (expr) != ARRAY_REF && TREE_CODE (expr) != COMPONENT_REF)
+ return expr0;
+
+ while (TREE_CODE (expr) == ARRAY_REF)
+ expr = TREE_OPERAND (expr, 0);
+
+ if (TREE_CODE (expr) == COMPONENT_REF)
+ expr = TREE_OPERAND (expr, 0);
+
+ return expr;
+}
+
+/* Return TRUE if DECL describes a component that is part of a whole structure
+ that is mapped elsewhere in GRPMAP. *MAPPED_BY_GROUP is set to the group
+ that maps that structure, if present. */
+
+static bool
+omp_mapped_by_containing_struct (hash_map<tree_operand_hash,
+ omp_mapping_group *> *grpmap,
+ tree decl,
+ omp_mapping_group **mapped_by_group)
+{
+ tree wsdecl = NULL_TREE;
+
+ *mapped_by_group = NULL;
+
+ while (true)
+ {
+ wsdecl = omp_containing_struct (decl);
+ if (wsdecl == decl)
+ break;
+ omp_mapping_group **wholestruct = grpmap->get (wsdecl);
+ if (!wholestruct
+ && TREE_CODE (wsdecl) == MEM_REF
+ && integer_zerop (TREE_OPERAND (wsdecl, 1)))
+ {
+ tree deref = TREE_OPERAND (wsdecl, 0);
+ deref = build_fold_indirect_ref (deref);
+ wholestruct = grpmap->get (deref);
+ }
+ if (wholestruct)
+ {
+ *mapped_by_group = *wholestruct;
+ return true;
+ }
+ decl = wsdecl;
+ }
+
+ return false;
+}
+
+/* Helper function for omp_tsort_mapping_groups. Returns TRUE on success, or
+ FALSE on error. */
+
+static bool
+omp_tsort_mapping_groups_1 (omp_mapping_group ***outlist,
+ vec<omp_mapping_group> *groups,
+ hash_map<tree_operand_hash, omp_mapping_group *>
+ *grpmap,
+ omp_mapping_group *grp)
+{
+ if (grp->mark == PERMANENT)
+ return true;
+ if (grp->mark == TEMPORARY)
+ {
+ fprintf (stderr, "when processing group:\n");
+ debug_mapping_group (grp);
+ internal_error ("base pointer cycle detected");
+ return false;
+ }
+ grp->mark = TEMPORARY;
+
+ tree attaches_to = omp_get_attachment (grp);
+
+ if (attaches_to)
+ {
+ omp_mapping_group **basep = grpmap->get (attaches_to);
+
+ if (basep && *basep != grp)
+ {
+ for (omp_mapping_group *w = *basep; w; w = w->sibling)
+ if (!omp_tsort_mapping_groups_1 (outlist, groups, grpmap, w))
+ return false;
+ }
+ }
+
+ tree decl = OMP_CLAUSE_DECL (*grp->grp_start);
+
+ while (decl)
+ {
+ tree base = omp_get_base_pointer (decl);
+
+ if (!base)
+ break;
+
+ omp_mapping_group **innerp = grpmap->get (base);
+ omp_mapping_group *wholestruct;
+
+ /* We should treat whole-structure mappings as if all (pointer, in this
+ case) members are mapped as individual list items. Check if we have
+ such a whole-structure mapping, if we don't have an explicit reference
+ to the pointer member itself. */
+ if (!innerp
+ && TREE_CODE (base) == COMPONENT_REF
+ && omp_mapped_by_containing_struct (grpmap, base, &wholestruct))
+ innerp = &wholestruct;
+
+ if (innerp && *innerp != grp)
+ {
+ for (omp_mapping_group *w = *innerp; w; w = w->sibling)
+ if (!omp_tsort_mapping_groups_1 (outlist, groups, grpmap, w))
+ return false;
+ break;
+ }
+
+ decl = base;
+ }
+
+ grp->mark = PERMANENT;
+
+ /* Emit grp to output list. */
+
+ **outlist = grp;
+ *outlist = &grp->next;
+
+ return true;
+}
+
+/* Topologically sort GROUPS, so that OMP 5.0-defined base pointers come
+ before mappings that use those pointers. This is an implementation of the
+ depth-first search algorithm, described e.g. at:
+
+ https://en.wikipedia.org/wiki/Topological_sorting
+*/
+
+static omp_mapping_group *
+omp_tsort_mapping_groups (vec<omp_mapping_group> *groups,
+ hash_map<tree_operand_hash, omp_mapping_group *>
+ *grpmap)
+{
+ omp_mapping_group *grp, *outlist = NULL, **cursor;
+ unsigned int i;
+
+ cursor = &outlist;
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ if (grp->mark != PERMANENT)
+ if (!omp_tsort_mapping_groups_1 (&cursor, groups, grpmap, grp))
+ return NULL;
+ }
+
+ return outlist;
+}
+
+/* Split INLIST into two parts, moving groups corresponding to
+ ALLOC/RELEASE/DELETE mappings to one list, and other mappings to another.
+ The former list is then appended to the latter. Each sub-list retains the
+ order of the original list.
+ Note that ATTACH nodes are later moved to the end of the list in
+ gimplify_adjust_omp_clauses, for target regions. */
+
+static omp_mapping_group *
+omp_segregate_mapping_groups (omp_mapping_group *inlist)
+{
+ omp_mapping_group *ard_groups = NULL, *tf_groups = NULL;
+ omp_mapping_group **ard_tail = &ard_groups, **tf_tail = &tf_groups;
+
+ for (omp_mapping_group *w = inlist; w;)
+ {
+ tree c = *w->grp_start;
+ omp_mapping_group *next = w->next;
+
+ gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP);
+
+ switch (OMP_CLAUSE_MAP_KIND (c))
+ {
+ case GOMP_MAP_ALLOC:
+ case GOMP_MAP_RELEASE:
+ case GOMP_MAP_DELETE:
+ *ard_tail = w;
+ w->next = NULL;
+ ard_tail = &w->next;
+ break;
+
+ default:
+ *tf_tail = w;
+ w->next = NULL;
+ tf_tail = &w->next;
+ }
+
+ w = next;
+ }
+
+ /* Now splice the lists together... */
+ *tf_tail = ard_groups;
+
+ return tf_groups;
+}
+
+/* Given a list LIST_P containing groups of mappings given by GROUPS, reorder
+ those groups based on the output list of omp_tsort_mapping_groups --
+ singly-linked, threaded through each element's NEXT pointer starting at
+ HEAD. Each list element appears exactly once in that linked list.
+
+ Each element of GROUPS may correspond to one or several mapping nodes.
+ Node groups are kept together, and in the reordered list, the positions of
+ the original groups are reused for the positions of the reordered list.
+ Hence if we have e.g.
+
+ {to ptr ptr} firstprivate {tofrom ptr} ...
+ ^ ^ ^
+ first group non-"map" second group
+
+ and say the second group contains a base pointer for the first so must be
+ moved before it, the resulting list will contain:
+
+ {tofrom ptr} firstprivate {to ptr ptr} ...
+ ^ prev. second group ^ prev. first group
+*/
+
+static tree *
+omp_reorder_mapping_groups (vec<omp_mapping_group> *groups,
+ omp_mapping_group *head,
+ tree *list_p)
+{
+ omp_mapping_group *grp;
+ unsigned int i;
+ unsigned numgroups = groups->length ();
+ auto_vec<tree> old_heads (numgroups);
+ auto_vec<tree *> old_headps (numgroups);
+ auto_vec<tree> new_heads (numgroups);
+ auto_vec<tree> old_succs (numgroups);
+ bool map_at_start = (list_p == (*groups)[0].grp_start);
+
+ tree *new_grp_tail = NULL;
+
+ /* Stash the start & end nodes of each mapping group before we start
+ modifying the list. */
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ old_headps.quick_push (grp->grp_start);
+ old_heads.quick_push (*grp->grp_start);
+ old_succs.quick_push (OMP_CLAUSE_CHAIN (grp->grp_end));
+ }
+
+ /* And similarly, the heads of the groups in the order we want to rearrange
+ the list to. */
+ for (omp_mapping_group *w = head; w; w = w->next)
+ new_heads.quick_push (*w->grp_start);
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ gcc_assert (head);
+
+ if (new_grp_tail && old_succs[i - 1] == old_heads[i])
+ {
+ /* a {b c d} {e f g} h i j (original)
+ -->
+ a {k l m} {e f g} h i j (inserted new group on last iter)
+ -->
+ a {k l m} {n o p} h i j (this time, chain last group to new one)
+ ^new_grp_tail
+ */
+ *new_grp_tail = new_heads[i];
+ }
+ else if (new_grp_tail)
+ {
+ /* a {b c d} e {f g h} i j k (original)
+ -->
+ a {l m n} e {f g h} i j k (gap after last iter's group)
+ -->
+ a {l m n} e {o p q} h i j (chain last group to old successor)
+ ^new_grp_tail
+ */
+ *new_grp_tail = old_succs[i - 1];
+ *old_headps[i] = new_heads[i];
+ }
+ else
+ {
+ /* The first inserted group -- point to new group, and leave end
+ open.
+ a {b c d} e f
+ -->
+ a {g h i...
+ */
+ *grp->grp_start = new_heads[i];
+ }
+
+ new_grp_tail = &OMP_CLAUSE_CHAIN (head->grp_end);
+
+ head = head->next;
+ }
+
+ if (new_grp_tail)
+ *new_grp_tail = old_succs[numgroups - 1];
+
+ gcc_assert (!head);
+
+ return map_at_start ? (*groups)[0].grp_start : list_p;
}
/* DECL is supposed to have lastprivate semantics in the outer contexts
@@ -9223,6 +9861,688 @@ omp_lastprivate_for_combined_outer_constructs (struct gimplify_omp_ctx *octx,
omp_notice_variable (octx, decl, true);
}
+/* Link node NEWNODE so it is pointed to by chain INSERT_AT. NEWNODE's chain
+ is linked to the previous node pointed to by INSERT_AT. */
+
+static tree *
+omp_siblist_insert_node_after (tree newnode, tree *insert_at)
+{
+ OMP_CLAUSE_CHAIN (newnode) = *insert_at;
+ *insert_at = newnode;
+ return &OMP_CLAUSE_CHAIN (newnode);
+}
+
+/* Move NODE (which is currently pointed to by the chain OLD_POS) so it is
+ pointed to by chain MOVE_AFTER instead. */
+
+static void
+omp_siblist_move_node_after (tree node, tree *old_pos, tree *move_after)
+{
+ gcc_assert (node == *old_pos);
+ *old_pos = OMP_CLAUSE_CHAIN (node);
+ OMP_CLAUSE_CHAIN (node) = *move_after;
+ *move_after = node;
+}
+
+/* Move nodes from FIRST_PTR (pointed to by previous node's chain) to
+ LAST_NODE to after MOVE_AFTER chain. Similar to below function, but no
+ new nodes are prepended to the list before splicing into the new position.
+ Return the position we should continue scanning the list at, or NULL to
+ stay where we were. */
+
+static tree *
+omp_siblist_move_nodes_after (tree *first_ptr, tree last_node,
+ tree *move_after)
+{
+ if (first_ptr == move_after)
+ return NULL;
+
+ tree tmp = *first_ptr;
+ *first_ptr = OMP_CLAUSE_CHAIN (last_node);
+ OMP_CLAUSE_CHAIN (last_node) = *move_after;
+ *move_after = tmp;
+
+ return first_ptr;
+}
+
+/* Concatenate two lists described by [FIRST_NEW, LAST_NEW_TAIL] and
+ [FIRST_PTR, LAST_NODE], and insert them in the OMP clause list after chain
+ pointer MOVE_AFTER.
+
+ The latter list was previously part of the OMP clause list, and the former
+ (prepended) part is comprised of new nodes.
+
+ We start with a list of nodes starting with a struct mapping node. We
+ rearrange the list so that new nodes starting from FIRST_NEW and whose last
+ node's chain is LAST_NEW_TAIL comes directly after MOVE_AFTER, followed by
+ the group of mapping nodes we are currently processing (from the chain
+ FIRST_PTR to LAST_NODE). The return value is the pointer to the next chain
+ we should continue processing from, or NULL to stay where we were.
+
+ The transformation (in the case where MOVE_AFTER and FIRST_PTR are
+ different) is worked through below. Here we are processing LAST_NODE, and
+ FIRST_PTR points at the preceding mapping clause:
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->D (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->F (first_ptr)]
+ F. map_to_4 [->G (continue_at)]
+ G. attach_4 (last_node) [->H]
+ H. ...
+
+ *last_new_tail = *first_ptr;
+
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ *first_ptr = OMP_CLAUSE_CHAIN (last_node)
+
+ #. mapping node chain
+ ----------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->D (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (first_ptr)]
+ F. map_to_4 [->G (continue_at)]
+ G. attach_4 (last_node) [->H]
+ H. ...
+
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ OMP_CLAUSE_CHAIN (last_node) = *move_after;
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->D (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (continue_at)]
+ F. map_to_4 [->G]
+ G. attach_4 (last_node) [->D]
+ H. ...
+
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ *move_after = first_new;
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->I (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (continue_at)]
+ F. map_to_4 [->G]
+ G. attach_4 (last_node) [->D]
+ H. ...
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ or, in order:
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->I (move_after)]
+ I. new_node (first_new) [->F (last_new_tail)]
+ F. map_to_4 [->G]
+ G. attach_4 (last_node) [->D]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (continue_at)]
+ H. ...
+*/
+
+static tree *
+omp_siblist_move_concat_nodes_after (tree first_new, tree *last_new_tail,
+ tree *first_ptr, tree last_node,
+ tree *move_after)
+{
+ tree *continue_at = NULL;
+ *last_new_tail = *first_ptr;
+ if (first_ptr == move_after)
+ *move_after = first_new;
+ else
+ {
+ *first_ptr = OMP_CLAUSE_CHAIN (last_node);
+ continue_at = first_ptr;
+ OMP_CLAUSE_CHAIN (last_node) = *move_after;
+ *move_after = first_new;
+ }
+ return continue_at;
+}
+
+/* Mapping struct members causes an additional set of nodes to be created,
+ starting with GOMP_MAP_STRUCT followed by a number of mappings equal to the
+ number of members being mapped, in order of ascending position (address or
+ bitwise).
+
+ We scan through the list of mapping clauses, calling this function for each
+ struct member mapping we find, and build up the list of mappings after the
+ initial GOMP_MAP_STRUCT node. For pointer members, these will be
+ newly-created ALLOC nodes. For non-pointer members, the existing mapping is
+ moved into place in the sorted list.
+
+ struct {
+ int *a;
+ int *b;
+ int c;
+ int *d;
+ };
+
+ #pragma (acc|omp directive) copy(struct.a[0:n], struct.b[0:n], struct.c,
+ struct.d[0:n])
+
+ GOMP_MAP_STRUCT (4)
+ [GOMP_MAP_FIRSTPRIVATE_REFERENCE -- for refs to structs]
+ GOMP_MAP_ALLOC (struct.a)
+ GOMP_MAP_ALLOC (struct.b)
+ GOMP_MAP_TO (struct.c)
+ GOMP_MAP_ALLOC (struct.d)
+ ...
+
+ In the case where we are mapping references to pointers, or in Fortran if
+ we are mapping an array with a descriptor, additional nodes may be created
+ after the struct node list also.
+
+ The return code is either a pointer to the next node to process (if the
+ list has been rearranged), else NULL to continue with the next node in the
+ original list. */
+
+static tree *
+omp_accumulate_sibling_list (enum omp_region_type region_type,
+ enum tree_code code,
+ hash_map<tree_operand_hash, tree>
+ *&struct_map_to_clause, tree *grp_start_p,
+ tree grp_end, tree *inner)
+{
+ poly_offset_int coffset;
+ poly_int64 cbitpos;
+ tree ocd = OMP_CLAUSE_DECL (grp_end);
+ bool openmp = !(region_type & ORT_ACC);
+ tree *continue_at = NULL;
+
+ while (TREE_CODE (ocd) == ARRAY_REF)
+ ocd = TREE_OPERAND (ocd, 0);
+
+ if (TREE_CODE (ocd) == INDIRECT_REF)
+ ocd = TREE_OPERAND (ocd, 0);
+
+ tree base = extract_base_bit_offset (ocd, &cbitpos, &coffset);
+
+ bool ptr = (OMP_CLAUSE_MAP_KIND (grp_end) == GOMP_MAP_ALWAYS_POINTER);
+ bool attach_detach = ((OMP_CLAUSE_MAP_KIND (grp_end)
+ == GOMP_MAP_ATTACH_DETACH)
+ || (OMP_CLAUSE_MAP_KIND (grp_end)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION));
+ bool attach = (OMP_CLAUSE_MAP_KIND (grp_end) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (grp_end) == GOMP_MAP_DETACH);
+
+ /* FIXME: If we're not mapping the base pointer in some other clause on this
+ directive, I think we want to create ALLOC/RELEASE here -- i.e. not
+ early-exit. */
+ if (openmp && attach_detach)
+ return NULL;
+
+ if (!struct_map_to_clause || struct_map_to_clause->get (base) == NULL)
+ {
+ tree l = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end), OMP_CLAUSE_MAP);
+ gomp_map_kind k = attach ? GOMP_MAP_FORCE_PRESENT : GOMP_MAP_STRUCT;
+
+ OMP_CLAUSE_SET_MAP_KIND (l, k);
+
+ OMP_CLAUSE_DECL (l) = unshare_expr (base);
+
+ OMP_CLAUSE_SIZE (l)
+ = (!attach ? size_int (1)
+ : (DECL_P (OMP_CLAUSE_DECL (l))
+ ? DECL_SIZE_UNIT (OMP_CLAUSE_DECL (l))
+ : TYPE_SIZE_UNIT (TREE_TYPE (OMP_CLAUSE_DECL (l)))));
+ if (struct_map_to_clause == NULL)
+ struct_map_to_clause = new hash_map<tree_operand_hash, tree>;
+ struct_map_to_clause->put (base, l);
+
+ if (ptr || attach_detach)
+ {
+ tree extra_node;
+ tree alloc_node
+ = build_omp_struct_comp_nodes (code, *grp_start_p, grp_end,
+ &extra_node);
+ OMP_CLAUSE_CHAIN (l) = alloc_node;
+
+ tree *insert_node_pos = grp_start_p;
+
+ if (extra_node)
+ {
+ OMP_CLAUSE_CHAIN (extra_node) = *insert_node_pos;
+ OMP_CLAUSE_CHAIN (alloc_node) = extra_node;
+ }
+ else
+ OMP_CLAUSE_CHAIN (alloc_node) = *insert_node_pos;
+
+ *insert_node_pos = l;
+ }
+ else
+ {
+ gcc_assert (*grp_start_p == grp_end);
+ grp_start_p = omp_siblist_insert_node_after (l, grp_start_p);
+ }
+
+ tree noind = omp_strip_indirections (base);
+
+ if (!openmp
+ && (region_type & ORT_TARGET)
+ && TREE_CODE (noind) == COMPONENT_REF)
+ {
+ /* The base for this component access is a struct component access
+ itself. Insert a node to be processed on the next iteration of
+ our caller's loop, which will subsequently be turned into a new,
+ inner GOMP_MAP_STRUCT mapping.
+
+ We need to do this else the non-DECL_P base won't be
+ rewritten correctly in the offloaded region. */
+ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end),
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FORCE_PRESENT);
+ OMP_CLAUSE_DECL (c2) = unshare_expr (noind);
+ OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (TREE_TYPE (noind));
+ *inner = c2;
+ return NULL;
+ }
+
+ tree sdecl = omp_strip_components_and_deref (base);
+
+ if (POINTER_TYPE_P (TREE_TYPE (sdecl)) && (region_type & ORT_TARGET))
+ {
+ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end),
+ OMP_CLAUSE_MAP);
+ bool base_ref
+ = (TREE_CODE (base) == INDIRECT_REF
+ && ((TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
+ == REFERENCE_TYPE)
+ || ((TREE_CODE (TREE_OPERAND (base, 0))
+ == INDIRECT_REF)
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND
+ (TREE_OPERAND (base, 0), 0)))
+ == REFERENCE_TYPE))));
+ enum gomp_map_kind mkind = base_ref ? GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ : GOMP_MAP_FIRSTPRIVATE_POINTER;
+ OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
+ OMP_CLAUSE_DECL (c2) = sdecl;
+ tree baddr = build_fold_addr_expr (base);
+ baddr = fold_convert_loc (OMP_CLAUSE_LOCATION (grp_end),
+ ptrdiff_type_node, baddr);
+ /* This isn't going to be good enough when we add support for more
+ complicated lvalue expressions. FIXME. */
+ if (TREE_CODE (TREE_TYPE (sdecl)) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (sdecl))) == POINTER_TYPE)
+ sdecl = build_simple_mem_ref (sdecl);
+ tree decladdr = fold_convert_loc (OMP_CLAUSE_LOCATION (grp_end),
+ ptrdiff_type_node, sdecl);
+ OMP_CLAUSE_SIZE (c2)
+ = fold_build2_loc (OMP_CLAUSE_LOCATION (grp_end), MINUS_EXPR,
+ ptrdiff_type_node, baddr, decladdr);
+ /* Insert after struct node. */
+ OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l);
+ OMP_CLAUSE_CHAIN (l) = c2;
+ }
+
+ return NULL;
+ }
+ else if (struct_map_to_clause)
+ {
+ tree *osc = struct_map_to_clause->get (base);
+ tree *sc = NULL, *scp = NULL;
+ sc = &OMP_CLAUSE_CHAIN (*osc);
+ /* The struct mapping might be immediately followed by a
+ FIRSTPRIVATE_POINTER and/or FIRSTPRIVATE_REFERENCE -- if it's an
+ indirect access or a reference, or both. (This added node is removed
+ in omp-low.c after it has been processed there.) */
+ if (*sc != grp_end
+ && (OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
+ sc = &OMP_CLAUSE_CHAIN (*sc);
+ for (; *sc != grp_end; sc = &OMP_CLAUSE_CHAIN (*sc))
+ if ((ptr || attach_detach) && sc == grp_start_p)
+ break;
+ else if (TREE_CODE (OMP_CLAUSE_DECL (*sc)) != COMPONENT_REF
+ && TREE_CODE (OMP_CLAUSE_DECL (*sc)) != INDIRECT_REF
+ && TREE_CODE (OMP_CLAUSE_DECL (*sc)) != ARRAY_REF)
+ break;
+ else
+ {
+ tree sc_decl = OMP_CLAUSE_DECL (*sc);
+ poly_offset_int offset;
+ poly_int64 bitpos;
+
+ if (TREE_CODE (sc_decl) == ARRAY_REF)
+ {
+ while (TREE_CODE (sc_decl) == ARRAY_REF)
+ sc_decl = TREE_OPERAND (sc_decl, 0);
+ if (TREE_CODE (sc_decl) != COMPONENT_REF
+ || TREE_CODE (TREE_TYPE (sc_decl)) != ARRAY_TYPE)
+ break;
+ }
+ else if (TREE_CODE (sc_decl) == INDIRECT_REF
+ && TREE_CODE (TREE_OPERAND (sc_decl, 0)) == COMPONENT_REF
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (sc_decl, 0)))
+ == REFERENCE_TYPE))
+ sc_decl = TREE_OPERAND (sc_decl, 0);
+
+ tree base2 = extract_base_bit_offset (sc_decl, &bitpos, &offset);
+ if (!base2 || !operand_equal_p (base2, base, 0))
+ break;
+ if (scp)
+ continue;
+ if ((region_type & ORT_ACC) != 0)
+ {
+ /* This duplicate checking code is currently only enabled for
+ OpenACC. */
+ tree d1 = OMP_CLAUSE_DECL (*sc);
+ tree d2 = OMP_CLAUSE_DECL (grp_end);
+ while (TREE_CODE (d1) == ARRAY_REF)
+ d1 = TREE_OPERAND (d1, 0);
+ while (TREE_CODE (d2) == ARRAY_REF)
+ d2 = TREE_OPERAND (d2, 0);
+ if (TREE_CODE (d1) == INDIRECT_REF)
+ d1 = TREE_OPERAND (d1, 0);
+ if (TREE_CODE (d2) == INDIRECT_REF)
+ d2 = TREE_OPERAND (d2, 0);
+ while (TREE_CODE (d1) == COMPONENT_REF)
+ if (TREE_CODE (d2) == COMPONENT_REF
+ && TREE_OPERAND (d1, 1) == TREE_OPERAND (d2, 1))
+ {
+ d1 = TREE_OPERAND (d1, 0);
+ d2 = TREE_OPERAND (d2, 0);
+ }
+ else
+ break;
+ if (d1 == d2)
+ {
+ error_at (OMP_CLAUSE_LOCATION (grp_end),
+ "%qE appears more than once in map clauses",
+ OMP_CLAUSE_DECL (grp_end));
+ return NULL;
+ }
+ }
+ if (maybe_lt (coffset, offset)
+ || (known_eq (coffset, offset)
+ && maybe_lt (cbitpos, bitpos)))
+ {
+ if (ptr || attach_detach)
+ scp = sc;
+ else
+ break;
+ }
+ }
+
+ if (!attach)
+ OMP_CLAUSE_SIZE (*osc)
+ = size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc), size_one_node);
+ if (ptr || attach_detach)
+ {
+ tree cl = NULL_TREE, extra_node;
+ tree alloc_node = build_omp_struct_comp_nodes (code, *grp_start_p,
+ grp_end, &extra_node);
+ tree *tail_chain = NULL;
+
+ /* Here, we have:
+
+ grp_end : the last (or only) node in this group.
+ grp_start_p : pointer to the first node in a pointer mapping group
+ up to and including GRP_END.
+ sc : pointer to the chain for the end of the struct component
+ list.
+ scp : pointer to the chain for the sorted position at which we
+ should insert in the middle of the struct component list
+ (else NULL to insert at end).
+ alloc_node : the "alloc" node for the structure (pointer-type)
+ component. We insert at SCP (if present), else SC
+ (the end of the struct component list).
+ extra_node : a newly-synthesized node for an additional indirect
+ pointer mapping or a Fortran pointer set, if needed.
+ cl : first node to prepend before grp_start_p.
+ tail_chain : pointer to chain of last prepended node.
+
+ The general idea is we move the nodes for this struct mapping
+ together: the alloc node goes into the sorted list directly after
+ the struct mapping, and any extra nodes (together with the nodes
+ mapping arrays pointed to by struct components) get moved after
+ that list. When SCP is NULL, we insert the nodes at SC, i.e. at
+ the end of the struct component mapping list. It's important that
+ the alloc_node comes first in that case because it's part of the
+ sorted component mapping list (but subsequent nodes are not!). */
+
+ if (scp)
+ omp_siblist_insert_node_after (alloc_node, scp);
+
+ /* Make [cl,tail_chain] a list of the alloc node (if we haven't
+ already inserted it) and the extra_node (if it is present). The
+ list can be empty if we added alloc_node above and there is no
+ extra node. */
+ if (scp && extra_node)
+ {
+ cl = extra_node;
+ tail_chain = &OMP_CLAUSE_CHAIN (extra_node);
+ }
+ else if (extra_node)
+ {
+ OMP_CLAUSE_CHAIN (alloc_node) = extra_node;
+ cl = alloc_node;
+ tail_chain = &OMP_CLAUSE_CHAIN (extra_node);
+ }
+ else if (!scp)
+ {
+ cl = alloc_node;
+ tail_chain = &OMP_CLAUSE_CHAIN (alloc_node);
+ }
+
+ continue_at
+ = cl ? omp_siblist_move_concat_nodes_after (cl, tail_chain,
+ grp_start_p, grp_end,
+ sc)
+ : omp_siblist_move_nodes_after (grp_start_p, grp_end, sc);
+ }
+ else if (*sc != grp_end)
+ {
+ gcc_assert (*grp_start_p == grp_end);
+
+ /* We are moving the current node back to a previous struct node:
+ the node that used to point to the current node will now point to
+ the next node. */
+ continue_at = grp_start_p;
+ /* In the non-pointer case, the mapping clause itself is moved into
+ the correct position in the struct component list, which in this
+ case is just SC. */
+ omp_siblist_move_node_after (*grp_start_p, grp_start_p, sc);
+ }
+ }
+ return continue_at;
+}
+
+/* Scan through GROUPS, and create sorted structure sibling lists without
+ gimplifying. */
+
+static bool
+omp_build_struct_sibling_lists (enum tree_code code,
+ enum omp_region_type region_type,
+ vec<omp_mapping_group> *groups,
+ hash_map<tree_operand_hash, omp_mapping_group *>
+ **grpmap,
+ tree *list_p)
+{
+ unsigned i;
+ omp_mapping_group *grp;
+ hash_map<tree_operand_hash, tree> *struct_map_to_clause = NULL;
+ bool success = true;
+ tree *new_next = NULL;
+ tree *tail = &OMP_CLAUSE_CHAIN ((*groups)[groups->length () - 1].grp_end);
+ auto_vec<omp_mapping_group> pre_hwm_groups;
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ tree c = grp->grp_end;
+ tree decl = OMP_CLAUSE_DECL (c);
+ tree grp_end = grp->grp_end;
+ tree sentinel = OMP_CLAUSE_CHAIN (grp_end);
+
+ if (new_next)
+ grp->grp_start = new_next;
+
+ new_next = NULL;
+
+ tree *grp_start_p = grp->grp_start;
+
+ if (DECL_P (decl))
+ continue;
+
+ if (OMP_CLAUSE_CHAIN (*grp_start_p)
+ && OMP_CLAUSE_CHAIN (*grp_start_p) != grp_end)
+ {
+ /* Don't process an array descriptor that isn't inside a derived type
+ as a struct (the GOMP_MAP_POINTER following will have the form
+ "var.data", but such mappings are handled specially). */
+ tree grpmid = OMP_CLAUSE_CHAIN (*grp_start_p);
+ if (OMP_CLAUSE_CODE (grpmid) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (grpmid) == GOMP_MAP_TO_PSET
+ && DECL_P (OMP_CLAUSE_DECL (grpmid)))
+ continue;
+ }
+
+ tree d = decl;
+ if (TREE_CODE (d) == ARRAY_REF)
+ {
+ while (TREE_CODE (d) == ARRAY_REF)
+ d = TREE_OPERAND (d, 0);
+ if (TREE_CODE (d) == COMPONENT_REF
+ && TREE_CODE (TREE_TYPE (d)) == ARRAY_TYPE)
+ decl = d;
+ }
+ if (d == decl
+ && TREE_CODE (decl) == INDIRECT_REF
+ && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
+ == REFERENCE_TYPE)
+ && (OMP_CLAUSE_MAP_KIND (c)
+ != GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION))
+ decl = TREE_OPERAND (decl, 0);
+
+ STRIP_NOPS (decl);
+
+ if (TREE_CODE (decl) != COMPONENT_REF)
+ continue;
+
+ /* If we're mapping the whole struct in another node, skip creation of
+ sibling lists. */
+ omp_mapping_group *wholestruct;
+ if (!(region_type & ORT_ACC)
+ && omp_mapped_by_containing_struct (*grpmap, OMP_CLAUSE_DECL (c),
+ &wholestruct))
+ {
+ if (*grp_start_p == grp_end)
+ /* Remove the whole of this mapping -- redundant. */
+ grp->deleted = true;
+
+ continue;
+ }
+
+ if (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET
+ && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH
+ && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH
+ && code != OACC_UPDATE
+ && code != OMP_TARGET_UPDATE)
+ {
+ if (error_operand_p (decl))
+ {
+ success = false;
+ goto error_out;
+ }
+
+ tree stype = TREE_TYPE (decl);
+ if (TREE_CODE (stype) == REFERENCE_TYPE)
+ stype = TREE_TYPE (stype);
+ if (TYPE_SIZE_UNIT (stype) == NULL
+ || TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "mapping field %qE of variable length "
+ "structure", OMP_CLAUSE_DECL (c));
+ success = false;
+ goto error_out;
+ }
+
+ tree inner = NULL_TREE;
+
+ new_next
+ = omp_accumulate_sibling_list (region_type, code,
+ struct_map_to_clause, grp_start_p,
+ grp_end, &inner);
+
+ if (inner)
+ {
+ if (new_next && *new_next == NULL_TREE)
+ *new_next = inner;
+ else
+ *tail = inner;
+
+ OMP_CLAUSE_CHAIN (inner) = NULL_TREE;
+ omp_mapping_group newgrp;
+ newgrp.grp_start = new_next ? new_next : tail;
+ newgrp.grp_end = inner;
+ newgrp.mark = UNVISITED;
+ newgrp.sibling = NULL;
+ newgrp.deleted = false;
+ newgrp.next = NULL;
+ groups->safe_push (newgrp);
+
+ /* !!! Growing GROUPS might invalidate the pointers in the group
+ map. Rebuild it here. This is a bit inefficient, but
+ shouldn't happen very often. */
+ delete (*grpmap);
+ *grpmap
+ = omp_reindex_mapping_groups (list_p, groups, &pre_hwm_groups,
+ sentinel);
+
+ tail = &OMP_CLAUSE_CHAIN (inner);
+ }
+ }
+ }
+
+ /* Delete groups marked for deletion above. At this point the order of the
+ groups may no longer correspond to the order of the underlying list,
+ which complicates this a little. First clear out OMP_CLAUSE_DECL for
+ deleted nodes... */
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ if (grp->deleted)
+ for (tree d = *grp->grp_start;
+ d != OMP_CLAUSE_CHAIN (grp->grp_end);
+ d = OMP_CLAUSE_CHAIN (d))
+ OMP_CLAUSE_DECL (d) = NULL_TREE;
+
+ /* ...then sweep through the list removing the now-empty nodes. */
+
+ tail = list_p;
+ while (*tail)
+ {
+ if (OMP_CLAUSE_CODE (*tail) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_DECL (*tail) == NULL_TREE)
+ *tail = OMP_CLAUSE_CHAIN (*tail);
+ else
+ tail = &OMP_CLAUSE_CHAIN (*tail);
+ }
+
+error_out:
+ if (struct_map_to_clause)
+ delete struct_map_to_clause;
+
+ return success;
+}
+
/* Scan the OMP clauses in *LIST_P, installing mappings into a new
and previous omp contexts. */
@@ -9233,9 +10553,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
tree c;
- hash_map<tree_operand_hash, tree> *struct_map_to_clause = NULL;
- hash_map<tree_operand_hash, tree *> *struct_seen_clause = NULL;
- hash_set<tree> *struct_deref_set = NULL;
tree *prev_list_p = NULL, *orig_list_p = list_p;
int handled_depend_iterators = -1;
int nowait = -1;
@@ -9271,7 +10588,57 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
|| code == OMP_TARGET_DATA
|| code == OMP_TARGET_ENTER_DATA
|| code == OMP_TARGET_EXIT_DATA)
- omp_target_reorder_clauses (list_p);
+ {
+ vec<omp_mapping_group> *groups;
+ groups = omp_gather_mapping_groups (list_p);
+ if (groups)
+ {
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap;
+ grpmap = omp_index_mapping_groups (groups);
+
+ omp_build_struct_sibling_lists (code, region_type, groups, &grpmap,
+ list_p);
+
+ omp_mapping_group *outlist = NULL;
+
+ /* Topological sorting may fail if we have duplicate nodes, which
+ we should have detected and shown an error for already. Skip
+ sorting in that case. */
+ if (seen_error ())
+ goto failure;
+
+ delete grpmap;
+ delete groups;
+
+ /* Rebuild now we have struct sibling lists. */
+ groups = omp_gather_mapping_groups (list_p);
+ grpmap = omp_index_mapping_groups (groups);
+
+ outlist = omp_tsort_mapping_groups (groups, grpmap);
+ outlist = omp_segregate_mapping_groups (outlist);
+ list_p = omp_reorder_mapping_groups (groups, outlist, list_p);
+
+ failure:
+ delete grpmap;
+ delete groups;
+ }
+ }
+ else if (region_type & ORT_ACC)
+ {
+ vec<omp_mapping_group> *groups;
+ groups = omp_gather_mapping_groups (list_p);
+ if (groups)
+ {
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap;
+ grpmap = omp_index_mapping_groups (groups);
+
+ omp_build_struct_sibling_lists (code, region_type, groups, &grpmap,
+ list_p);
+
+ delete groups;
+ delete grpmap;
+ }
+ }
while ((c = *list_p) != NULL)
{
@@ -9678,6 +11045,28 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
GOVD_FIRSTPRIVATE | GOVD_SEEN);
}
+ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT)
+ {
+ tree base = omp_strip_components_and_deref (decl);
+ if (DECL_P (base))
+ {
+ decl = base;
+ splay_tree_node n
+ = splay_tree_lookup (ctx->variables,
+ (splay_tree_key) decl);
+ if (seen_error ()
+ && n
+ && (n->value & (GOVD_MAP | GOVD_FIRSTPRIVATE)) != 0)
+ {
+ remove = true;
+ break;
+ }
+ flags = GOVD_MAP | GOVD_EXPLICIT;
+
+ goto do_add_decl;
+ }
+ }
+
if (TREE_CODE (decl) == TARGET_EXPR)
{
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
@@ -9708,113 +11097,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
pd = &TREE_OPERAND (decl, 0);
decl = TREE_OPERAND (decl, 0);
}
- bool indir_p = false;
- bool component_ref_p = false;
- tree indir_base = NULL_TREE;
- tree orig_decl = decl;
- tree decl_ref = NULL_TREE;
- if ((region_type & (ORT_ACC | ORT_TARGET | ORT_TARGET_DATA)) != 0
- && TREE_CODE (*pd) == COMPONENT_REF
- && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH
- && code != OACC_UPDATE)
- {
- while (TREE_CODE (decl) == COMPONENT_REF)
- {
- decl = TREE_OPERAND (decl, 0);
- component_ref_p = true;
- if (((TREE_CODE (decl) == MEM_REF
- && integer_zerop (TREE_OPERAND (decl, 1)))
- || INDIRECT_REF_P (decl))
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == POINTER_TYPE))
- {
- indir_p = true;
- indir_base = decl;
- decl = TREE_OPERAND (decl, 0);
- STRIP_NOPS (decl);
- }
- if (TREE_CODE (decl) == INDIRECT_REF
- && DECL_P (TREE_OPERAND (decl, 0))
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == REFERENCE_TYPE))
- {
- decl_ref = decl;
- decl = TREE_OPERAND (decl, 0);
- }
- }
- }
- else if (TREE_CODE (decl) == COMPONENT_REF
- && (OMP_CLAUSE_MAP_KIND (c)
- != GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION))
- {
- component_ref_p = true;
- while (TREE_CODE (decl) == COMPONENT_REF)
- decl = TREE_OPERAND (decl, 0);
- if (TREE_CODE (decl) == INDIRECT_REF
- && DECL_P (TREE_OPERAND (decl, 0))
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == REFERENCE_TYPE))
- decl = TREE_OPERAND (decl, 0);
- }
- if (decl != orig_decl && DECL_P (decl) && indir_p
- && (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
- || (decl_ref
- && TREE_CODE (TREE_TYPE (decl_ref)) == POINTER_TYPE)))
- {
- gomp_map_kind k
- = ((code == OACC_EXIT_DATA || code == OMP_TARGET_EXIT_DATA)
- ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
- /* We have a dereference of a struct member. Make this an
- attach/detach operation, and ensure the base pointer is
- mapped as a FIRSTPRIVATE_POINTER. */
- OMP_CLAUSE_SET_MAP_KIND (c, k);
- flags = GOVD_MAP | GOVD_SEEN | GOVD_EXPLICIT;
- tree next_clause = OMP_CLAUSE_CHAIN (c);
- if (k == GOMP_MAP_ATTACH
- && code != OACC_ENTER_DATA
- && code != OMP_TARGET_ENTER_DATA
- && (!next_clause
- || (OMP_CLAUSE_CODE (next_clause) != OMP_CLAUSE_MAP)
- || (OMP_CLAUSE_MAP_KIND (next_clause)
- != GOMP_MAP_POINTER)
- || OMP_CLAUSE_DECL (next_clause) != decl)
- && (!struct_deref_set
- || !struct_deref_set->contains (decl))
- && (!struct_map_to_clause
- || !struct_map_to_clause->get (indir_base)))
- {
- if (!struct_deref_set)
- struct_deref_set = new hash_set<tree> ();
- /* As well as the attach, we also need a
- FIRSTPRIVATE_POINTER clause to properly map the
- pointer to the struct base. */
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALLOC);
- OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c2)
- = 1;
- tree charptr_zero
- = build_int_cst (build_pointer_type (char_type_node),
- 0);
- OMP_CLAUSE_DECL (c2)
- = build2 (MEM_REF, char_type_node,
- decl_ref ? decl_ref : decl, charptr_zero);
- OMP_CLAUSE_SIZE (c2) = size_zero_node;
- tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (c3,
- GOMP_MAP_FIRSTPRIVATE_POINTER);
- OMP_CLAUSE_DECL (c3) = decl;
- OMP_CLAUSE_SIZE (c3) = size_zero_node;
- tree mapgrp = *prev_list_p;
- *prev_list_p = c2;
- OMP_CLAUSE_CHAIN (c3) = mapgrp;
- OMP_CLAUSE_CHAIN (c2) = c3;
-
- struct_deref_set->add (decl);
- }
- goto do_add_decl;
- }
/* An "attach/detach" operation on an update directive should
behave as a GOMP_MAP_ALWAYS_POINTER. Beware that
unlike attach or detach map kinds, GOMP_MAP_ALWAYS_POINTER
@@ -9822,373 +11104,49 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
if (code == OACC_UPDATE
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_ALWAYS_POINTER);
- if ((DECL_P (decl)
- || (component_ref_p
- && (INDIRECT_REF_P (decl)
- || TREE_CODE (decl) == MEM_REF
- || TREE_CODE (decl) == ARRAY_REF)))
- && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET
- && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH
- && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH
- && code != OACC_UPDATE
- && code != OMP_TARGET_UPDATE)
- {
- if (error_operand_p (decl))
- {
- remove = true;
- break;
- }
-
- tree stype = TREE_TYPE (decl);
- if (TREE_CODE (stype) == REFERENCE_TYPE)
- stype = TREE_TYPE (stype);
- if (TYPE_SIZE_UNIT (stype) == NULL
- || TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST)
- {
- error_at (OMP_CLAUSE_LOCATION (c),
- "mapping field %qE of variable length "
- "structure", OMP_CLAUSE_DECL (c));
- remove = true;
- break;
- }
-
- if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER
- || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
- {
- /* Error recovery. */
- if (prev_list_p == NULL)
- {
- remove = true;
- break;
- }
-
- /* The below prev_list_p based error recovery code is
- currently no longer valid for OpenMP. */
- if (code != OMP_TARGET
- && code != OMP_TARGET_DATA
- && code != OMP_TARGET_UPDATE
- && code != OMP_TARGET_ENTER_DATA
- && code != OMP_TARGET_EXIT_DATA
- && OMP_CLAUSE_CHAIN (*prev_list_p) != c)
- {
- tree ch = OMP_CLAUSE_CHAIN (*prev_list_p);
- if (ch == NULL_TREE || OMP_CLAUSE_CHAIN (ch) != c)
- {
- remove = true;
- break;
- }
- }
- }
-
- poly_offset_int offset1;
- poly_int64 bitpos1;
- tree tree_offset1;
- tree base_ref;
-
- tree base
- = extract_base_bit_offset (OMP_CLAUSE_DECL (c), &base_ref,
- &bitpos1, &offset1,
- &tree_offset1);
-
- bool do_map_struct = (base == decl && !tree_offset1);
-
- splay_tree_node n
- = (DECL_P (decl)
- ? splay_tree_lookup (ctx->variables,
- (splay_tree_key) decl)
- : NULL);
- bool ptr = (OMP_CLAUSE_MAP_KIND (c)
- == GOMP_MAP_ALWAYS_POINTER);
- bool attach_detach = (OMP_CLAUSE_MAP_KIND (c)
- == GOMP_MAP_ATTACH_DETACH);
- bool attach = OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
- || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH;
- bool has_attachments = false;
- /* For OpenACC, pointers in structs should trigger an
- attach action. */
- if (attach_detach
- && ((region_type & (ORT_ACC | ORT_TARGET | ORT_TARGET_DATA))
- || code == OMP_TARGET_ENTER_DATA
- || code == OMP_TARGET_EXIT_DATA))
+ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
+ {
+ if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
+ == ARRAY_TYPE)
+ remove = true;
+ else
{
- /* Turn a GOMP_MAP_ATTACH_DETACH clause into a
- GOMP_MAP_ATTACH or GOMP_MAP_DETACH clause after we
- have detected a case that needs a GOMP_MAP_STRUCT
- mapping added. */
- gomp_map_kind k
- = ((code == OACC_EXIT_DATA || code == OMP_TARGET_EXIT_DATA)
- ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
+ gomp_map_kind k = ((code == OACC_EXIT_DATA
+ || code == OMP_TARGET_EXIT_DATA)
+ ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
OMP_CLAUSE_SET_MAP_KIND (c, k);
- has_attachments = true;
}
+ }
- /* We currently don't handle non-constant offset accesses wrt to
- GOMP_MAP_STRUCT elements. */
- if (!do_map_struct)
- goto skip_map_struct;
-
- /* Nor for attach_detach for OpenMP. */
- if ((code == OMP_TARGET
- || code == OMP_TARGET_DATA
- || code == OMP_TARGET_UPDATE
- || code == OMP_TARGET_ENTER_DATA
- || code == OMP_TARGET_EXIT_DATA)
- && attach_detach)
- {
- if (DECL_P (decl))
- {
- if (struct_seen_clause == NULL)
- struct_seen_clause
- = new hash_map<tree_operand_hash, tree *>;
- if (!struct_seen_clause->get (decl))
- struct_seen_clause->put (decl, list_p);
- }
+ tree cref = decl;
- goto skip_map_struct;
- }
+ while (TREE_CODE (cref) == ARRAY_REF)
+ cref = TREE_OPERAND (cref, 0);
- if ((DECL_P (decl)
- && (n == NULL || (n->value & GOVD_MAP) == 0))
- || (!DECL_P (decl)
- && (!struct_map_to_clause
- || struct_map_to_clause->get (decl) == NULL)))
- {
- tree l = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- gomp_map_kind k = attach ? GOMP_MAP_FORCE_PRESENT
- : GOMP_MAP_STRUCT;
-
- OMP_CLAUSE_SET_MAP_KIND (l, k);
- if (base_ref)
- OMP_CLAUSE_DECL (l) = unshare_expr (base_ref);
- else
- {
- OMP_CLAUSE_DECL (l) = unshare_expr (decl);
- if (!DECL_P (OMP_CLAUSE_DECL (l))
- && (gimplify_expr (&OMP_CLAUSE_DECL (l),
- pre_p, NULL, is_gimple_lvalue,
- fb_lvalue)
- == GS_ERROR))
- {
- remove = true;
- break;
- }
- }
- OMP_CLAUSE_SIZE (l)
- = (!attach
- ? size_int (1)
- : DECL_P (OMP_CLAUSE_DECL (l))
- ? DECL_SIZE_UNIT (OMP_CLAUSE_DECL (l))
- : TYPE_SIZE_UNIT (TREE_TYPE (OMP_CLAUSE_DECL (l))));
- if (struct_map_to_clause == NULL)
- struct_map_to_clause
- = new hash_map<tree_operand_hash, tree>;
- struct_map_to_clause->put (decl, l);
- if (ptr || attach_detach)
- {
- tree **sc = (struct_seen_clause
- ? struct_seen_clause->get (decl)
- : NULL);
- tree *insert_node_pos = sc ? *sc : prev_list_p;
-
- insert_struct_comp_map (code, c, l, *insert_node_pos,
- NULL);
- *insert_node_pos = l;
- prev_list_p = NULL;
- }
- else
- {
- OMP_CLAUSE_CHAIN (l) = c;
- *list_p = l;
- list_p = &OMP_CLAUSE_CHAIN (l);
- }
- if (base_ref && code == OMP_TARGET)
- {
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- enum gomp_map_kind mkind
- = GOMP_MAP_FIRSTPRIVATE_REFERENCE;
- OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
- OMP_CLAUSE_DECL (c2) = decl;
- OMP_CLAUSE_SIZE (c2) = size_zero_node;
- OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l);
- OMP_CLAUSE_CHAIN (l) = c2;
- }
- flags = GOVD_MAP | GOVD_EXPLICIT;
- if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
- || ptr
- || attach_detach)
- flags |= GOVD_SEEN;
- if (has_attachments)
- flags |= GOVD_MAP_HAS_ATTACHMENTS;
-
- /* If this is a *pointer-to-struct expression, make sure a
- firstprivate map of the base-pointer exists. */
- if (component_ref_p
- && ((TREE_CODE (decl) == MEM_REF
- && integer_zerop (TREE_OPERAND (decl, 1)))
- || INDIRECT_REF_P (decl))
- && DECL_P (TREE_OPERAND (decl, 0))
- && !splay_tree_lookup (ctx->variables,
- ((splay_tree_key)
- TREE_OPERAND (decl, 0))))
- {
- decl = TREE_OPERAND (decl, 0);
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- enum gomp_map_kind mkind
- = GOMP_MAP_FIRSTPRIVATE_POINTER;
- OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
- OMP_CLAUSE_DECL (c2) = decl;
- OMP_CLAUSE_SIZE (c2) = size_zero_node;
- OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = c2;
- }
+ if (TREE_CODE (cref) == INDIRECT_REF)
+ cref = TREE_OPERAND (cref, 0);
- if (DECL_P (decl))
- goto do_add_decl;
- }
- else if (struct_map_to_clause)
+ if (TREE_CODE (cref) == COMPONENT_REF)
+ {
+ tree base = cref;
+ while (base && !DECL_P (base))
{
- tree *osc = struct_map_to_clause->get (decl);
- tree *sc = NULL, *scp = NULL;
- if (n != NULL
- && (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
- || ptr
- || attach_detach))
- n->value |= GOVD_SEEN;
- sc = &OMP_CLAUSE_CHAIN (*osc);
- if (*sc != c
- && (OMP_CLAUSE_MAP_KIND (*sc)
- == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
- sc = &OMP_CLAUSE_CHAIN (*sc);
- /* Here "prev_list_p" is the end of the inserted
- alloc/release nodes after the struct node, OSC. */
- for (; *sc != c; sc = &OMP_CLAUSE_CHAIN (*sc))
- if ((ptr || attach_detach) && sc == prev_list_p)
- break;
- else if (TREE_CODE (OMP_CLAUSE_DECL (*sc))
- != COMPONENT_REF
- && (TREE_CODE (OMP_CLAUSE_DECL (*sc))
- != INDIRECT_REF)
- && (TREE_CODE (OMP_CLAUSE_DECL (*sc))
- != ARRAY_REF))
- break;
- else
- {
- tree sc_decl = OMP_CLAUSE_DECL (*sc);
- poly_offset_int offsetn;
- poly_int64 bitposn;
- tree tree_offsetn;
- tree base
- = extract_base_bit_offset (sc_decl, NULL,
- &bitposn, &offsetn,
- &tree_offsetn);
- if (base != decl)
- break;
- if (scp)
- continue;
- if ((region_type & ORT_ACC) != 0)
- {
- /* This duplicate checking code is currently only
- enabled for OpenACC. */
- tree d1 = OMP_CLAUSE_DECL (*sc);
- tree d2 = OMP_CLAUSE_DECL (c);
- while (TREE_CODE (d1) == ARRAY_REF)
- d1 = TREE_OPERAND (d1, 0);
- while (TREE_CODE (d2) == ARRAY_REF)
- d2 = TREE_OPERAND (d2, 0);
- if (TREE_CODE (d1) == INDIRECT_REF)
- d1 = TREE_OPERAND (d1, 0);
- if (TREE_CODE (d2) == INDIRECT_REF)
- d2 = TREE_OPERAND (d2, 0);
- while (TREE_CODE (d1) == COMPONENT_REF)
- if (TREE_CODE (d2) == COMPONENT_REF
- && TREE_OPERAND (d1, 1)
- == TREE_OPERAND (d2, 1))
- {
- d1 = TREE_OPERAND (d1, 0);
- d2 = TREE_OPERAND (d2, 0);
- }
- else
- break;
- if (d1 == d2)
- {
- error_at (OMP_CLAUSE_LOCATION (c),
- "%qE appears more than once in map "
- "clauses", OMP_CLAUSE_DECL (c));
- remove = true;
- break;
- }
- }
- if (maybe_lt (offset1, offsetn)
- || (known_eq (offset1, offsetn)
- && maybe_lt (bitpos1, bitposn)))
- {
- if (ptr || attach_detach)
- scp = sc;
- else
- break;
- }
- }
- if (remove)
+ tree innerbase = omp_get_base_pointer (base);
+ if (!innerbase)
break;
- if (!attach)
- OMP_CLAUSE_SIZE (*osc)
- = size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc),
- size_one_node);
- if (ptr || attach_detach)
- {
- tree cl = insert_struct_comp_map (code, c, NULL,
- *prev_list_p, scp);
- if (sc == prev_list_p)
- {
- *sc = cl;
- prev_list_p = NULL;
- }
- else
- {
- *prev_list_p = OMP_CLAUSE_CHAIN (c);
- list_p = prev_list_p;
- prev_list_p = NULL;
- OMP_CLAUSE_CHAIN (c) = *sc;
- *sc = cl;
- continue;
- }
- }
- else if (*sc != c)
- {
- if (gimplify_expr (pd, pre_p, NULL, is_gimple_lvalue,
- fb_lvalue)
- == GS_ERROR)
- {
- remove = true;
- break;
- }
- *list_p = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *sc;
- *sc = c;
- continue;
- }
+ base = innerbase;
+ }
+ if (base
+ && DECL_P (base)
+ && GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
+ && POINTER_TYPE_P (TREE_TYPE (base)))
+ {
+ splay_tree_node n
+ = splay_tree_lookup (ctx->variables,
+ (splay_tree_key) base);
+ n->value |= GOVD_SEEN;
}
- skip_map_struct:
- ;
- }
- else if ((code == OACC_ENTER_DATA
- || code == OACC_EXIT_DATA
- || code == OACC_DATA
- || code == OACC_PARALLEL
- || code == OACC_KERNELS
- || code == OACC_SERIAL
- || code == OMP_TARGET_ENTER_DATA
- || code == OMP_TARGET_EXIT_DATA)
- && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
- {
- gomp_map_kind k = ((code == OACC_EXIT_DATA
- || code == OMP_TARGET_EXIT_DATA)
- ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
- OMP_CLAUSE_SET_MAP_KIND (c, k);
}
if (code == OMP_TARGET && OMP_CLAUSE_MAP_IN_REDUCTION (c))
@@ -10306,24 +11264,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
break;
}
- /* If this was of the form map(*pointer_to_struct), then the
- 'pointer_to_struct' DECL should be considered deref'ed. */
- if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALLOC
- || GOMP_MAP_COPY_TO_P (OMP_CLAUSE_MAP_KIND (c))
- || GOMP_MAP_COPY_FROM_P (OMP_CLAUSE_MAP_KIND (c)))
- && INDIRECT_REF_P (orig_decl)
- && DECL_P (TREE_OPERAND (orig_decl, 0))
- && TREE_CODE (TREE_TYPE (orig_decl)) == RECORD_TYPE)
- {
- tree ptr = TREE_OPERAND (orig_decl, 0);
- if (!struct_deref_set || !struct_deref_set->contains (ptr))
- {
- if (!struct_deref_set)
- struct_deref_set = new hash_set<tree> ();
- struct_deref_set->add (ptr);
- }
- }
-
if (!remove
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH_DETACH
@@ -10340,28 +11280,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
break;
}
- else
- {
- /* DECL_P (decl) == true */
- tree *sc;
- if (struct_map_to_clause
- && (sc = struct_map_to_clause->get (decl)) != NULL
- && OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_STRUCT
- && decl == OMP_CLAUSE_DECL (*sc))
- {
- /* We have found a map of the whole structure after a
- leading GOMP_MAP_STRUCT has been created, so refill the
- leading clause into a map of the whole structure
- variable, and remove the current one.
- TODO: we should be able to remove some maps of the
- following structure element maps if they are of
- compatible TO/FROM/ALLOC type. */
- OMP_CLAUSE_SET_MAP_KIND (*sc, OMP_CLAUSE_MAP_KIND (c));
- OMP_CLAUSE_SIZE (*sc) = unshare_expr (OMP_CLAUSE_SIZE (c));
- remove = true;
- break;
- }
- }
flags = GOVD_MAP | GOVD_EXPLICIT;
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TO
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TOFROM)
@@ -11031,12 +11949,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
ctx->clauses = *orig_list_p;
gimplify_omp_ctxp = ctx;
- if (struct_seen_clause)
- delete struct_seen_clause;
- if (struct_map_to_clause)
- delete struct_map_to_clause;
- if (struct_deref_set)
- delete struct_deref_set;
}
/* Return true if DECL is a candidate for shared to firstprivate
@@ -11185,8 +12097,6 @@ gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
return 0;
if ((flags & GOVD_SEEN) == 0)
return 0;
- if ((flags & GOVD_MAP_HAS_ATTACHMENTS) != 0)
- return 0;
if (flags & GOVD_DEBUG_PRIVATE)
{
gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_SHARED);
@@ -11503,10 +12413,15 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
*list_p = c2;
}
}
+
+ tree attach_list = NULL_TREE;
+ tree *attach_tail = &attach_list;
+
while ((c = *list_p) != NULL)
{
splay_tree_node n;
bool remove = false;
+ bool move_attach = false;
switch (OMP_CLAUSE_CODE (c))
{
@@ -11668,6 +12583,19 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
remove = true;
break;
}
+ /* If we have a target region, we can push all the attaches to the
+ end of the list (we may have standalone "attach" operations
+ synthesized for GOMP_MAP_STRUCT nodes that must be processed after
+ the attachment point AND the pointed-to block have been mapped).
+ If we have something else, e.g. "enter data", we need to keep
+ "attach" nodes together with the previous node they attach to so
+ that separate "exit data" operations work properly (see
+ libgomp/target.c). */
+ if ((ctx->region_type & ORT_TARGET) != 0
+ && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
+ || (OMP_CLAUSE_MAP_KIND (c)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION)))
+ move_attach = true;
decl = OMP_CLAUSE_DECL (c);
/* Data clauses associated with reductions must be
compatible with present_or_copy. Warn and adjust the clause
@@ -11982,10 +12910,25 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
+ else if (move_attach)
+ {
+ /* Remove attach node from here, separate out into its own list. */
+ *attach_tail = c;
+ *list_p = OMP_CLAUSE_CHAIN (c);
+ OMP_CLAUSE_CHAIN (c) = NULL_TREE;
+ attach_tail = &OMP_CLAUSE_CHAIN (c);
+ }
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
+ /* Splice attach nodes at the end of the list. */
+ if (attach_list)
+ {
+ *list_p = attach_list;
+ list_p = attach_tail;
+ }
+
/* Add in any implicit data sharing. */
struct gimplify_adjust_omp_clauses_data data;
if ((gimplify_omp_ctxp->region_type & ORT_ACC) == 0)
diff --git a/gcc/ginclude/float.h b/gcc/ginclude/float.h
index 9d368c4..afe4a71 100644
--- a/gcc/ginclude/float.h
+++ b/gcc/ginclude/float.h
@@ -257,9 +257,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define DBL_IS_IEC_60559 __DBL_IS_IEC_60559__
#define LDBL_IS_IEC_60559 __LDBL_IS_IEC_60559__
-/* Infinity in type float, or overflow if infinity not supported. */
+/* Infinity in type float; not defined if infinity not supported. */
+#if __FLT_HAS_INFINITY__
#undef INFINITY
#define INFINITY (__builtin_inff ())
+#endif
/* Quiet NaN, if supported for float. */
#if __FLT_HAS_QUIET_NAN__
diff --git a/gcc/ginclude/stdatomic.h b/gcc/ginclude/stdatomic.h
index 9f2475b..a56ba5d 100644
--- a/gcc/ginclude/stdatomic.h
+++ b/gcc/ginclude/stdatomic.h
@@ -79,7 +79,9 @@ typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t;
typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t;
+#if !(defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L)
#define ATOMIC_VAR_INIT(VALUE) (VALUE)
+#endif
/* Initialize an atomic object pointed to by PTR with VAL. */
#define atomic_init(PTR, VAL) \
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index c3f3da1..43cc2e0 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * go-lang.cc (go_langhook_init): Do not initialize
+ void_list_node.
+
2022-09-02 Martin Liska <mliska@suse.cz>
* go-lang.cc (go_langhook_pushdecl): Remove -gstabs option support, DBX-related
diff --git a/gcc/go/go-lang.cc b/gcc/go/go-lang.cc
index d519a69..4743370 100644
--- a/gcc/go/go-lang.cc
+++ b/gcc/go/go-lang.cc
@@ -98,9 +98,6 @@ go_langhook_init (void)
{
build_common_tree_nodes (false);
- /* I don't know why this has to be done explicitly. */
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
-
/* We must create the gogo IR after calling build_common_tree_nodes
(because Gogo::define_builtin_function_trees refers indirectly
to, e.g., unsigned_char_type_node) but before calling
diff --git a/gcc/jit/ChangeLog b/gcc/jit/ChangeLog
index 314b831..602cda3 100644
--- a/gcc/jit/ChangeLog
+++ b/gcc/jit/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * dummy-frontend.cc (jit_langhook_init): Do not initialize
+ void_list_node.
+
2022-08-09 Vibhav Pant <vibhavp@gmail.com>
* libgccjit.h (LIBGCCJIT_HAVE_gcc_jit_context_new_bitcast): Move
diff --git a/gcc/jit/dummy-frontend.cc b/gcc/jit/dummy-frontend.cc
index 84ff359..0687567 100644
--- a/gcc/jit/dummy-frontend.cc
+++ b/gcc/jit/dummy-frontend.cc
@@ -594,9 +594,6 @@ jit_langhook_init (void)
build_common_tree_nodes (false);
- /* I don't know why this has to be done explicitly. */
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
-
build_common_builtin_nodes ();
/* The default precision for floating point numbers. This is used
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 09a86ef..84fc5a4 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * lto-lang.cc (lto_build_c_type_nodes): Do not initialize
+ void_list_node.
+
2022-08-22 Martin Liska <mliska@suse.cz>
PR lto/106700
diff --git a/gcc/lto/lto-lang.cc b/gcc/lto/lto-lang.cc
index 972a033..d36453b 100644
--- a/gcc/lto/lto-lang.cc
+++ b/gcc/lto/lto-lang.cc
@@ -1239,7 +1239,6 @@ lto_build_c_type_nodes (void)
{
gcc_assert (void_type_node);
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
string_type_node = build_pointer_type (char_type_node);
const_string_type_node
= build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
diff --git a/gcc/match.pd b/gcc/match.pd
index 17318f52..345bcb7 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1763,6 +1763,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& (int_fits_type_p (@1, TREE_TYPE (@0))
|| tree_nop_conversion_p (TREE_TYPE (@0), type)))
|| types_match (@0, @1))
+ && !POINTER_TYPE_P (TREE_TYPE (@0))
+ && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE
/* ??? This transform conflicts with fold-const.cc doing
Convert (T)(x & c) into (T)x & (T)c, if c is an integer
constants (if x has signed type, the sign bit cannot be set
@@ -1799,7 +1801,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (GIMPLE
&& TREE_CODE (@1) != INTEGER_CST
&& tree_nop_conversion_p (type, TREE_TYPE (@2))
- && types_match (type, @0))
+ && types_match (type, @0)
+ && !POINTER_TYPE_P (TREE_TYPE (@0))
+ && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE)
(bitop @0 (convert @1)))))
(for bitop (bit_and bit_ior)
diff --git a/gcc/omp-low.cc b/gcc/omp-low.cc
index fd0ccd5..f0469d2 100644
--- a/gcc/omp-low.cc
+++ b/gcc/omp-low.cc
@@ -1599,8 +1599,11 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
{
/* If this is an offloaded region, an attach operation should
only exist when the pointer variable is mapped in a prior
- clause. */
- if (is_gimple_omp_offloaded (ctx->stmt))
+ clause.
+ If we had an error, we may not have attempted to sort clauses
+ properly, so avoid the test. */
+ if (is_gimple_omp_offloaded (ctx->stmt)
+ && !seen_error ())
gcc_assert
(maybe_lookup_decl (decl, ctx)
|| (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
@@ -1633,8 +1636,10 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
if (TREE_CODE (decl) == COMPONENT_REF
|| (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == REFERENCE_TYPE)))
+ && (((TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
+ == REFERENCE_TYPE)
+ || (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
+ == POINTER_TYPE)))))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
@@ -14012,6 +14017,7 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
is_ref = false;
bool ref_to_array = false;
+ bool ref_to_ptr = false;
if (is_ref)
{
type = TREE_TYPE (type);
@@ -14030,6 +14036,12 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
new_var = decl2;
type = TREE_TYPE (new_var);
}
+ else if (TREE_CODE (type) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == POINTER_TYPE)
+ {
+ type = TREE_TYPE (type);
+ ref_to_ptr = true;
+ }
x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
x = fold_convert_loc (clause_loc, type, x);
if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
@@ -14046,7 +14058,8 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (ref_to_array)
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
- if (is_ref && !ref_to_array)
+ if ((is_ref && !ref_to_array)
+ || ref_to_ptr)
{
tree t = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (t);
diff --git a/gcc/range-op-float.cc b/gcc/range-op-float.cc
index 0f928b6..1e39a07 100644
--- a/gcc/range-op-float.cc
+++ b/gcc/range-op-float.cc
@@ -150,24 +150,12 @@ range_operator_float::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED) cons
return VREL_VARYING;
}
-// Set R to [NAN, NAN].
-
-static inline void
-frange_set_nan (frange &r, tree type)
-{
- REAL_VALUE_TYPE rv;
- bool res = real_nan (&rv, "", 1, TYPE_MODE (type));
- if (flag_checking)
- gcc_assert (res);
- r.set (type, rv, rv);
-}
-
// Return TRUE if OP1 is known to be free of NANs.
static inline bool
finite_operand_p (const frange &op1)
{
- return flag_finite_math_only || !op1.maybe_nan ();
+ return flag_finite_math_only || !op1.maybe_isnan ();
}
// Return TRUE if OP1 and OP2 are known to be free of NANs.
@@ -175,7 +163,7 @@ finite_operand_p (const frange &op1)
static inline bool
finite_operands_p (const frange &op1, const frange &op2)
{
- return flag_finite_math_only || (!op1.maybe_nan () && !op2.maybe_nan ());
+ return flag_finite_math_only || (!op1.maybe_isnan () && !op2.maybe_isnan ());
}
// Floating version of relop_early_resolve that takes into account NAN
@@ -220,80 +208,105 @@ frange_drop_ninf (frange &r, tree type)
r.intersect (tmp);
}
-// (X <= VAL) produces the range of [-INF, VAL].
+// If zero is in R, make sure both -0.0 and +0.0 are in the range.
+
+static inline void
+frange_add_zeros (frange &r, tree type)
+{
+ if (r.undefined_p () || r.known_isnan ())
+ return;
+
+ if (HONOR_SIGNED_ZEROS (type)
+ && (real_iszero (&r.lower_bound ()) || real_iszero (&r.upper_bound ())))
+ {
+ frange zero;
+ zero.set_zero (type);
+ r.union_ (zero);
+ }
+}
+
+// Build a range that is <= VAL and store it in R.
static bool
-build_le (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_le (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
- r.set (type, dconstninf, val);
+ r.set (type, dconstninf, val.upper_bound ());
+
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
+
return true;
}
-// (X < VAL) produces the range of [-INF, VAL).
+// Build a range that is < VAL and store it in R.
static bool
-build_lt (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_lt (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
// < -INF is outside the range.
- if (real_isinf (&val, 1))
+ if (real_isinf (&val.upper_bound (), 1))
{
if (HONOR_NANS (type))
- frange_set_nan (r, type);
+ r.set_nan (type);
else
r.set_undefined ();
return false;
}
- // Hijack LE because we only support closed intervals.
- build_le (r, type, val);
+ // We only support closed intervals.
+ r.set (type, dconstninf, val.upper_bound ());
return true;
}
-// (X >= VAL) produces the range of [VAL, +INF].
+// Build a range that is >= VAL and store it in R.
static bool
-build_ge (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_ge (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
- r.set (type, val, dconstinf);
+ r.set (type, val.lower_bound (), dconstinf);
+
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
+
return true;
}
-// (X > VAL) produces the range of (VAL, +INF].
+// Build a range that is > VAL and store it in R.
static bool
-build_gt (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_gt (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
// > +INF is outside the range.
- if (real_isinf (&val, 0))
+ if (real_isinf (&val.lower_bound (), 0))
{
if (HONOR_NANS (type))
- frange_set_nan (r, type);
+ r.set_nan (type);
else
r.set_undefined ();
return false;
}
- // Hijack GE because we only support closed intervals.
- build_ge (r, type, val);
+ // We only support closed intervals.
+ r.set (type, val.lower_bound (), dconstinf);
return true;
}
@@ -388,18 +401,17 @@ foperator_equal::op1_range (frange &r, tree type,
case BRS_TRUE:
// If it's true, the result is the same as OP2.
r = op2;
- // Make sure we don't copy the sign bit if we may have a zero.
- if (HONOR_SIGNED_ZEROS (type) && r.contains_p (build_zero_cst (type)))
- r.set_signbit (fp_prop::VARYING);
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
// The TRUE side of op1 == op2 implies op1 is !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
case BRS_FALSE:
r.set_varying (type);
// The FALSE side of op1 == op1 implies op1 is a NAN.
if (rel == VREL_EQ)
- frange_set_nan (r, type);
+ r.set_nan (type);
// If the result is false, the only time we know anything is
// if OP2 is a constant.
else if (op2.singleton_p ()
@@ -492,11 +504,10 @@ foperator_not_equal::op1_range (frange &r, tree type,
case BRS_FALSE:
// If it's false, the result is the same as OP2.
r = op2;
- // Make sure we don't copy the sign bit if we may have a zero.
- if (HONOR_SIGNED_ZEROS (type) && r.contains_p (build_zero_cst (type)))
- r.set_signbit (fp_prop::VARYING);
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
// The FALSE side of op1 != op2 implies op1 is !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
default:
@@ -544,7 +555,7 @@ foperator_lt::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -561,16 +572,16 @@ foperator_lt::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_lt (r, type, op2.upper_bound ()))
+ if (build_lt (r, type, op2))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x < y implies x is not +INF.
frange_drop_inf (r, type);
}
break;
case BRS_FALSE:
- build_ge (r, type, op2.lower_bound ());
+ build_ge (r, type, op2);
break;
default:
@@ -589,16 +600,16 @@ foperator_lt::op2_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_gt (r, type, op1.lower_bound ()))
+ if (build_gt (r, type, op1))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x < y implies y is not -INF.
frange_drop_ninf (r, type);
}
break;
case BRS_FALSE:
- build_le (r, type, op1.upper_bound ());
+ build_le (r, type, op1);
break;
default:
@@ -646,7 +657,7 @@ foperator_le::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -663,12 +674,12 @@ foperator_le::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_le (r, type, op2.upper_bound ()))
- r.set_nan (fp_prop::NO);
+ if (build_le (r, type, op2))
+ r.clear_nan ();
break;
case BRS_FALSE:
- build_gt (r, type, op2.lower_bound ());
+ build_gt (r, type, op2);
break;
default:
@@ -687,12 +698,12 @@ foperator_le::op2_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_ge (r, type, op1.lower_bound ()))
- r.set_nan (fp_prop::NO);
+ if (build_ge (r, type, op1))
+ r.clear_nan ();
break;
case BRS_FALSE:
- build_lt (r, type, op1.upper_bound ());
+ build_lt (r, type, op1);
break;
default:
@@ -740,7 +751,7 @@ foperator_gt::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -757,16 +768,16 @@ foperator_gt::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_gt (r, type, op2.lower_bound ()))
+ if (build_gt (r, type, op2))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x > y implies x is not -INF.
frange_drop_ninf (r, type);
}
break;
case BRS_FALSE:
- build_le (r, type, op2.upper_bound ());
+ build_le (r, type, op2);
break;
default:
@@ -785,16 +796,16 @@ foperator_gt::op2_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_lt (r, type, op1.upper_bound ()))
+ if (build_lt (r, type, op1))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x > y implies y is not +INF.
frange_drop_inf (r, type);
}
break;
case BRS_FALSE:
- build_ge (r, type, op1.lower_bound ());
+ build_ge (r, type, op1);
break;
default:
@@ -842,7 +853,7 @@ foperator_ge::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -859,12 +870,12 @@ foperator_ge::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- build_ge (r, type, op2.lower_bound ());
- r.set_nan (fp_prop::NO);
+ build_ge (r, type, op2);
+ r.clear_nan ();
break;
case BRS_FALSE:
- build_lt (r, type, op2.upper_bound ());
+ build_lt (r, type, op2);
break;
default:
@@ -882,12 +893,12 @@ foperator_ge::op2_range (frange &r, tree type,
switch (get_bool_state (r, lhs, type))
{
case BRS_FALSE:
- build_gt (r, type, op1.lower_bound ());
+ build_gt (r, type, op1);
break;
case BRS_TRUE:
- build_le (r, type, op1.upper_bound ());
- r.set_nan (fp_prop::NO);
+ build_le (r, type, op1);
+ r.clear_nan ();
break;
default:
@@ -925,10 +936,10 @@ foperator_unordered::fold_range (irange &r, tree type,
relation_kind) const
{
// UNORDERED is TRUE if either operand is a NAN.
- if (op1.known_nan () || op2.known_nan ())
+ if (op1.known_isnan () || op2.known_isnan ())
r = range_true (type);
// UNORDERED is FALSE if neither operand is a NAN.
- else if (!op1.maybe_nan () && !op2.maybe_nan ())
+ else if (!op1.maybe_isnan () && !op2.maybe_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -947,14 +958,14 @@ foperator_unordered::op1_range (frange &r, tree type,
r.set_varying (type);
// Since at least one operand must be NAN, if one of them is
// not, the other must be.
- if (!op2.maybe_nan ())
- frange_set_nan (r, type);
+ if (!op2.maybe_isnan ())
+ r.set_nan (type);
break;
case BRS_FALSE:
r.set_varying (type);
// A false UNORDERED means both operands are !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
default:
@@ -991,9 +1002,9 @@ foperator_ordered::fold_range (irange &r, tree type,
const frange &op1, const frange &op2,
relation_kind) const
{
- if (!op1.maybe_nan () && !op2.maybe_nan ())
+ if (!op1.maybe_isnan () && !op2.maybe_isnan ())
r = range_true (type);
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -1011,14 +1022,14 @@ foperator_ordered::op1_range (frange &r, tree type,
case BRS_TRUE:
r.set_varying (type);
// The TRUE side of op1 ORDERED op2 implies op1 is !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
case BRS_FALSE:
r.set_varying (type);
// The FALSE side of op1 ORDERED op1 implies op1 is !NAN.
if (rel == VREL_EQ)
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
default:
diff --git a/gcc/reg-stack.cc b/gcc/reg-stack.cc
index fd03250..95e0e61 100644
--- a/gcc/reg-stack.cc
+++ b/gcc/reg-stack.cc
@@ -1073,7 +1073,8 @@ move_for_stack_reg (rtx_insn *insn, stack_ptr regstack, rtx pat)
break;
/* The destination must be dead, or life analysis is borked. */
- gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG);
+ gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG
+ || any_malformed_asm);
/* If the source is not live, this is yet another case of
uninitialized variables. Load up a NaN instead. */
diff --git a/gcc/targhooks.cc b/gcc/targhooks.cc
index b15ae19..d17d393 100644
--- a/gcc/targhooks.cc
+++ b/gcc/targhooks.cc
@@ -93,6 +93,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
+#include "options.h"
bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
@@ -1181,9 +1182,21 @@ default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
static bool issued_error;
if (!issued_error)
{
+ const char *name = NULL;
+ for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL;
+ ++i)
+ if (flag_zero_call_used_regs == zero_call_used_regs_opts[i].flag)
+ {
+ name = zero_call_used_regs_opts[i].name;
+ break;
+ }
+
+ if (!name)
+ name = "";
+
issued_error = true;
- sorry ("%qs not supported on this target",
- "-fzero-call-used-regs");
+ sorry ("argument %qs is not supported for %qs on this target",
+ name, "-fzero-call-used-regs");
}
}
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 21459ed..a6048da 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,249 @@
+2022-09-19 Marek Polacek <polacek@redhat.com>
+
+ PR c/106947
+ * c-c++-common/Waddress-7.c: New test.
+
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * gfortran.dg/ieee/modes_1.f90: New test.
+
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * gfortran.dg/ieee/rounding_2.f90: New test.
+
+2022-09-18 Julian Brown <julian@codesourcery.com>
+
+ * g++.dg/gomp/target-lambda-1.C: Adjust expected scan output.
+
+2022-09-18 Palmer Dabbelt <palmer@rivosinc.com>
+
+ * gcc.dg/tree-ssa/gen-vect-34.c: Skip RISC-V targets.
+
+2022-09-17 Patrick Palka <ppalka@redhat.com>
+
+ * g++.dg/modules/typename-friend_a.C: New test.
+ * g++.dg/modules/typename-friend_b.C: New test.
+
+2022-09-17 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106958
+ * gcc.c-torture/compile/pr106958.c: New test.
+
+2022-09-16 Eugene Rozenfeld <erozen@microsoft.com>
+
+ * gcc.dg/tree-prof/indir-call-prof-2.c: Fix dg-final-use-autofdo.
+
+2022-09-16 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/92505
+ * g++.dg/cpp0x/constexpr-mutable3.C: New test.
+ * g++.dg/cpp1y/constexpr-mutable1.C: New test.
+
+2022-09-16 Jason Merrill <jason@redhat.com>
+
+ PR c++/106858
+ * g++.dg/gomp/map-3.C: New test.
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/106857
+ * gfortran.dg/pr106857.f90: New test.
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+ Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/104314
+ * gfortran.dg/pr104314.f90: New test.
+
+2022-09-15 Joseph Myers <joseph@codesourcery.com>
+
+ * gcc.dg/c2x-float-2.c: Require inff effective-target.
+ * gcc.dg/c2x-float-11.c: New test.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * g++.dg/tree-ssa/pr106922.C: New testcase.
+
+2022-09-15 Julian Brown <julian@codesourcery.com>
+
+ * c-c++-common/gomp/target-50.c: Modify scan pattern.
+
+2022-09-15 Julian Brown <julian@codesourcery.com>
+
+ * c-c++-common/goacc/mdc-2.c: Update expected errors.
+ * g++.dg/goacc/mdc.C: Likewise.
+
+2022-09-15 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/106550
+ * gcc.target/powerpc/pr106550.c: New test.
+ * gcc.target/powerpc/pr106550_1.c: New test.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * g++.dg/goacc/member-array-acc.C: New test.
+ * g++.dg/gomp/member-array-omp.C: New test.
+ * g++.dg/gomp/target-3.C: Update expected output.
+ * g++.dg/gomp/target-lambda-1.C: Likewise.
+ * g++.dg/gomp/target-this-2.C: Likewise.
+ * c-c++-common/goacc/deep-copy-arrayofstruct.c: Move test from here.
+ * c-c++-common/gomp/target-50.c: New test.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106938
+ * gcc.dg/pr106938.c: New testcase.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ PR tree-optimization/106936
+ * g++.dg/tree-ssa/pr106936.C: New test.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * g++.dg/gomp/target-lambda-1.C: Adjust expected output.
+ * g++.dg/gomp/target-this-3.C: Likewise.
+ * g++.dg/gomp/target-this-4.C: Likewise.
+
+2022-09-14 Robin Dapp <rdapp@linux.ibm.com>
+
+ * gcc.target/s390/ifcvt-one-insn-bool.c: Add -mzarch.
+ * gcc.target/s390/ifcvt-one-insn-char.c: Dito.
+ * gcc.target/s390/ifcvt-two-insns-bool.c: Dito.
+ * gcc.target/s390/ifcvt-two-insns-int.c: Dito.
+ * gcc.target/s390/ifcvt-two-insns-long.c: Add -mzarch and change
+ long into long long.
+
+2022-09-14 Robin Dapp <rdapp@linux.ibm.com>
+
+ * gcc.target/s390/vector/vperm-rev-z14.c: Add -save-temps.
+ * gcc.target/s390/vector/vperm-rev-z15.c: Likewise.
+
+2022-09-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106878
+ * gcc.c-torture/compile/pr106878.c: New test.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106934
+ * gfortran.dg/pr106934.f90: New testcase.
+
+2022-09-14 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/pr106905.c: New test.
+ * gcc.target/ia64/pr106905.c: New test.
+
+2022-09-14 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+ Yvan ROUX <yvan.roux@foss.st.com>
+
+ PR target/95720
+ * lib/g++.exp: Moved gluefile block to after flags have been
+ prefixed for the target_compile call.
+ * lib/gcc.exp: Likewise.
+ * lib/wrapper.exp: Reset adjusted state flag.
+
+2022-09-13 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR target/106877
+ * g++.dg/ext/pr106877.C: New test case.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * g++.dg/cpp1z/noexcept-type26.C: New test.
+ * g++.dg/cpp2a/explicit19.C: New test.
+ * g++.dg/ext/integer-pack6.C: New test.
+
+2022-09-13 Kewen Lin <linkw@linux.ibm.com>
+
+ PR target/104482
+ * gcc.target/powerpc/pr104482.c: New test.
+
+2022-09-13 Kewen.Lin <linkw@gcc.gnu.org>
+
+ PR target/105485
+ * g++.target/powerpc/pr105485.C: New test.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/ldp_stp_20.c: New test.
+ * gcc.target/aarch64/ldp_stp_21.c: Likewise.
+ * gcc.target/aarch64/ldp_stp_22.c: Likewise.
+ * gcc.target/aarch64/ldp_stp_23.c: Likewise.
+ * gcc.target/aarch64/ldp_stp_24.c: Likewise.
+ * gcc.target/aarch64/movv16qi_1.c (gpr_to_gpr): New function.
+ * gcc.target/aarch64/movv8qi_1.c (gpr_to_gpr): Likewise.
+ * gcc.target/aarch64/movv16qi_2.c: New test.
+ * gcc.target/aarch64/movv16qi_3.c: Likewise.
+ * gcc.target/aarch64/movv2di_1.c: Likewise.
+ * gcc.target/aarch64/movv2x16qi_1.c: Likewise.
+ * gcc.target/aarch64/movv2x8qi_1.c: Likewise.
+ * gcc.target/aarch64/movv3x16qi_1.c: Likewise.
+ * gcc.target/aarch64/movv3x8qi_1.c: Likewise.
+ * gcc.target/aarch64/movv4x16qi_1.c: Likewise.
+ * gcc.target/aarch64/movv4x8qi_1.c: Likewise.
+ * gcc.target/aarch64/movv8qi_2.c: Likewise.
+ * gcc.target/aarch64/movv8qi_3.c: Likewise.
+ * gcc.target/aarch64/vect_unary_2.c: Likewise.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/acle/ls64_asm_2.c: New test.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/101906
+ * g++.dg/template/evaluated1.C: New test.
+ * g++.dg/template/evaluated1a.C: New test.
+ * g++.dg/template/evaluated1b.C: New test.
+ * g++.dg/template/evaluated1c.C: New test.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106893
+ PR c++/90451
+ * g++.dg/cpp1y/auto-fn65.C: New test.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/93259
+ * g++.dg/cpp0x/initlist-array17.C: New test.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106567
+ * g++.dg/cpp0x/lambda/lambda-array4.C: New test.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ PR c++/86491
+ * g++.dg/warn/anonymous-namespace-3.C: Use separate dg-warning
+ directives for C++98 and everything else.
+ * g++.dg/warn/Wsubobject-linkage-5.C: New test.
+
+2022-09-12 Joseph Myers <joseph@codesourcery.com>
+
+ * gcc.dg/atomic/c2x-stdatomic-var-init-1.c: New test.
+
+2022-09-12 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+
+ * g++.dg/gcov/gcov.exp: Respect triplet when looking for gcov.
+ * gcc.misc-tests/gcov.exp: Likewise.
+
+2022-09-12 Joffrey Huguet <huguet@adacore.com>
+
+ * gnat.dg/aspect2.adb: Removed.
+ * gnat.dg/aspect2.ads: Removed.
+ * gnat.dg/config_pragma1.adb: Removed.
+ * gnat.dg/config_pragma1_pkg.ads: Removed.
+ * gnat.dg/equal8.adb: Removed.
+ * gnat.dg/equal8.ads: Removed.
+ * gnat.dg/equal8_pkg.ads: Removed.
+ * gnat.dg/formal_containers.adb: Removed.
+ * gnat.dg/iter1.adb: Removed.
+ * gnat.dg/iter1.ads: Removed.
+
2022-09-11 Tim Lange <mail@tim-lange.me>
PR analyzer/106845
diff --git a/gcc/testsuite/c-c++-common/Waddress-7.c b/gcc/testsuite/c-c++-common/Waddress-7.c
new file mode 100644
index 0000000..1799485
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/Waddress-7.c
@@ -0,0 +1,22 @@
+/* PR c/106947 */
+/* { dg-do compile } */
+/* { dg-options "-Waddress" } */
+
+#ifndef __cplusplus
+# define bool _Bool
+#endif
+
+#pragma GCC diagnostic ignored "-Waddress"
+int s; /* { dg-bogus "declared" } */
+bool e = &s;
+int
+main ()
+{
+ int error = 0;
+ {
+ bool e1 = &s;
+ if (!e1)
+ error = 1;
+ }
+ return error;
+}
diff --git a/gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c b/gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c
deleted file mode 100644
index 4247607..0000000
--- a/gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/* { dg-do compile } */
-
-#include <stdlib.h>
-#include <stdio.h>
-
-typedef struct {
- int *a;
- int *b;
- int *c;
-} mystruct;
-
-int main(int argc, char* argv[])
-{
- const int N = 1024;
- const int S = 32;
- mystruct *m = (mystruct *) calloc (S, sizeof (*m));
- int i, j;
-
- for (i = 0; i < S; i++)
- {
- m[i].a = (int *) malloc (N * sizeof (int));
- m[i].b = (int *) malloc (N * sizeof (int));
- m[i].c = (int *) malloc (N * sizeof (int));
- }
-
- for (j = 0; j < S; j++)
- for (i = 0; i < N; i++)
- {
- m[j].a[i] = 0;
- m[j].b[i] = 0;
- m[j].c[i] = 0;
- }
-
-#pragma acc enter data copyin(m[0:1])
-
- for (int i = 0; i < 99; i++)
- {
- int j, k;
- for (k = 0; k < S; k++)
-#pragma acc parallel loop copy(m[k].a[0:N])
- for (j = 0; j < N; j++)
- m[k].a[j]++;
-
- for (k = 0; k < S; k++)
-#pragma acc parallel loop copy(m[k].b[0:N], m[k].c[5:N-10])
- for (j = 0; j < N; j++)
- {
- m[k].b[j]++;
- if (j > 5 && j < N - 5)
- m[k].c[j]++;
- }
- }
-
-#pragma acc exit data copyout(m[0:1])
-
- for (j = 0; j < S; j++)
- {
- for (i = 0; i < N; i++)
- {
- if (m[j].a[i] != 99)
- abort ();
- if (m[j].b[i] != 99)
- abort ();
- if (i > 5 && i < N-5)
- {
- if (m[j].c[i] != 99)
- abort ();
- }
- else
- {
- if (m[j].c[i] != 0)
- abort ();
- }
- }
-
- free (m[j].a);
- free (m[j].b);
- free (m[j].c);
- }
- free (m);
-
- return 0;
-}
diff --git a/gcc/testsuite/c-c++-common/goacc/mdc-2.c b/gcc/testsuite/c-c++-common/goacc/mdc-2.c
index df3ce54..246625c 100644
--- a/gcc/testsuite/c-c++-common/goacc/mdc-2.c
+++ b/gcc/testsuite/c-c++-common/goacc/mdc-2.c
@@ -37,7 +37,9 @@ t1 ()
#pragma acc exit data detach(z[:]) /* { dg-error "expected single pointer in .detach. clause" } */
/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(z[3]) /* { dg-error "expected pointer in .attach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc exit data detach(z[3]) /* { dg-error "expected pointer in .detach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(s.e)
#pragma acc exit data detach(s.e) attach(z) /* { dg-error ".attach. is not valid for" } */
diff --git a/gcc/testsuite/c-c++-common/gomp/target-50.c b/gcc/testsuite/c-c++-common/gomp/target-50.c
new file mode 100644
index 0000000..41f1d37
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/target-50.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-fdump-tree-gimple" } */
+
+typedef struct
+{
+ int *arr;
+} L;
+
+int main()
+{
+ L *tmp;
+
+ /* There shouldn't be an order dependency here... */
+
+ #pragma omp target map(to: tmp->arr) map(tofrom: tmp->arr[0:10])
+ { }
+
+ #pragma omp target map(tofrom: tmp->arr[0:10]) map(to: tmp->arr)
+ { }
+/* { dg-final { scan-tree-dump-times {map\(struct:\*tmp \[len: 1\]\) map\(to:tmp[._0-9]*->arr \[len: [0-9]+\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(attach:tmp[._0-9]*->arr \[bias: 0\]\)} 2 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */
+
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C
new file mode 100644
index 0000000..51499fa
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C
@@ -0,0 +1,9 @@
+// PR c++/92505
+// { dg-do compile { target c++11 } }
+
+struct A { mutable int m; };
+
+constexpr int f(A a) { return a.m; }
+
+static_assert(f({42}) == 42, "");
+// { dg-error "non-constant|mutable" "" { target c++11_only } .-1 }
diff --git a/gcc/testsuite/g++.dg/cpp0x/initlist-array17.C b/gcc/testsuite/g++.dg/cpp0x/initlist-array17.C
new file mode 100644
index 0000000..c4284a7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/initlist-array17.C
@@ -0,0 +1,37 @@
+// PR c++/93259
+// { dg-do compile { target c++11 } }
+
+template <class T, class U> struct is_same;
+template <class T> struct is_same<T,T> { };
+
+using Array = int[];
+
+template <typename ...Ts>
+void bar1(Ts ...)
+{
+ auto && array = Array{ 1, 2, 3 };
+
+ is_same<int (&&)[3], decltype(array)>{}; // this fails, deduces array as int (&&) []
+}
+
+template <typename T>
+void bar2()
+{
+ auto && array = Array{ 1, 2, 3 };
+
+ is_same<int (&&)[3], decltype(array)>{}; // this fails, deduces array as int (&&) []
+}
+
+void bar3()
+{
+ auto && array = Array{ 1, 2, 3 };
+
+ is_same<int (&&)[3], decltype(array)>{}; // OK
+}
+
+int main()
+{
+ bar1<int>(1, 2, 3);
+ bar2<int>();
+ bar3();
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C
new file mode 100644
index 0000000..94ec7f8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C
@@ -0,0 +1,29 @@
+// PR c++/106567
+// { dg-do compile { target c++11 } }
+
+template <class V>
+void urgh()
+{
+ const V x[] = {V(0), V(1), V(2), V(0)};
+
+ [&]() {
+ for (auto& v : x) {}
+ }();
+}
+
+void no_urgh()
+{
+ using V = int;
+
+ const V x[] = {V(0), V(1), V(2), V(0)};
+
+ [&]() {
+ for (auto& v : x) {}
+ }();
+}
+
+int main()
+{
+ no_urgh();
+ urgh<int>();
+}
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn65.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn65.C
new file mode 100644
index 0000000..78bb004
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn65.C
@@ -0,0 +1,10 @@
+// PR c++/106893
+// { dg-do compile { target c++14 } }
+
+template <typename T>
+struct CoordTraits
+{
+ static auto GetX(T const &p) { return 1; }
+};
+typedef CoordTraits<int> Traits;
+static constexpr auto GetX = Traits::GetX;
diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C
new file mode 100644
index 0000000..6c47988
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C
@@ -0,0 +1,16 @@
+// PR c++/92505
+// { dg-do compile { target c++14 } }
+
+struct S { mutable int m; };
+
+static_assert(S{42}.m == 42, "");
+
+constexpr int f() {
+ S s = {40};
+ s.m++;
+ const auto& cs = s;
+ ++cs.m;
+ return cs.m;
+}
+
+static_assert(f() == 42, "");
diff --git a/gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C b/gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C
new file mode 100644
index 0000000..491df4d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C
@@ -0,0 +1,12 @@
+// Verify a non-constant conditional noexcept-specifier in a function type
+// respects SFINAE.
+// { dg-do compile { target c++17 } }
+
+template<class T> void f(void() noexcept(T::value)) = delete;
+template<class T> void f(...);
+
+struct B { static bool value; };
+
+int main() {
+ f<B>(nullptr);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/explicit19.C b/gcc/testsuite/g++.dg/cpp2a/explicit19.C
new file mode 100644
index 0000000..4790381
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/explicit19.C
@@ -0,0 +1,12 @@
+// Verify a conditional explicit-specifier is a SFINAE context.
+// { dg-do compile { target c++20 } }
+
+struct A {
+ template<class T> explicit(T::value) A(T) = delete;
+ A(...);
+};
+
+struct B { static bool value; };
+
+A x(0);
+A y(B{});
diff --git a/gcc/testsuite/g++.dg/ext/integer-pack6.C b/gcc/testsuite/g++.dg/ext/integer-pack6.C
new file mode 100644
index 0000000..dc43116
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/integer-pack6.C
@@ -0,0 +1,13 @@
+// Verify a non-constant argument to __integer_pack respects SFINAE.
+// { dg-do compile { target c++11 } }
+
+template<int...> struct A { };
+
+template<class T> auto f(int) -> A<__integer_pack(T::value)...> = delete;
+template<class T> void f(...);
+
+struct B { static int value; };
+
+int main() {
+ f<B>(0);
+}
diff --git a/gcc/testsuite/g++.dg/ext/pr106877.C b/gcc/testsuite/g++.dg/ext/pr106877.C
new file mode 100644
index 0000000..6bffed9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/pr106877.C
@@ -0,0 +1,13 @@
+// PR target/106877
+// { dg-do compile { target i?86-*-* x86_64-*-* } }
+// { dg-options "-O1 -m16 -mtune=sandybridge -flive-range-shrinkage -fno-dce" }
+
+void
+foo (float b, double c)
+{
+ for (int e = 0; e < 2; e++)
+ {
+ asm volatile ("" : "+f" (c)); // { dg-error "must specify a single register" }
+ asm ("" : "+rm" (c = b));
+ }
+}
diff --git a/gcc/testsuite/g++.dg/gcov/gcov.exp b/gcc/testsuite/g++.dg/gcov/gcov.exp
index 88acd95..04e7a01 100644
--- a/gcc/testsuite/g++.dg/gcov/gcov.exp
+++ b/gcc/testsuite/g++.dg/gcov/gcov.exp
@@ -24,9 +24,9 @@ global GXX_UNDER_TEST
# Find gcov in the same directory as $GXX_UNDER_TEST.
if { ![is_remote host] && [string match "*/*" [lindex $GXX_UNDER_TEST 0]] } {
- set GCOV [file dirname [lindex $GXX_UNDER_TEST 0]]/gcov
+ set GCOV [file dirname [lindex $GXX_UNDER_TEST 0]]/[transform gcov]
} else {
- set GCOV gcov
+ set GCOV [transform gcov]
}
# Initialize harness.
diff --git a/gcc/testsuite/g++.dg/goacc/mdc.C b/gcc/testsuite/g++.dg/goacc/mdc.C
index e8ba1cc..9d460f2 100644
--- a/gcc/testsuite/g++.dg/goacc/mdc.C
+++ b/gcc/testsuite/g++.dg/goacc/mdc.C
@@ -43,7 +43,9 @@ t1 ()
#pragma acc exit data detach(rz[:]) /* { dg-error "expected single pointer in .detach. clause" } */
/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(rz[3]) /* { dg-error "expected pointer in .attach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc exit data detach(rz[3]) /* { dg-error "expected pointer in .detach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(rs.e)
#pragma acc exit data detach(rs.e) attach(rz) /* { dg-error ".attach. is not valid for" } */
diff --git a/gcc/testsuite/g++.dg/goacc/member-array-acc.C b/gcc/testsuite/g++.dg/goacc/member-array-acc.C
new file mode 100644
index 0000000..9993768
--- /dev/null
+++ b/gcc/testsuite/g++.dg/goacc/member-array-acc.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-fdump-tree-gimple" } */
+
+struct Foo {
+ float *a;
+ void init(int N) {
+ a = new float[N];
+ #pragma acc enter data create(a[0:N])
+ }
+};
+int main() { Foo x; x.init(1024); }
+
+/* { dg-final { scan-tree-dump {struct:\*\(struct Foo \*\) this \[len: 1\]\) map\(alloc:this->a \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: _[0-9]+\]\) map\(attach:this->a \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/map-3.C b/gcc/testsuite/g++.dg/gomp/map-3.C
new file mode 100644
index 0000000..c45f850
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/map-3.C
@@ -0,0 +1,9 @@
+// PR c++/106858
+// { dg-additional-options "-fopenmp -fsanitize=undefined" }
+
+class A {
+ void f() {
+ #pragma omp target map(this->f) // { dg-error "member function" }
+ ;
+ }
+};
diff --git a/gcc/testsuite/g++.dg/gomp/member-array-omp.C b/gcc/testsuite/g++.dg/gomp/member-array-omp.C
new file mode 100644
index 0000000..a53aa44
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/member-array-omp.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-fdump-tree-gimple" } */
+
+struct Foo {
+ float *a;
+ void init(int N) {
+ a = new float[N];
+ #pragma omp target enter data map(alloc:a[0:N])
+ }
+};
+int main() { Foo x; x.init(1024); }
+
+/* { dg-final { scan-tree-dump {map\(alloc:\*_[0-9]+ \[len: _[0-9]+\]\) map\(attach:this->a \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-3.C b/gcc/testsuite/g++.dg/gomp/target-3.C
index f4d40ec..432f026 100644
--- a/gcc/testsuite/g++.dg/gomp/target-3.C
+++ b/gcc/testsuite/g++.dg/gomp/target-3.C
@@ -33,4 +33,6 @@ T<N>::bar (int x)
template struct T<0>;
-/* { dg-final { scan-tree-dump-times "map\\(struct:\\*this \\\[len: 2\\\]\\) map\\(alloc:this->a \\\[len: \[0-9\]+\\\]\\) map\\(alloc:this->b \\\[len: \[0-9\]+\\\]\\)" 4 "gimple" } } */
+/* { dg-final { scan-tree-dump-times "map\\(struct:\\*\\(struct S \\*\\) this \\\[len: 2\\\]\\) map\\(alloc:this->a \\\[len: \[0-9\]+\\\]\\) map\\(alloc:this->b \\\[len: \[0-9\]+\\\]\\)" 2 "gimple" } } */
+
+/* { dg-final { scan-tree-dump-times "map\\(struct:\\*\\(struct T \\*\\) this \\\[len: 2\\\]\\) map\\(alloc:this->a \\\[len: \[0-9\]+\\\]\\) map\\(alloc:this->b \\\[len: \[0-9\]+\\\]\\)" 2 "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-lambda-1.C b/gcc/testsuite/g++.dg/gomp/target-lambda-1.C
index 7f83f92..5ce8cea 100644
--- a/gcc/testsuite/g++.dg/gomp/target-lambda-1.C
+++ b/gcc/testsuite/g++.dg/gomp/target-lambda-1.C
@@ -87,8 +87,8 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(b\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:iptr \[pointer assign, bias: 0\]\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(attach_zero_length_array_section:__closure->__iptr \[bias: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(b\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:iptr \[pointer assign, bias: 0\]\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:__closure->__iptr \[bias: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(attach_zero_length_array_section:loop\.__data1 \[bias: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:loop\.__data1 \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(attach_zero_length_array_section:loop\.__data2 \[bias: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:loop\.__data2 \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-this-2.C b/gcc/testsuite/g++.dg/gomp/target-this-2.C
index 8a76bb8..cc08e7e 100644
--- a/gcc/testsuite/g++.dg/gomp/target-this-2.C
+++ b/gcc/testsuite/g++.dg/gomp/target-this-2.C
@@ -46,4 +46,4 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(m\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:v \[len: [0-9]+\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {map\(alloc:MEM\[\(char \*\)_[0-9]+\] \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(m\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:v \[len: [0-9]+\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-this-3.C b/gcc/testsuite/g++.dg/gomp/target-this-3.C
index 91cfbd6..bc2cc0b 100644
--- a/gcc/testsuite/g++.dg/gomp/target-this-3.C
+++ b/gcc/testsuite/g++.dg/gomp/target-this-3.C
@@ -100,6 +100,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:this->refptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:this->refptr \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(attach_zero_length_array_section:this->ptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:this->ptr \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-this-4.C b/gcc/testsuite/g++.dg/gomp/target-this-4.C
index e4b2a71..9ade3cc 100644
--- a/gcc/testsuite/g++.dg/gomp/target-this-4.C
+++ b/gcc/testsuite/g++.dg/gomp/target-this-4.C
@@ -102,6 +102,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: 1\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:_[0-9]+->refptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:_[0-9]+->refptr \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/modules/typename-friend_a.C b/gcc/testsuite/g++.dg/modules/typename-friend_a.C
new file mode 100644
index 0000000..aa426fe
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/typename-friend_a.C
@@ -0,0 +1,11 @@
+// { dg-additional-options "-fmodules-ts" }
+export module foo;
+// { dg-module-cmi foo }
+
+template<class T>
+struct A {
+ friend typename T::type;
+ friend void f(A) { }
+private:
+ static constexpr int value = 42;
+};
diff --git a/gcc/testsuite/g++.dg/modules/typename-friend_b.C b/gcc/testsuite/g++.dg/modules/typename-friend_b.C
new file mode 100644
index 0000000..97da9d8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/typename-friend_b.C
@@ -0,0 +1,6 @@
+// { dg-additional-options "-fmodules-ts" }
+module foo;
+
+struct C;
+struct B { using type = C; };
+struct C { static_assert(A<B>::value == 42); };
diff --git a/gcc/testsuite/g++.dg/template/evaluated1.C b/gcc/testsuite/g++.dg/template/evaluated1.C
new file mode 100644
index 0000000..41845c6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1.C
@@ -0,0 +1,17 @@
+// PR c++/101906
+// Verify the template arguments of an alias template-id are evaluated even
+// in an unevaluated context.
+// { dg-do compile { target c++11 } }
+
+template<int, class T> using skip = T;
+
+template<class T>
+constexpr unsigned sizeof_() {
+ return sizeof(skip<(T(), 0), T>);
+}
+
+struct A {
+ int m = -1;
+};
+
+static_assert(sizeof_<A>() == sizeof(A), "");
diff --git a/gcc/testsuite/g++.dg/template/evaluated1a.C b/gcc/testsuite/g++.dg/template/evaluated1a.C
new file mode 100644
index 0000000..7828687
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1a.C
@@ -0,0 +1,16 @@
+// PR c++/101906
+// Like evaluated1.C, but where the unevaluated context is a
+// constraint instead of sizeof.
+// { dg-do compile { target c++20 } }
+
+template<int> using voidify = void;
+
+template<class T>
+concept constant_value_initializable
+ = requires { typename voidify<(T(), 0)>; };
+
+struct A {
+ int m = -1;
+};
+
+static_assert(constant_value_initializable<A>);
diff --git a/gcc/testsuite/g++.dg/template/evaluated1b.C b/gcc/testsuite/g++.dg/template/evaluated1b.C
new file mode 100644
index 0000000..7994065
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1b.C
@@ -0,0 +1,17 @@
+// PR c++/101906
+// Like evaluated1.C, but using a function template instead of an
+// alias template.
+// { dg-do compile { target c++14 } }
+
+template<int, class T> T skip();
+
+template<class T>
+constexpr unsigned sizeof_() {
+ return sizeof(skip<(T(), 0), T>());
+}
+
+struct A {
+ int m = -1;
+};
+
+static_assert(sizeof_<A>() == sizeof(A), "");
diff --git a/gcc/testsuite/g++.dg/template/evaluated1c.C b/gcc/testsuite/g++.dg/template/evaluated1c.C
new file mode 100644
index 0000000..15c5582
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1c.C
@@ -0,0 +1,17 @@
+// PR c++/101906
+// Like evaluated1b.C, but using a variable template instead of a
+// function template.
+// { dg-do compile { target c++14 } }
+
+template<int, class T> T skip;
+
+template<class T>
+constexpr unsigned sizeof_() {
+ return sizeof(skip<(T(), 0), T>);
+}
+
+struct A {
+ int m = -1;
+};
+
+static_assert(sizeof_<A>() == sizeof(A), "");
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr106922.C b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
new file mode 100644
index 0000000..faf379b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
@@ -0,0 +1,91 @@
+// { dg-require-effective-target c++20 }
+// { dg-options "-O2 -fdump-tree-pre-details -fdump-tree-cddce3" }
+
+template <typename> struct __new_allocator {
+ void deallocate(int *, int) { operator delete(0); }
+};
+template <typename _Tp> using __allocator_base = __new_allocator<_Tp>;
+template <typename> struct allocator : __allocator_base<int> {
+ [[__gnu__::__always_inline__]] void deallocate(int *__p, int __n) {
+ __allocator_base<int>::deallocate(__p, __n);
+ }
+};
+template <typename> struct allocator_traits;
+template <typename _Tp> struct allocator_traits<allocator<_Tp>> {
+ using allocator_type = allocator<_Tp>;
+ using pointer = _Tp *;
+ using size_type = int;
+ template <typename _Up> using rebind_alloc = allocator<_Up>;
+ static void deallocate(allocator_type &__a, pointer __p, size_type __n) {
+ __a.deallocate(__p, __n);
+ }
+};
+template <typename _Alloc> struct __alloc_traits : allocator_traits<_Alloc> {
+ typedef allocator_traits<_Alloc> _Base_type;
+ template <typename _Tp> struct rebind {
+ typedef _Base_type::template rebind_alloc<_Tp> other;
+ };
+};
+long _M_deallocate___n;
+struct _Vector_base {
+ typedef __alloc_traits<allocator<int>>::rebind<int>::other _Tp_alloc_type;
+ typedef __alloc_traits<_Tp_alloc_type>::pointer pointer;
+ struct _Vector_impl_data {
+ pointer _M_start;
+ };
+ struct _Vector_impl : _Tp_alloc_type, _Vector_impl_data {};
+ ~_Vector_base() { _M_deallocate(_M_impl._M_start); }
+ _Vector_impl _M_impl;
+ void _M_deallocate(pointer __p) {
+ if (__p)
+ __alloc_traits<_Tp_alloc_type>::deallocate(_M_impl, __p,
+ _M_deallocate___n);
+ }
+};
+struct vector : _Vector_base {};
+struct aligned_storage {
+ int dummy_;
+ int *ptr_ref0;
+ vector &ref() {
+ vector *__trans_tmp_2;
+ void *__trans_tmp_1 = &dummy_;
+ union {
+ void *ap_pvoid;
+ vector *as_ptype;
+ } caster{__trans_tmp_1};
+ __trans_tmp_2 = caster.as_ptype;
+ return *__trans_tmp_2;
+ }
+};
+struct optional_base {
+ optional_base operator=(optional_base &) {
+ bool __trans_tmp_3 = m_initialized;
+ if (__trans_tmp_3)
+ m_initialized = false;
+ return *this;
+ }
+ ~optional_base() {
+ if (m_initialized)
+ m_storage.ref().~vector();
+ }
+ bool m_initialized;
+ aligned_storage m_storage;
+};
+struct optional : optional_base {
+ optional() : optional_base() {}
+};
+template <class> using Optional = optional;
+struct Trans_NS___cxx11_basic_stringstream {};
+void operator<<(Trans_NS___cxx11_basic_stringstream, int);
+int testfunctionfoo_myStructs[10];
+void testfunctionfoo() {
+ Optional<char> external, internal;
+ for (auto myStruct : testfunctionfoo_myStructs) {
+ Trans_NS___cxx11_basic_stringstream address_stream;
+ address_stream << myStruct;
+ external = internal;
+ }
+}
+
+// { dg-final { scan-tree-dump-times "Found fully redundant value" 4 "pre" { xfail { ! lp64 } } } }
+// { dg-final { scan-tree-dump-not "m_initialized" "cddce3" { xfail { ! lp64 } } } }
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr106936.C b/gcc/testsuite/g++.dg/tree-ssa/pr106936.C
new file mode 100644
index 0000000..c3096e0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr106936.C
@@ -0,0 +1,14 @@
+// { dg-do compile }
+// { dg-options "-O2 -fno-tree-ccp -fno-tree-forwprop -fno-tree-fre" }
+
+namespace testPointerToMemberMiscCasts2 {
+struct B {
+ int f;
+};
+struct L : public B { };
+struct R : public B { };
+struct D : public L, R { };
+ int B::* pb = &B::f;
+ int R::* pr = pb;
+ int D::* pdr = pr;
+}
diff --git a/gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C b/gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C
new file mode 100644
index 0000000..e2c2fd9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C
@@ -0,0 +1,7 @@
+// PR c++/86491
+// { dg-do compile { target c++11 } }
+
+template <int *> struct NT{};
+#line 6 "tM.C"
+static int d;
+struct D : NT<&d> {}; // { dg-warning "internal linkage" }
diff --git a/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C b/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C
index 8b72abd..ce5745b 100644
--- a/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C
+++ b/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C
@@ -7,7 +7,8 @@
struct B { std::auto_ptr<A> p; };
#line 10 "foo.C"
-struct C // { dg-warning "uses the anonymous namespace" }
+struct C // { dg-warning "has internal linkage" "" { target c++11 } }
+// { dg-warning "uses the anonymous namespace" "" { target c++98_only } .-1 }
{
std::auto_ptr<A> p;
};
diff --git a/gcc/testsuite/g++.target/powerpc/pr105485.C b/gcc/testsuite/g++.target/powerpc/pr105485.C
new file mode 100644
index 0000000..db1bd94
--- /dev/null
+++ b/gcc/testsuite/g++.target/powerpc/pr105485.C
@@ -0,0 +1,9 @@
+/* It's to verify no ICE here, ignore error/warning messages
+ since they are not test points here. */
+/* { dg-excess-errors "pr105485" } */
+
+template <class> void __builtin_vec_vslv();
+typedef __attribute__((altivec(vector__))) char T;
+T b (T c, T d) {
+ return __builtin_vec_vslv(c, d);
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr106878.c b/gcc/testsuite/gcc.c-torture/compile/pr106878.c
new file mode 100644
index 0000000..c845718
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr106878.c
@@ -0,0 +1,15 @@
+/* PR tree-optimization/106878 */
+
+typedef __INTPTR_TYPE__ intptr_t;
+typedef __UINTPTR_TYPE__ uintptr_t;
+int a;
+
+int
+foo (const int *c)
+{
+ uintptr_t d = ((intptr_t) c | (intptr_t) &a) & 65535 << 16;
+ intptr_t e = (intptr_t) c;
+ if (d != (e & 65535 << 16))
+ return 1;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr106958.c b/gcc/testsuite/gcc.c-torture/compile/pr106958.c
new file mode 100644
index 0000000..98e6554
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr106958.c
@@ -0,0 +1,13 @@
+/* PR tree-optimization/106958 */
+
+int a;
+void bar (int);
+
+void
+foo (char *x, char *y)
+{
+ int b = a != 0;
+ int c = x != 0;
+ int d = y != 0;
+ bar (b | c | d);
+}
diff --git a/gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c b/gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c
new file mode 100644
index 0000000..1978a410
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c
@@ -0,0 +1,9 @@
+/* Test ATOMIC_VAR_INIT not in C2x. */
+/* { dg-do compile } */
+/* { dg-options "-std=c2x -pedantic-errors" } */
+
+#include <stdatomic.h>
+
+#ifdef ATOMIC_VAR_INIT
+#error "ATOMIC_VAR_INIT defined"
+#endif
diff --git a/gcc/testsuite/gcc.dg/c2x-float-11.c b/gcc/testsuite/gcc.dg/c2x-float-11.c
new file mode 100644
index 0000000..0e2f3c0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/c2x-float-11.c
@@ -0,0 +1,9 @@
+/* Test INFINITY macro. Test when infinities not supported. */
+/* { dg-do compile { target { ! inff } } } */
+/* { dg-options "-std=c2x" } */
+
+#include <float.h>
+
+#ifdef INFINITY
+#error "INFINITY defined"
+#endif
diff --git a/gcc/testsuite/gcc.dg/c2x-float-2.c b/gcc/testsuite/gcc.dg/c2x-float-2.c
index 4f669fd..61a77f6 100644
--- a/gcc/testsuite/gcc.dg/c2x-float-2.c
+++ b/gcc/testsuite/gcc.dg/c2x-float-2.c
@@ -1,8 +1,8 @@
-/* Test INFINITY macro. Generic test even if infinities not
- supported. */
+/* Test INFINITY macro. Generic test. */
/* { dg-do run } */
/* { dg-options "-std=c2x -w" } */
/* { dg-add-options ieee } */
+/* { dg-require-effective-target inff } */
#include <float.h>
diff --git a/gcc/testsuite/gcc.dg/pr106938.c b/gcc/testsuite/gcc.dg/pr106938.c
new file mode 100644
index 0000000..7365a8c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr106938.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fno-ipa-pure-const -fno-tree-ccp -Wuninitialized" } */
+
+int n;
+
+void
+undefined (void);
+
+__attribute__ ((returns_twice)) int
+zero (void)
+{
+ return 0;
+}
+
+void
+bar (int)
+{
+ int i;
+
+ for (i = 0; i < -1; ++i)
+ n = 0;
+}
+
+__attribute__ ((simd)) void
+foo (void)
+{
+ int uninitialized;
+
+ undefined ();
+
+ while (uninitialized < 1) /* { dg-warning "uninitialized" } */
+ {
+ bar (zero ());
+ ++uninitialized;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c b/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c
index 594c3f3..1d64d9f 100644
--- a/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c
+++ b/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c
@@ -1,4 +1,4 @@
-/* { dg-options "-O2 -fno-early-inlining -fdump-ipa-profile-optimized -fdump-tree-einline-optimized" } */
+/* { dg-options "-O2 -fno-early-inlining -fdump-ipa-profile-optimized -fdump-ipa-afdo-optimized" } */
volatile int one;
static int
add1 (int val)
@@ -31,5 +31,5 @@ main (void)
}
/* { dg-final-use-not-autofdo { scan-ipa-dump "Indirect call -> direct call.* add1 .will resolve by ipa-profile" "profile"} } */
/* { dg-final-use-not-autofdo { scan-ipa-dump "Indirect call -> direct call.* sub1 .will resolve by ipa-profile" "profile"} } */
-/* { dg-final-use-autofdo { scan-tree-dump "Inlining add1/1 into main/4." "einline"} } */
-/* { dg-final-use-autofdo { scan-tree-dump "Inlining sub1/2 into main/4." "einline"} } */
+/* { dg-final-use-autofdo { scan-ipa-dump "Inlining add1/1 into main/4." "afdo"} } */
+/* { dg-final-use-autofdo { scan-ipa-dump "Inlining sub1/2 into main/4." "afdo"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c
index 8d2d364..41877e0 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c
@@ -13,4 +13,4 @@ float summul(int n, float *arg1, float *arg2)
return res1;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { avr-*-* pru-*-* } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { avr-*-* pru-*-* riscv*-*-* } } } } } */
diff --git a/gcc/testsuite/gcc.misc-tests/gcov.exp b/gcc/testsuite/gcc.misc-tests/gcov.exp
index 82376d9..b8e9661 100644
--- a/gcc/testsuite/gcc.misc-tests/gcov.exp
+++ b/gcc/testsuite/gcc.misc-tests/gcov.exp
@@ -24,9 +24,9 @@ global GCC_UNDER_TEST
# For now find gcov in the same directory as $GCC_UNDER_TEST.
if { ![is_remote host] && [string match "*/*" [lindex $GCC_UNDER_TEST 0]] } {
- set GCOV [file dirname [lindex $GCC_UNDER_TEST 0]]/gcov
+ set GCOV [file dirname [lindex $GCC_UNDER_TEST 0]]/[transform gcov]
} else {
- set GCOV gcov
+ set GCOV [transform gcov]
}
# Initialize harness.
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c b/gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c
new file mode 100644
index 0000000..1b42771
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O" } */
+
+#pragma GCC target "+ls64+nofp"
+
+#include "ls64_asm.c"
+
+/* { dg-final { scan-assembler-times {\tldp\t} 12 } } */
+/* { dg-final { scan-assembler-times {\tstp\t} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c
new file mode 100644
index 0000000..7e705e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c
@@ -0,0 +1,7 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#include "ldp_stp_6.c"
+
+/* { dg-final { scan-assembler "stp\td\[0-9\]+, d\[0-9\]+, \\\[x\[0-9\]+\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c
new file mode 100644
index 0000000..462e3c9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c
@@ -0,0 +1,7 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#include "ldp_stp_8.c"
+
+/* { dg-final { scan-assembler-times "ldp\td\[0-9\], d\[0-9\]+, \\\[x\[0-9\]+\\\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c
new file mode 100644
index 0000000..283c56d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c
@@ -0,0 +1,13 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+void
+foo (__Float32x4_t *ptr)
+{
+ ptr[0] = ptr[2];
+ ptr[1] = ptr[3];
+}
+
+/* { dg-final { scan-assembler {\tldp\tq[0-9]+, q[0-9]+} } } */
+/* { dg-final { scan-assembler {\tstp\tq[0-9]+, q[0-9]+} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c
new file mode 100644
index 0000000..b14976c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c
@@ -0,0 +1,16 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+void
+foo (char *char_ptr)
+{
+ __Float64x2_t *ptr = (__Float64x2_t *)(char_ptr + 1);
+ asm volatile ("" ::
+ "w" (ptr[1]),
+ "w" (ptr[2]),
+ "w" (ptr[3]),
+ "w" (ptr[4]));
+}
+
+/* { dg-final { scan-assembler-times {\tldp\tq[0-9]+, q[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c
new file mode 100644
index 0000000..a99426e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c
@@ -0,0 +1,16 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+void
+foo (char *char_ptr)
+{
+ __Float64x2_t *ptr = (__Float64x2_t *)(char_ptr + 1);
+ asm volatile ("" :
+ "=w" (ptr[1]),
+ "=w" (ptr[2]),
+ "=w" (ptr[3]),
+ "=w" (ptr[4]));
+}
+
+/* { dg-final { scan-assembler-times {\tstp\tq[0-9]+, q[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c
index 8a6afb1..cac4241 100644
--- a/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c
@@ -80,3 +80,24 @@ fpr_to_gpr (v16qi q0)
x0 = q0;
asm volatile ("" :: "r" (x0));
}
+
+/*
+** gpr_to_gpr:
+** (
+** mov x0, x2
+** mov x1, x3
+** |
+** mov x1, x3
+** mov x0, x2
+** )
+** ret
+*/
+void
+gpr_to_gpr ()
+{
+ register v16qi x0 asm ("x0");
+ register v16qi x2 asm ("x2");
+ asm volatile ("" : "=r" (x2));
+ x0 = x2;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_2.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_2.c
new file mode 100644
index 0000000..08a0a19
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_2.c
@@ -0,0 +1,27 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_GENERAL(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE zero_##TYPE () { return (TYPE) {}; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_GENERAL (__Int8x16_t)
+TEST_GENERAL (__Int16x8_t)
+TEST_GENERAL (__Int32x4_t)
+TEST_GENERAL (__Int64x2_t)
+TEST_GENERAL (__Bfloat16x8_t)
+TEST_GENERAL (__Float16x8_t)
+TEST_GENERAL (__Float32x4_t)
+TEST_GENERAL (__Float64x2_t)
+
+__Int8x16_t const_s8x16 () { return (__Int8x16_t) { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; }
+__Int16x8_t const_s16x8 () { return (__Int16x8_t) { 1, 0, 1, 0, 1, 0, 1, 0 }; }
+__Int32x4_t const_s32x4 () { return (__Int32x4_t) { 1, 2, 3, 4 }; }
+__Int64x2_t const_s64x2 () { return (__Int64x2_t) { 100, 100 }; }
+__Float16x8_t const_f16x8 () { return (__Float16x8_t) { 2, 2, 2, 2, 2, 2, 2, 2 }; }
+__Float32x4_t const_f32x4 () { return (__Float32x4_t) { 1, 2, 1, 2 }; }
+__Float64x2_t const_f64x2 () { return (__Float64x2_t) { 32, 32 }; }
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_3.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_3.c
new file mode 100644
index 0000000..d43b994
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_3.c
@@ -0,0 +1,30 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE \
+ test_##TYPE (void) \
+ { \
+ typedef TYPE v __attribute__((aligned(1))); \
+ register v *ptr asm ("x0"); \
+ asm volatile ("" : "=r" (ptr)); \
+ return *ptr; \
+ }
+
+TEST_VECTOR (__Int8x16_t)
+TEST_VECTOR (__Int16x8_t)
+TEST_VECTOR (__Int32x4_t)
+TEST_VECTOR (__Int64x2_t)
+TEST_VECTOR (__Bfloat16x8_t)
+TEST_VECTOR (__Float16x8_t)
+TEST_VECTOR (__Float32x4_t)
+TEST_VECTOR (__Float64x2_t)
+
+/*
+** test___Int8x16_t:
+** ldr q0, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv2di_1.c b/gcc/testsuite/gcc.target/aarch64/movv2di_1.c
new file mode 100644
index 0000000..e3b55fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv2di_1.c
@@ -0,0 +1,103 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nothing+nosimd+fp"
+
+typedef long long v2di __attribute__((vector_size(16)));
+
+/*
+** fpr_to_fpr:
+** sub sp, sp, #16
+** str q1, \[sp\]
+** ldr q0, \[sp\]
+** add sp, sp, #?16
+** ret
+*/
+v2di
+fpr_to_fpr (v2di q0, v2di q1)
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+v2di
+gpr_to_fpr ()
+{
+ register v2di x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+v2di
+zero_to_fpr ()
+{
+ return (v2di) {};
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** fmov x0, d0
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** fmov x0, d0
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** fmov x1, d0
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** fmov x1, d0
+** )
+** ret
+*/
+void
+fpr_to_gpr (v2di q0)
+{
+ register v2di x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
+
+/*
+** gpr_to_gpr:
+** (
+** mov x0, x2
+** mov x1, x3
+** |
+** mov x1, x3
+** mov x0, x2
+** )
+** ret
+*/
+void
+gpr_to_gpr ()
+{
+ register v2di x0 asm ("x0");
+ register v2di x2 asm ("x2");
+ asm volatile ("" : "=r" (x2));
+ x0 = x2;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c
new file mode 100644
index 0000000..90e3b42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c
@@ -0,0 +1,40 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x16x2_t)
+TEST_VECTOR (int16x8x2_t)
+TEST_VECTOR (int32x4x2_t)
+TEST_VECTOR (int64x2x2_t)
+TEST_VECTOR (float16x8x2_t)
+TEST_VECTOR (bfloat16x8x2_t)
+TEST_VECTOR (float32x4x2_t)
+TEST_VECTOR (float64x2x2_t)
+
+/*
+** mov_int8x16x2_t:
+** sub sp, sp, #32
+** stp q2, q3, \[sp\]
+** ldp q0, q1, \[sp\]
+** add sp, sp, #?32
+** ret
+*/
+/*
+** load_int8x16x2_t:
+** ldp q0, q1, \[x0\]
+** ret
+*/
+/*
+** store_int8x16x2_t: { xfail *-*-* }
+** stp q0, q1, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c
new file mode 100644
index 0000000..883a0ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c
@@ -0,0 +1,38 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x8x2_t)
+TEST_VECTOR (int16x4x2_t)
+TEST_VECTOR (int32x2x2_t)
+TEST_VECTOR (int64x1x2_t)
+TEST_VECTOR (float16x4x2_t)
+TEST_VECTOR (bfloat16x4x2_t)
+TEST_VECTOR (float32x2x2_t)
+TEST_VECTOR (float64x1x2_t)
+
+/*
+** mov_int8x8x2_t:
+** fmov d0, d2
+** fmov d1, d3
+** ret
+*/
+/*
+** load_int8x8x2_t:
+** ldp d0, d1, \[x0\]
+** ret
+*/
+/*
+** store_int8x8x2_t:
+** stp d0, d1, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c
new file mode 100644
index 0000000..070a596
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c
@@ -0,0 +1,44 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x16x3_t)
+TEST_VECTOR (int16x8x3_t)
+TEST_VECTOR (int32x4x3_t)
+TEST_VECTOR (int64x2x3_t)
+TEST_VECTOR (float16x8x3_t)
+TEST_VECTOR (bfloat16x8x3_t)
+TEST_VECTOR (float32x4x3_t)
+TEST_VECTOR (float64x2x3_t)
+
+/*
+** mov_int8x16x3_t:
+** sub sp, sp, #48
+** stp q3, q4, \[sp\]
+** str q5, \[sp, #?32\]
+** ldp q0, q1, \[sp\]
+** ldr q2, \[sp, #?32\]
+** add sp, sp, #?48
+** ret
+*/
+/*
+** load_int8x16x3_t:
+** ldp q0, q1, \[x0\]
+** ldr q2, \[x0, #?32\]
+** ret
+*/
+/*
+** store_int8x16x3_t: { xfail *-*-* }
+** stp q0, q1, \[x0\]
+** str q2, \[x0, #?32\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c
new file mode 100644
index 0000000..4b873d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c
@@ -0,0 +1,41 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x8x3_t)
+TEST_VECTOR (int16x4x3_t)
+TEST_VECTOR (int32x2x3_t)
+TEST_VECTOR (int64x1x3_t)
+TEST_VECTOR (float16x4x3_t)
+TEST_VECTOR (bfloat16x4x3_t)
+TEST_VECTOR (float32x2x3_t)
+TEST_VECTOR (float64x1x3_t)
+
+/*
+** mov_int8x8x3_t:
+** fmov d0, d3
+** fmov d1, d4
+** fmov d2, d5
+** ret
+*/
+/*
+** load_int8x8x3_t:
+** ldp d0, d1, \[x0\]
+** ldr d2, \[x0, #?16\]
+** ret
+*/
+/*
+** store_int8x8x3_t:
+** stp d0, d1, \[x0\]
+** str d2, \[x0, #?16\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c
new file mode 100644
index 0000000..6a517b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c
@@ -0,0 +1,44 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x16x4_t)
+TEST_VECTOR (int16x8x4_t)
+TEST_VECTOR (int32x4x4_t)
+TEST_VECTOR (int64x2x4_t)
+TEST_VECTOR (float16x8x4_t)
+TEST_VECTOR (bfloat16x8x4_t)
+TEST_VECTOR (float32x4x4_t)
+TEST_VECTOR (float64x2x4_t)
+
+/*
+** mov_int8x16x4_t:
+** sub sp, sp, #64
+** stp q4, q5, \[sp\]
+** stp q6, q7, \[sp, #?32\]
+** ldp q0, q1, \[sp\]
+** ldp q2, q3, \[sp, #?32\]
+** add sp, sp, #?64
+** ret
+*/
+/*
+** load_int8x16x4_t:
+** ldp q0, q1, \[x0\]
+** ldp q2, q3, \[x0, #?32\]
+** ret
+*/
+/*
+** store_int8x16x4_t: { xfail *-*-* }
+** stp q0, q1, \[x0\]
+** stp q2, q3, \[x0, #?32\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c
new file mode 100644
index 0000000..f096be4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x8x4_t)
+TEST_VECTOR (int16x4x4_t)
+TEST_VECTOR (int32x2x4_t)
+TEST_VECTOR (int64x1x4_t)
+TEST_VECTOR (float16x4x4_t)
+TEST_VECTOR (bfloat16x4x4_t)
+TEST_VECTOR (float32x2x4_t)
+TEST_VECTOR (float64x1x4_t)
+
+/*
+** mov_int8x8x4_t:
+** fmov d0, d4
+** fmov d1, d5
+** fmov d2, d6
+** fmov d3, d7
+** ret
+*/
+/*
+** load_int8x8x4_t:
+** ldp d0, d1, \[x0\]
+** ldp d2, d3, \[x0, #?16\]
+** ret
+*/
+/*
+** store_int8x8x4_t:
+** stp d0, d1, \[x0\]
+** stp d2, d3, \[x0, #?16\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
index 4c97e6f..d2b5d80 100644
--- a/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
@@ -53,3 +53,18 @@ fpr_to_gpr (v8qi q0)
x0 = q0;
asm volatile ("" :: "r" (x0));
}
+
+/*
+** gpr_to_gpr:
+** mov x0, x1
+** ret
+*/
+void
+gpr_to_gpr ()
+{
+ register v8qi x0 asm ("x0");
+ register v8qi x1 asm ("x1");
+ asm volatile ("" : "=r" (x1));
+ x0 = x1;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_2.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_2.c
new file mode 100644
index 0000000..0d8576f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_2.c
@@ -0,0 +1,27 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_GENERAL(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE zero_##TYPE () { return (TYPE) {}; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_GENERAL (__Int8x8_t)
+TEST_GENERAL (__Int16x4_t)
+TEST_GENERAL (__Int32x2_t)
+TEST_GENERAL (__Int64x1_t)
+TEST_GENERAL (__Bfloat16x4_t)
+TEST_GENERAL (__Float16x4_t)
+TEST_GENERAL (__Float32x2_t)
+TEST_GENERAL (__Float64x1_t)
+
+__Int8x8_t const_s8x8 () { return (__Int8x8_t) { 1, 1, 1, 1, 1, 1, 1, 1 }; }
+__Int16x4_t const_s16x4 () { return (__Int16x4_t) { 1, 0, 1, 0 }; }
+__Int32x2_t const_s32x2 () { return (__Int32x2_t) { 1, 2 }; }
+__Int64x1_t const_s64x1 () { return (__Int64x1_t) { 100 }; }
+__Float16x4_t const_f16x4 () { return (__Float16x4_t) { 2, 2, 2, 2 }; }
+__Float32x2_t const_f32x2 () { return (__Float32x2_t) { 1, 2 }; }
+__Float64x1_t const_f64x1 () { return (__Float64x1_t) { 32 }; }
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_3.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_3.c
new file mode 100644
index 0000000..1caa1a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_3.c
@@ -0,0 +1,30 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE \
+ test_##TYPE (void) \
+ { \
+ typedef TYPE v __attribute__((aligned(1))); \
+ register v *ptr asm ("x0"); \
+ asm volatile ("" : "=r" (ptr)); \
+ return *ptr; \
+ }
+
+TEST_VECTOR (__Int8x8_t)
+TEST_VECTOR (__Int16x4_t)
+TEST_VECTOR (__Int32x2_t)
+TEST_VECTOR (__Int64x1_t)
+TEST_VECTOR (__Bfloat16x4_t)
+TEST_VECTOR (__Float16x4_t)
+TEST_VECTOR (__Float32x2_t)
+TEST_VECTOR (__Float64x1_t)
+
+/*
+** test___Int8x8_t:
+** ldr d0, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/vect_unary_2.c b/gcc/testsuite/gcc.target/aarch64/vect_unary_2.c
new file mode 100644
index 0000000..454ac27
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect_unary_2.c
@@ -0,0 +1,5 @@
+/* { dg-options "-O3 -fno-math-errno --save-temps" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#include "vect_unary_1.c"
diff --git a/gcc/testsuite/gcc.target/i386/pr105735-1.c b/gcc/testsuite/gcc.target/i386/pr105735-1.c
new file mode 100644
index 0000000..69de6b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr105735-1.c
@@ -0,0 +1,88 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fdump-tree-sccp-details" } */
+/* { dg-final { scan-tree-dump-times {final value replacement} 8 "sccp" } } */
+
+unsigned int
+__attribute__((noipa))
+foo (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 64; bit++)
+ tmp &= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo1 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 63; bit >= 0; bit -=3)
+ tmp &= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo2 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 64; bit++)
+ tmp |= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo3 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 63; bit >= 0; bit -=3)
+ tmp |= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo4 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 64; bit++)
+ tmp ^= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo5 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 63; bit++)
+ tmp ^= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+f (unsigned int tmp, int bit, unsigned int bit2)
+{
+ unsigned int res = tmp;
+ for (int i = 0; i < bit; i++)
+ res &= bit2;
+ return res;
+}
+
+unsigned int
+__attribute__((noipa))
+f1 (unsigned int tmp, int bit, unsigned int bit2)
+{
+ unsigned int res = tmp;
+ for (int i = 0; i < bit; i++)
+ res |= bit2;
+ return res;
+}
+
+unsigned int
+__attribute__((noipa))
+f2 (unsigned int tmp, int bit, unsigned int bit2)
+{
+ unsigned int res = tmp;
+ for (int i = 0; i < bit; i++)
+ res ^= bit2;
+ return res;
+}
+
diff --git a/gcc/testsuite/gcc.target/i386/pr105735-2.c b/gcc/testsuite/gcc.target/i386/pr105735-2.c
new file mode 100644
index 0000000..66cc5fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr105735-2.c
@@ -0,0 +1,28 @@
+/* { dg-do run } */
+/* { dg-options "-O1" } */
+
+#include "pr105735-1.c"
+
+int main()
+{
+ unsigned int tmp = 0x1101;
+ unsigned int bit2 = 0x111101;
+ if (foo (tmp, bit2) != 0x1101)
+ __builtin_abort ();
+ if (foo1 (tmp, bit2) != 0x1101)
+ __builtin_abort ();
+ if (foo2 (tmp, bit2) != 0x111101)
+ __builtin_abort ();
+ if (foo3 (tmp, bit2) != 0x111101)
+ __builtin_abort ();
+ if (foo4 (tmp, bit2) != 0x1101)
+ __builtin_abort ();
+ if (foo5 (tmp, bit2) != 0x110000)
+ __builtin_abort ();
+ if (f (tmp, 64, bit2) != 0x1101)
+ __builtin_abort ();
+ if (f1 (tmp, 64, bit2) != 0x111101)
+ __builtin_abort ();
+ if (f2 (tmp, 64, bit2) != 0x1101)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr106905.c b/gcc/testsuite/gcc.target/i386/pr106905.c
new file mode 100644
index 0000000..a190a1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr106905.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=silvermont -O2 -fvect-cost-model=dynamic" } */
+
+void
+foo_mul_peel (int *a, int b)
+{
+ int i;
+
+ for (i = 0; i < 7; ++i)
+ {
+ b *= 2;
+ a[i] = b;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr106910-1.c b/gcc/testsuite/gcc.target/i386/pr106910-1.c
new file mode 100644
index 0000000..c7685a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr106910-1.c
@@ -0,0 +1,77 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-msse4.1 -O2 -Ofast" } */
+/* { dg-final { scan-assembler-times "roundps" 9 } } */
+/* { dg-final { scan-assembler-times "cvtps2dq" 1 } } */
+/* { dg-final { scan-assembler-times "cvttps2dq" 3 } } */
+
+#include <math.h>
+
+void
+foo (float* p, float* __restrict q)
+{
+ p[0] = truncf (q[0]);
+ p[1] = truncf (q[1]);
+}
+
+void
+foo1 (float* p, float* __restrict q)
+{
+ p[0] = floorf (q[0]);
+ p[1] = floorf (q[1]);
+}
+
+void
+foo1i (int* p, float* __restrict q)
+{
+ p[0] = (int) floorf (q[0]);
+ p[1] = (int) floorf (q[1]);
+}
+
+void
+foo2 (float* p, float* __restrict q)
+{
+ p[0] = ceilf (q[0]);
+ p[1] = ceilf (q[1]);
+}
+
+void
+foo2i (int* p, float* __restrict q)
+{
+ p[0] = (int) ceilf (q[0]);
+ p[1] = (int) ceilf (q[1]);
+}
+
+void
+foo3 (float* p, float* __restrict q)
+{
+ p[0] = rintf (q[0]);
+ p[1] = rintf (q[1]);
+}
+
+void
+foo3i (int* p, float* __restrict q)
+{
+ p[0] = (int) rintf (q[0]);
+ p[1] = (int) rintf (q[1]);
+}
+
+void
+foo4 (float* p, float* __restrict q)
+{
+ p[0] = nearbyintf (q[0]);
+ p[1] = nearbyintf (q[1]);
+}
+
+void
+foo5(float* p, float* __restrict q)
+{
+ p[0] = roundf (q[0]);
+ p[1] = roundf (q[1]);
+}
+
+void
+foo5i(int* p, float* __restrict q)
+{
+ p[0] = (int) roundf (q[0]);
+ p[1] = (int) roundf (q[1]);
+}
diff --git a/gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c b/gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c
new file mode 100644
index 0000000..bead94e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+/* { dg-options "-mf16c -msse2 -mno-avx2 -O2" } */
+
+typedef __bf16 v8bf __attribute__ ((__vector_size__ (16)));
+typedef __bf16 v16bf __attribute__ ((__vector_size__ (32)));
+
+#define VEC_EXTRACT(V,S,IDX) \
+ S \
+ __attribute__((noipa)) \
+ vec_extract_##V##_##IDX (V v) \
+ { \
+ return v[IDX]; \
+ }
+
+#define VEC_SET(V,S,IDX) \
+ V \
+ __attribute__((noipa)) \
+ vec_set_##V##_##IDX (V v, S s) \
+ { \
+ v[IDX] = s; \
+ return v; \
+ }
+
+v8bf
+vec_init_v8bf (__bf16 a1, __bf16 a2, __bf16 a3, __bf16 a4,
+ __bf16 a5, __bf16 a6, __bf16 a7, __bf16 a8)
+{
+ return __extension__ (v8bf) {a1, a2, a3, a4, a5, a6, a7, a8};
+}
+
+v16bf
+vec_init_v16bf (__bf16 a1, __bf16 a2, __bf16 a3, __bf16 a4,
+ __bf16 a5, __bf16 a6, __bf16 a7, __bf16 a8,
+ __bf16 a9, __bf16 a10, __bf16 a11, __bf16 a12,
+ __bf16 a13, __bf16 a14, __bf16 a15, __bf16 a16)
+{
+ return __extension__ (v16bf) {a1, a2, a3, a4, a5, a6, a7, a8,
+ a9, a10, a11, a12, a13, a14, a15, a16};
+}
+
+v8bf
+vec_init_dup_v8bf (__bf16 a1)
+{
+ return __extension__ (v8bf) {a1, a1, a1, a1, a1, a1, a1, a1};
+}
+
+v16bf
+vec_init_dup_v16bf (__bf16 a1)
+{
+ return __extension__ (v16bf) {a1, a1, a1, a1, a1, a1, a1, a1,
+ a1, a1, a1, a1, a1, a1, a1, a1};
+}
+
+/* { dg-final { scan-assembler-times "vpunpcklwd" 12 } } */
+/* { dg-final { scan-assembler-times "vpunpckldq" 6 } } */
+/* { dg-final { scan-assembler-times "vpunpcklqdq" 3 } } */
+
+VEC_EXTRACT (v8bf, __bf16, 0);
+VEC_EXTRACT (v8bf, __bf16, 4);
+VEC_EXTRACT (v16bf, __bf16, 0);
+VEC_EXTRACT (v16bf, __bf16, 3);
+VEC_EXTRACT (v16bf, __bf16, 8);
+VEC_EXTRACT (v16bf, __bf16, 15);
+/* { dg-final { scan-assembler-times "vpsrldq\[\t ]*\\\$8" 1 } } */
+/* { dg-final { scan-assembler-times "vpsrldq\[\t ]*\\\$6" 1 } } */
+/* { dg-final { scan-assembler-times "vpsrldq\[\t ]*\\\$14" 1 } } */
+/* { dg-final { scan-assembler-times "vextract" 4 } } */
+
+VEC_SET (v8bf, __bf16, 4);
+VEC_SET (v16bf, __bf16, 3);
+VEC_SET (v16bf, __bf16, 8);
+VEC_SET (v16bf, __bf16, 15);
+/* { dg-final { scan-assembler-times "vpblendw" 3 { target { ! ia32 } } } } */
+
+/* { dg-final { scan-assembler-times "vpinsrw" 30 { target ia32 } } } */
+
diff --git a/gcc/testsuite/gcc.target/ia64/pr106905.c b/gcc/testsuite/gcc.target/ia64/pr106905.c
new file mode 100644
index 0000000..1b9656e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/ia64/pr106905.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99 -O3 -fPIC" } */
+long ZDICT_fillNoise_p, ZDICT_trainFromBuffer_legacy_result;
+unsigned ZDICT_fillNoise_acc;
+int ZDICT_totalSampleSize_nbFiles;
+static void ZDICT_fillNoise(void *buffer, long length) {
+ unsigned prime2 = 9;
+ for (ZDICT_fillNoise_p = 0; ZDICT_fillNoise_p < length; ZDICT_fillNoise_p++)
+ ZDICT_fillNoise_acc *= ((char *)buffer)[ZDICT_fillNoise_p] = prime2;
+}
+long ZDICT_trainFromBuffer_legacy() {
+ void *newBuff;
+ long total = 0;
+ for (; ZDICT_totalSampleSize_nbFiles;)
+ total += 0;
+ long sBuffSize = total;
+ newBuff = 0;
+ ZDICT_fillNoise(newBuff + sBuffSize, 32);
+ return ZDICT_trainFromBuffer_legacy_result;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/pr104482.c b/gcc/testsuite/gcc.target/powerpc/pr104482.c
new file mode 100644
index 0000000..9219126
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr104482.c
@@ -0,0 +1,16 @@
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx" } */
+
+/* It's to verify no ICE here, ignore error messages about
+ mismatch argument number since they are not test points
+ here. */
+/* { dg-excess-errors "pr104482" } */
+
+__attribute__ ((altivec (vector__))) int vsi;
+
+double
+testXXPERMDI (void)
+{
+ return __builtin_vsx_xxpermdi (vsi, vsi, 2, 4);
+}
+
diff --git a/gcc/testsuite/gcc.target/powerpc/pr106550.c b/gcc/testsuite/gcc.target/powerpc/pr106550.c
new file mode 100644
index 0000000..74e3953
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr106550.c
@@ -0,0 +1,14 @@
+/* PR target/106550 */
+/* { dg-options "-O2 -mdejagnu-cpu=power10" } */
+/* { dg-require-effective-target power10_ok } */
+
+void
+foo (unsigned long long *a)
+{
+ *a++ = 0x020805006106003; /* pli+pli+rldimi */
+ *a++ = 0x2351847027482577;/* pli+pli+rldimi */
+}
+
+/* { dg-final { scan-assembler-times {\mpli\M} 4 } } */
+/* { dg-final { scan-assembler-times {\mrldimi\M} 2 } } */
+
diff --git a/gcc/testsuite/gcc.target/powerpc/pr106550_1.c b/gcc/testsuite/gcc.target/powerpc/pr106550_1.c
new file mode 100644
index 0000000..7e709fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr106550_1.c
@@ -0,0 +1,22 @@
+/* PR target/106550 */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-O2 -mdejagnu-cpu=power10 -fdisable-rtl-split1" } */
+/* force the constant splitter run after RA: -fdisable-rtl-split1. */
+
+void
+foo (unsigned long long *a)
+{
+ /* Test oris/ori is used where paddi does not work with 'r0'. */
+ register long long d asm("r0") = 0x1245abcef9240dec; /* pli+sldi+oris+ori */
+ long long n;
+ asm("cntlzd %0, %1" : "=r"(n) : "r"(d));
+ *a++ = n;
+
+ *a++ = 0x235a8470a7480000ULL; /* pli+sldi+oris */
+ *a++ = 0x23a184700000b677ULL; /* pli+sldi+ori */
+}
+
+/* { dg-final { scan-assembler-times {\mpli\M} 3 } } */
+/* { dg-final { scan-assembler-times {\msldi\M} 3 } } */
+/* { dg-final { scan-assembler-times {\moris\M} 2 } } */
+/* { dg-final { scan-assembler-times {\mori\M} 2 } } */
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c
index 0a96b71..0c8c2f8 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c
@@ -1,7 +1,7 @@
/* Check load on condition for bool. */
/* { dg-do compile { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z13" } */
+/* { dg-options "-O2 -march=z13 -mzarch" } */
/* { dg-final { scan-assembler "lochinh\t%r.?,1" } } */
#include <stdbool.h>
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c
index 9c3d041..8c8e0ae 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c
@@ -1,7 +1,7 @@
/* Check load on condition for global char. */
/* { dg-do compile { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z13" } */
+/* { dg-options "-O2 -march=z13 -mzarch" } */
/* { dg-final { scan-assembler "locrnh\t%r.?,%r.?" } } */
#include <stdbool.h>
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c
index df0416a..1027ddc 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c
@@ -1,7 +1,7 @@
/* Check if conversion for two instructions. */
/* { dg-do run } */
-/* { dg-options "-O2 -march=z13 --save-temps" } */
+/* { dg-options "-O2 -march=z13 -mzarch --save-temps" } */
/* { dg-final { scan-assembler "lochih\t%r.?,1" } } */
/* { dg-final { scan-assembler "locrh\t.*" } } */
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c
index 181173b..fc6946f 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c
@@ -1,7 +1,7 @@
/* Check if conversion for two instructions. */
/* { dg-do run } */
-/* { dg-options "-O2 -march=z13 --save-temps" } */
+/* { dg-options "-O2 -march=z13 -mzarch --save-temps" } */
/* { dg-final { scan-assembler "lochih\t%r.?,1" } } */
/* { dg-final { scan-assembler "locrh\t.*" } } */
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c
index c66ef6c..51af498 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c
@@ -1,19 +1,20 @@
/* Check if conversion for two instructions. */
/* { dg-do run } */
-/* { dg-options "-O2 -march=z13 --save-temps" } */
+/* { dg-options "-O2 -march=z13 -mzarch --save-temps" } */
/* { dg-final { scan-assembler "locghih\t%r.?,1" } } */
/* { dg-final { scan-assembler "locgrh\t.*" } } */
+
#include <limits.h>
#include <stdio.h>
#include <assert.h>
__attribute__ ((noinline))
-long foo (long *a, unsigned long n)
+long long foo (long long *a, unsigned long long n)
{
- long min = 999999;
- long bla = 0;
+ long long min = 999999;
+ long long bla = 0;
for (int i = 0; i < n; i++)
{
if (a[i] < min)
@@ -30,9 +31,9 @@ long foo (long *a, unsigned long n)
int main()
{
- long a[] = {2, 1, -13, LONG_MAX, LONG_MIN, 0};
+ long long a[] = {2, 1, -13, LONG_MAX, LONG_MIN, 0};
- long res = foo (a, sizeof (a) / sizeof (a[0]));
+ long long res = foo (a, sizeof (a) / sizeof (a[0]));
assert (res == (LONG_MIN + 1));
}
diff --git a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c
index 5c64fac..eefacad 100644
--- a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c
+++ b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c
@@ -1,7 +1,7 @@
/* Make sure that the reverse permute patterns are optimized
correctly. */
/* { dg-do run { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z14 -mzarch -fno-unroll-loops" } */
+/* { dg-options "-O2 -march=z14 -mzarch -fno-unroll-loops -save-temps" } */
/* { dg-final { scan-assembler-times "vpdi\t" 4 } } */
/* { dg-final { scan-assembler-times "verllg\t" 2 } } */
diff --git a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c
index bff5240..079460b 100644
--- a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c
+++ b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c
@@ -1,12 +1,12 @@
/* Make sure that the reverse permute patterns are optimized
correctly. */
/* { dg-do run { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z15 -mzarch -fno-unroll-loops" } */
+/* { dg-options "-O2 -march=z15 -mzarch -fno-unroll-loops -save-temps" } */
/* { dg-final { scan-assembler-times "vsterg\t" 2 } } */
-/* { dg-final { scan-assembler-times "vsterf" 2 } } */
+/* { dg-final { scan-assembler-times "vsterf\t" 2 } } */
/* { dg-final { scan-assembler-times "vstbrq\t" 1 } } */
-/* { dg-final { scan-assembler-times "vperm" 0 } } */
+/* { dg-final { scan-assembler-times "vperm\t" 0 } } */
#include <assert.h>
diff --git a/gcc/testsuite/gfortran.dg/ieee/modes_1.f90 b/gcc/testsuite/gfortran.dg/ieee/modes_1.f90
new file mode 100644
index 0000000..b6ab288
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/ieee/modes_1.f90
@@ -0,0 +1,95 @@
+! { dg-do run }
+!
+! Test IEEE_MODES_TYPE, IEEE_GET_MODES and IEEE_SET_MODES
+
+
+! The symbols should be accessible from both IEEE_EXCEPTIONS
+! and IEEE_ARITHMETIC.
+
+subroutine test_1
+ use ieee_exceptions, only : IEEE_GET_MODES, IEEE_SET_MODES
+end subroutine
+
+subroutine test_2
+ use ieee_arithmetic, only : IEEE_GET_MODES, IEEE_SET_MODES
+end subroutine
+
+subroutine test_3
+ use ieee_exceptions, only : IEEE_MODES_TYPE
+end subroutine
+
+subroutine test_4
+ use ieee_arithmetic, only : IEEE_MODES_TYPE
+end subroutine
+
+
+! Check that the functions actually do the job
+
+program foo
+ use ieee_arithmetic
+ implicit none
+
+ type(ieee_modes_type) :: modes1, modes2
+ type(ieee_round_type) :: rmode
+ logical :: f
+
+ ! Set some modes
+ if (ieee_support_underflow_control()) then
+ call ieee_set_underflow_mode(gradual=.false.)
+ endif
+ if (ieee_support_rounding(ieee_up)) then
+ call ieee_set_rounding_mode(ieee_up)
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_set_halting_mode(ieee_overflow, .true.)
+ endif
+
+ call ieee_get_modes(modes1)
+
+ ! Change modes
+ if (ieee_support_underflow_control()) then
+ call ieee_set_underflow_mode(gradual=.true.)
+ endif
+ if (ieee_support_rounding(ieee_down)) then
+ call ieee_set_rounding_mode(ieee_down)
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_set_halting_mode(ieee_overflow, .false.)
+ endif
+
+ ! Save and restore the previous modes
+ call ieee_get_modes(modes2)
+ call ieee_set_modes(modes1)
+
+ ! Check them
+ if (ieee_support_underflow_control()) then
+ call ieee_get_underflow_mode(f)
+ if (f) stop 1
+ endif
+ if (ieee_support_rounding(ieee_down)) then
+ call ieee_get_rounding_mode(rmode)
+ if (rmode /= ieee_up) stop 2
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_get_halting_mode(ieee_overflow, f)
+ if (.not. f) stop 3
+ endif
+
+ ! Restore the second set of modes
+ call ieee_set_modes(modes2)
+
+ ! Check again
+ if (ieee_support_underflow_control()) then
+ call ieee_get_underflow_mode(f)
+ if (.not. f) stop 3
+ endif
+ if (ieee_support_rounding(ieee_down)) then
+ call ieee_get_rounding_mode(rmode)
+ if (rmode /= ieee_down) stop 4
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_get_halting_mode(ieee_overflow, f)
+ if (f) stop 5
+ endif
+
+end program foo
diff --git a/gcc/testsuite/gfortran.dg/ieee/rounding_2.f90 b/gcc/testsuite/gfortran.dg/ieee/rounding_2.f90
new file mode 100644
index 0000000..8af6c91
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/ieee/rounding_2.f90
@@ -0,0 +1,20 @@
+! { dg-do run }
+
+ use, intrinsic :: ieee_arithmetic
+ implicit none
+
+ real :: sx1, sx2, sx3
+ double precision :: dx1, dx2, dx3
+
+ ! IEEE_AWAY was added in Fortran 2018 and not supported by any target
+ ! at the moment. Just check we can query for its support.
+
+ ! We should support at least C float and C double types
+ if (ieee_support_rounding(ieee_away) &
+ .or. ieee_support_rounding(ieee_away, 0.) &
+ .or. ieee_support_rounding(ieee_away, 0.d0)) then
+ print *, "If a target / libc now supports this, we need to add a proper check!"
+ stop 1
+ end if
+
+end
diff --git a/gcc/testsuite/gfortran.dg/pr104314.f90 b/gcc/testsuite/gfortran.dg/pr104314.f90
new file mode 100644
index 0000000..510ded0
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr104314.f90
@@ -0,0 +1,9 @@
+! { dg-do compile }
+! PR fortran/104314 - ICE in deferred_op_assign
+! Contributed by G.Steinmetz
+
+program p
+ character(:), allocatable :: c(:)
+ c = ['123']
+ c = c == c ! { dg-error "Cannot convert" }
+end
diff --git a/gcc/testsuite/gfortran.dg/pr106857.f90 b/gcc/testsuite/gfortran.dg/pr106857.f90
new file mode 100644
index 0000000..4b0f86a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr106857.f90
@@ -0,0 +1,12 @@
+! { dg-do compile }
+! PR fortran/106857 - ICE in gfc_simplify_pack
+! Contributed by G.Steinmetz
+
+program p
+ type t
+ integer :: n
+ end type
+ type(t), parameter :: a(2,2) = t(1)
+ type(t), parameter :: b(4) = reshape(a, [2]) ! { dg-error "Different shape" }
+ type(t), parameter :: c(2) = pack(b, [.false.,.true.,.false.,.true.]) ! { dg-error "Different shape" }
+end
diff --git a/gcc/testsuite/gfortran.dg/pr106934.f90 b/gcc/testsuite/gfortran.dg/pr106934.f90
new file mode 100644
index 0000000..ac58a3e
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr106934.f90
@@ -0,0 +1,7 @@
+! { dg-do compile }
+! { dg-options "-O" }
+subroutine s
+ logical(1) :: a = .true.
+ logical(2) :: b
+ a = transfer(b, a)
+end
diff --git a/gcc/testsuite/lib/g++.exp b/gcc/testsuite/lib/g++.exp
index 24ef068..16e61fb 100644
--- a/gcc/testsuite/lib/g++.exp
+++ b/gcc/testsuite/lib/g++.exp
@@ -303,11 +303,6 @@ proc g++_target_compile { source dest type options } {
global flags_to_postpone
global board_info
- if { [target_info needs_status_wrapper] != "" && [info exists gluefile] } {
- lappend options "libs=${gluefile}"
- lappend options "ldflags=${wrap_flags}"
- }
-
global TEST_EXTRA_LIBS
if [info exists TEST_EXTRA_LIBS] {
lappend options "ldflags=$TEST_EXTRA_LIBS"
@@ -333,6 +328,11 @@ proc g++_target_compile { source dest type options } {
set options [dg-additional-files-options $options $source]
+ if { [target_info needs_status_wrapper] != "" && [info exists gluefile] } {
+ lappend options "libs=${gluefile}"
+ lappend options "ldflags=${wrap_flags}"
+ }
+
set result [target_compile $source $dest $type $options]
if {[board_info $tboard exists multilib_flags]} {
diff --git a/gcc/testsuite/lib/gcc.exp b/gcc/testsuite/lib/gcc.exp
index 1b25ebe..2f145d0 100644
--- a/gcc/testsuite/lib/gcc.exp
+++ b/gcc/testsuite/lib/gcc.exp
@@ -129,16 +129,6 @@ proc gcc_target_compile { source dest type options } {
global flags_to_postpone
global board_info
- if {[target_info needs_status_wrapper] != "" && \
- [target_info needs_status_wrapper] != "0" && \
- [info exists gluefile] } {
- lappend options "libs=${gluefile}"
- lappend options "ldflags=$wrap_flags"
- if { $type == "executable" } {
- set options [concat "{additional_flags=-dumpbase \"\"}" $options]
- }
- }
-
global TEST_EXTRA_LIBS
if [info exists TEST_EXTRA_LIBS] {
lappend options "ldflags=$TEST_EXTRA_LIBS"
@@ -170,6 +160,17 @@ proc gcc_target_compile { source dest type options } {
lappend options "timeout=[timeout_value]"
lappend options "compiler=$GCC_UNDER_TEST"
set options [dg-additional-files-options $options $source]
+
+ if {[target_info needs_status_wrapper] != "" && \
+ [target_info needs_status_wrapper] != "0" && \
+ [info exists gluefile] } {
+ lappend options "libs=${gluefile}"
+ lappend options "ldflags=$wrap_flags"
+ if { $type == "executable" } {
+ set options [concat "{additional_flags=-dumpbase \"\"}" $options]
+ }
+ }
+
set return_val [target_compile $source $dest $type $options]
if {[board_info $tboard exists multilib_flags]} {
diff --git a/gcc/testsuite/lib/wrapper.exp b/gcc/testsuite/lib/wrapper.exp
index 5a601b2..4a7d569 100644
--- a/gcc/testsuite/lib/wrapper.exp
+++ b/gcc/testsuite/lib/wrapper.exp
@@ -22,7 +22,7 @@
# the compiler when compiling FILENAME.
proc ${tool}_maybe_build_wrapper { filename args } {
- global gluefile wrap_flags
+ global gluefile wrap_flags gcc_adjusted_linker_flags
if { [target_info needs_status_wrapper] != "" \
&& [target_info needs_status_wrapper] != "0" \
@@ -43,6 +43,11 @@ proc ${tool}_maybe_build_wrapper { filename args } {
if { $result != "" } {
set gluefile [lindex $result 0]
set wrap_flags [lindex $result 1]
+
+ # Reset the cached state of the adjusted flags
+ if { [info exists gcc_adjusted_linker_flags] } {
+ set gcc_adjusted_linker_flags 0
+ }
}
}
}
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index e39d947..53be0c2 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -4167,6 +4167,8 @@ verify_gimple_assign_binary (gassign *stmt)
case ROUND_MOD_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
/* Disallow pointer and offset types for many of the binary gimple. */
if (POINTER_TYPE_P (lhs_type)
|| TREE_CODE (lhs_type) == OFFSET_TYPE)
@@ -4182,9 +4184,23 @@ verify_gimple_assign_binary (gassign *stmt)
case MIN_EXPR:
case MAX_EXPR:
- case BIT_IOR_EXPR:
- case BIT_XOR_EXPR:
+ /* Continue with generic binary expression handling. */
+ break;
+
case BIT_AND_EXPR:
+ if (POINTER_TYPE_P (lhs_type)
+ && TREE_CODE (rhs2) == INTEGER_CST)
+ break;
+ /* Disallow pointer and offset types for many of the binary gimple. */
+ if (POINTER_TYPE_P (lhs_type)
+ || TREE_CODE (lhs_type) == OFFSET_TYPE)
+ {
+ error ("invalid types for %qs", code_name);
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ debug_generic_expr (rhs2_type);
+ return true;
+ }
/* Continue with generic binary expression handling. */
break;
@@ -9820,16 +9836,12 @@ execute_fixup_cfg (void)
int flags = gimple_call_flags (stmt);
if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
{
- if (gimple_purge_dead_abnormal_call_edges (bb))
- todo |= TODO_cleanup_cfg;
-
if (gimple_in_ssa_p (cfun))
{
todo |= TODO_update_ssa | TODO_cleanup_cfg;
update_stmt (stmt);
}
}
-
if (flags & ECF_NORETURN
&& fixup_noreturn_call (stmt))
todo |= TODO_cleanup_cfg;
@@ -9859,10 +9871,15 @@ execute_fixup_cfg (void)
}
}
- if (maybe_clean_eh_stmt (stmt)
+ gsi_next (&gsi);
+ }
+ if (gimple *last = last_stmt (bb))
+ {
+ if (maybe_clean_eh_stmt (last)
&& gimple_purge_dead_eh_edges (bb))
todo |= TODO_cleanup_cfg;
- gsi_next (&gsi);
+ if (gimple_purge_dead_abnormal_call_edges (bb))
+ todo |= TODO_cleanup_cfg;
}
/* If we have a basic block with no successors that does not
diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc
index fc59d03..9f30f78 100644
--- a/gcc/tree-scalar-evolution.cc
+++ b/gcc/tree-scalar-evolution.cc
@@ -3635,6 +3635,64 @@ enum bit_op_kind
return fold_build2 (code1, type, inv, wide_int_to_tree (type, bits));
}
+/* Match.pd function to match bitop with invariant expression
+ i.e.
+ tmp_7 = _0 & _1; */
+extern bool gimple_bitop_with_inv_p (tree, tree *, tree (*)(tree));
+
+/* Return the inductive expression of bitop with invariant if possible,
+ otherwise returns DEF. */
+static tree
+analyze_and_compute_bitop_with_inv_effect (class loop* loop, tree phidef,
+ tree niter)
+{
+ tree match_op[2],inv;
+ tree type = TREE_TYPE (phidef);
+ gphi* header_phi = NULL;
+ enum tree_code code;
+ /* match thing like op0 (match[0]), op1 (match[1]), phidef (PHIDEF)
+
+ op1 = PHI <phidef, inv>
+ phidef = op0 & op1
+ if op0 is an invariant, it could change to
+ phidef = op0 & inv. */
+ gimple *def;
+ def = SSA_NAME_DEF_STMT (phidef);
+ if (!(is_gimple_assign (def)
+ && ((code = gimple_assign_rhs_code (def)), true)
+ && (code == BIT_AND_EXPR || code == BIT_IOR_EXPR
+ || code == BIT_XOR_EXPR)))
+ return NULL_TREE;
+
+ match_op[0] = gimple_assign_rhs1 (def);
+ match_op[1] = gimple_assign_rhs2 (def);
+
+ if (TREE_CODE (match_op[1]) != SSA_NAME
+ || !expr_invariant_in_loop_p (loop, match_op[0])
+ || !(header_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (match_op[1])))
+ || gimple_phi_num_args (header_phi) != 2)
+ return NULL_TREE;
+
+ if (PHI_ARG_DEF_FROM_EDGE (header_phi, loop_latch_edge (loop)) != phidef)
+ return NULL_TREE;
+
+ enum tree_code code1
+ = gimple_assign_rhs_code (def);
+
+ if (code1 == BIT_XOR_EXPR)
+ {
+ if (!tree_fits_uhwi_p (niter))
+ return NULL_TREE;
+ unsigned HOST_WIDE_INT niter_num;
+ niter_num = tree_to_uhwi (niter);
+ if (niter_num % 2 != 0)
+ match_op[0] = build_zero_cst (type);
+ }
+
+ inv = PHI_ARG_DEF_FROM_EDGE (header_phi, loop_preheader_edge (loop));
+ return fold_build2 (code1, type, inv, match_op[0]);
+}
+
/* Do final value replacement for LOOP, return true if we did anything. */
bool
@@ -3685,7 +3743,24 @@ final_value_replacement_loop (class loop *loop)
bool folded_casts;
def = analyze_scalar_evolution_in_loop (ex_loop, loop, def,
&folded_casts);
- def = compute_overall_effect_of_inner_loop (ex_loop, def);
+
+ tree bitinv_def, bit_def;
+ unsigned HOST_WIDE_INT niter_num;
+
+ if (def != chrec_dont_know)
+ def = compute_overall_effect_of_inner_loop (ex_loop, def);
+
+ /* Handle bitop with invariant induction expression.
+
+ i.e.
+ for (int i =0 ;i < 32; i++)
+ tmp &= bit2;
+ if bit2 is invariant in the loop, this simplifies to
+ tmp &= bit2. */
+ else if ((bitinv_def
+ = analyze_and_compute_bitop_with_inv_effect (loop,
+ phidef, niter)))
+ def = bitinv_def;
/* Handle bitwise induction expression.
@@ -3697,15 +3772,13 @@ final_value_replacement_loop (class loop *loop)
expressible, but in fact final value of RES can be replaced by
RES & CONSTANT where CONSTANT all ones with bit {0,3,6,9,... ,63}
being cleared, similar for BIT_IOR_EXPR/BIT_XOR_EXPR. */
- unsigned HOST_WIDE_INT niter_num;
- tree bit_def;
- if (tree_fits_uhwi_p (niter)
- && (niter_num = tree_to_uhwi (niter)) != 0
- && niter_num < TYPE_PRECISION (TREE_TYPE (phidef))
- && (bit_def
- = analyze_and_compute_bitwise_induction_effect (loop,
- phidef,
- niter_num)))
+ else if (tree_fits_uhwi_p (niter)
+ && (niter_num = tree_to_uhwi (niter)) != 0
+ && niter_num < TYPE_PRECISION (TREE_TYPE (phidef))
+ && (bit_def
+ = analyze_and_compute_bitwise_induction_effect (loop,
+ phidef,
+ niter_num)))
def = bit_def;
if (!tree_does_not_contain_chrecs (def)
diff --git a/gcc/tree-ssa-pre.cc b/gcc/tree-ssa-pre.cc
index e029bd3..2afc74f 100644
--- a/gcc/tree-ssa-pre.cc
+++ b/gcc/tree-ssa-pre.cc
@@ -1236,7 +1236,11 @@ translate_vuse_through_block (vec<vn_reference_op_s> operands,
if (same_valid)
*same_valid = true;
- if (gimple_bb (phi) != phiblock)
+ /* If value-numbering provided a memory state for this
+ that dominates PHIBLOCK we can just use that. */
+ if (gimple_nop_p (phi)
+ || (gimple_bb (phi) != phiblock
+ && dominated_by_p (CDI_DOMINATORS, phiblock, gimple_bb (phi))))
return vuse;
/* We have pruned expressions that are killed in PHIBLOCK via
@@ -2031,11 +2035,13 @@ prune_clobbered_mems (bitmap_set_t set, basic_block block)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (ref->vuse);
if (!gimple_nop_p (def_stmt)
- && ((gimple_bb (def_stmt) != block
- && !dominated_by_p (CDI_DOMINATORS,
- block, gimple_bb (def_stmt)))
- || (gimple_bb (def_stmt) == block
- && value_dies_in_block_x (expr, block))))
+ /* If value-numbering provided a memory state for this
+ that dominates BLOCK we're done, otherwise we have
+ to check if the value dies in BLOCK. */
+ && !(gimple_bb (def_stmt) != block
+ && dominated_by_p (CDI_DOMINATORS,
+ block, gimple_bb (def_stmt)))
+ && value_dies_in_block_x (expr, block))
to_remove = i;
}
/* If the REFERENCE may trap make sure the block does not contain
diff --git a/gcc/tree-ssa-reassoc.cc b/gcc/tree-ssa-reassoc.cc
index e13e2cb..c5c8b68 100644
--- a/gcc/tree-ssa-reassoc.cc
+++ b/gcc/tree-ssa-reassoc.cc
@@ -3608,10 +3608,14 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
tree type2 = NULL_TREE;
bool strict_overflow_p = false;
candidates.truncate (0);
+ if (POINTER_TYPE_P (type1))
+ type1 = pointer_sized_int_node;
for (j = i; j; j = chains[j - 1])
{
tree type = TREE_TYPE (ranges[j - 1].exp);
strict_overflow_p |= ranges[j - 1].strict_overflow_p;
+ if (POINTER_TYPE_P (type))
+ type = pointer_sized_int_node;
if ((b % 4) == 3)
{
/* For the signed < 0 cases, the types should be
@@ -3642,6 +3646,8 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
tree type = TREE_TYPE (ranges[j - 1].exp);
if (j == k)
continue;
+ if (POINTER_TYPE_P (type))
+ type = pointer_sized_int_node;
if ((b % 4) == 3)
{
if (!useless_type_conversion_p (type1, type))
@@ -3671,18 +3677,21 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
op = r->exp;
continue;
}
- if (id == l)
+ if (id == l || POINTER_TYPE_P (TREE_TYPE (op)))
{
code = (b % 4) == 3 ? BIT_NOT_EXPR : NOP_EXPR;
- g = gimple_build_assign (make_ssa_name (type1), code, op);
+ tree type3 = id >= l ? type1 : pointer_sized_int_node;
+ g = gimple_build_assign (make_ssa_name (type3), code, op);
gimple_seq_add_stmt_without_update (&seq, g);
op = gimple_assign_lhs (g);
}
tree type = TREE_TYPE (r->exp);
tree exp = r->exp;
- if (id >= l && !useless_type_conversion_p (type1, type))
+ if (POINTER_TYPE_P (type)
+ || (id >= l && !useless_type_conversion_p (type1, type)))
{
- g = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, exp);
+ tree type3 = id >= l ? type1 : pointer_sized_int_node;
+ g = gimple_build_assign (make_ssa_name (type3), NOP_EXPR, exp);
gimple_seq_add_stmt_without_update (&seq, g);
exp = gimple_assign_lhs (g);
}
@@ -3695,6 +3704,14 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
gimple_seq_add_stmt_without_update (&seq, g);
op = gimple_assign_lhs (g);
}
+ type1 = TREE_TYPE (ranges[k - 1].exp);
+ if (POINTER_TYPE_P (type1))
+ {
+ gimple *g
+ = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, op);
+ gimple_seq_add_stmt_without_update (&seq, g);
+ op = gimple_assign_lhs (g);
+ }
candidates.pop ();
if (update_range_test (&ranges[k - 1], NULL, candidates.address (),
candidates.length (), opcode, ops, op,
diff --git a/gcc/tree-ssa-uninit.cc b/gcc/tree-ssa-uninit.cc
index 4a1c333..eae29f8 100644
--- a/gcc/tree-ssa-uninit.cc
+++ b/gcc/tree-ssa-uninit.cc
@@ -1013,11 +1013,9 @@ warn_uninitialized_vars (bool wmaybe_uninit)
if (ee)
bb = ee->dest;
else
- {
- bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb);
- if (!bb || bb->index == EXIT_BLOCK)
- break;
- }
+ bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb);
+ if (!bb || bb->index == EXIT_BLOCK)
+ break;
}
FOR_EACH_BB_FN (bb, cfun)
diff --git a/gcc/tree-ssa.cc b/gcc/tree-ssa.cc
index 6507348..1a93ffd 100644
--- a/gcc/tree-ssa.cc
+++ b/gcc/tree-ssa.cc
@@ -1459,6 +1459,8 @@ maybe_rewrite_mem_ref_base (tree *tp, bitmap suitable_for_renaming)
&& (! INTEGRAL_TYPE_P (TREE_TYPE (*tp))
|| (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp)))
== TYPE_PRECISION (TREE_TYPE (*tp))))
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (sym))
+ || type_has_mode_precision_p (TREE_TYPE (sym)))
&& wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp))),
BITS_PER_UNIT) == 0)
{
@@ -1531,6 +1533,10 @@ non_rewritable_mem_ref_base (tree ref)
&& (! INTEGRAL_TYPE_P (TREE_TYPE (base))
|| (wi::to_offset (TYPE_SIZE (TREE_TYPE (base)))
== TYPE_PRECISION (TREE_TYPE (base))))
+ /* ??? Likewise for extracts from bitfields, we'd have
+ to pun the base object to a size precision mode first. */
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ || type_has_mode_precision_p (TREE_TYPE (decl)))
&& wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (base))),
BITS_PER_UNIT) == 0)
return NULL_TREE;
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 8f88f17..9c434b6 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -8646,8 +8646,10 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
/* Also doens't support peel for neg when niter is variable.
??? generate something like niter_expr & 1 ? init_expr : -init_expr? */
niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- if (niters_skip != NULL_TREE
- && TREE_CODE (niters_skip) != INTEGER_CST)
+ if ((niters_skip != NULL_TREE
+ && TREE_CODE (niters_skip) != INTEGER_CST)
+ || (!vect_use_loop_mask_for_alignment_p (loop_vinfo)
+ && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
diff --git a/gcc/tree.cc b/gcc/tree.cc
index 0546c8f..4165cbd 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -9414,6 +9414,8 @@ build_common_tree_nodes (bool signed_char)
void_node = make_node (VOID_CST);
TREE_TYPE (void_node) = void_type_node;
+ void_list_node = build_tree_list (NULL_TREE, void_type_node);
+
null_pointer_node = build_int_cst (build_pointer_type (void_type_node), 0);
layout_type (TREE_TYPE (null_pointer_node));
diff --git a/gcc/value-query.cc b/gcc/value-query.cc
index 201f679..0bdd670 100644
--- a/gcc/value-query.cc
+++ b/gcc/value-query.cc
@@ -167,7 +167,6 @@ range_query::free_value_range_equiv (value_range_equiv *v)
const class value_range_equiv *
range_query::get_value_range (const_tree expr, gimple *stmt)
{
- gcc_checking_assert (value_range_equiv::supports_p (TREE_TYPE (expr)));
int_range_max r;
if (range_of_expr (r, const_cast<tree> (expr), stmt))
return new (equiv_alloc->allocate ()) value_range_equiv (r);
@@ -218,22 +217,10 @@ range_query::get_tree_range (vrange &r, tree expr, gimple *stmt)
case REAL_CST:
{
- if (TREE_OVERFLOW_P (expr))
- expr = drop_tree_overflow (expr);
-
frange &f = as_a <frange> (r);
f.set (expr, expr);
-
- // Singletons from the tree world have known properties.
- REAL_VALUE_TYPE *rv = TREE_REAL_CST_PTR (expr);
- if (real_isnan (rv))
- f.set_nan (fp_prop::YES);
- else
- f.set_nan (fp_prop::NO);
- if (real_isneg (rv))
- f.set_signbit (fp_prop::YES);
- else
- f.set_signbit (fp_prop::NO);
+ if (!real_isnan (TREE_REAL_CST_PTR (expr)))
+ f.clear_nan ();
return true;
}
diff --git a/gcc/value-range-pretty-print.cc b/gcc/value-range-pretty-print.cc
index 93e18d3..eb74422 100644
--- a/gcc/value-range-pretty-print.cc
+++ b/gcc/value-range-pretty-print.cc
@@ -122,47 +122,51 @@ vrange_printer::print_irange_bitmasks (const irange &r) const
void
vrange_printer::visit (const frange &r) const
{
- tree type = r.type ();
-
pp_string (pp, "[frange] ");
if (r.undefined_p ())
{
pp_string (pp, "UNDEFINED");
return;
}
+ tree type = r.type ();
dump_generic_node (pp, type, 0, TDF_NONE, false);
pp_string (pp, " ");
if (r.varying_p ())
{
pp_string (pp, "VARYING");
+ print_frange_nan (r);
return;
}
pp_character (pp, '[');
- dump_generic_node (pp,
- build_real (type, r.lower_bound ()), 0, TDF_NONE, false);
- pp_string (pp, ", ");
- dump_generic_node (pp,
- build_real (type, r.upper_bound ()), 0, TDF_NONE, false);
- pp_string (pp, "] ");
-
- print_frange_prop ("NAN", r.get_nan ());
- print_frange_prop ("SIGN", r.get_signbit ());
+ bool has_endpoints = !r.known_isnan ();
+ if (has_endpoints)
+ {
+ dump_generic_node (pp,
+ build_real (type, r.lower_bound ()), 0, TDF_NONE, false);
+ pp_string (pp, ", ");
+ dump_generic_node (pp,
+ build_real (type, r.upper_bound ()), 0, TDF_NONE, false);
+ }
+ pp_character (pp, ']');
+ print_frange_nan (r);
}
-// Print the FP properties in an frange.
+// Print the NAN info for an frange.
void
-vrange_printer::print_frange_prop (const char *str, const fp_prop &prop) const
+vrange_printer::print_frange_nan (const frange &r) const
{
- if (prop.varying_p ())
- return;
-
- if (prop.yes_p ())
- pp_string (pp, str);
- else if (prop.no_p ())
+ if (r.maybe_isnan ())
{
- pp_character (pp, '!');
- pp_string (pp, str);
+ if (r.m_pos_nan && r.m_neg_nan)
+ {
+ pp_string (pp, " +-NAN");
+ return;
+ }
+ bool nan_sign = r.m_neg_nan;
+ if (nan_sign)
+ pp_string (pp, " -NAN");
+ else
+ pp_string (pp, " +NAN");
}
- pp_character (pp, ' ');
}
diff --git a/gcc/value-range-pretty-print.h b/gcc/value-range-pretty-print.h
index ad06c93..20c2659 100644
--- a/gcc/value-range-pretty-print.h
+++ b/gcc/value-range-pretty-print.h
@@ -31,7 +31,7 @@ public:
private:
void print_irange_bound (const wide_int &w, tree type) const;
void print_irange_bitmasks (const irange &) const;
- void print_frange_prop (const char *str, const fp_prop &) const;
+ void print_frange_nan (const frange &) const;
pretty_printer *pp;
};
diff --git a/gcc/value-range-storage.cc b/gcc/value-range-storage.cc
index b7a23fa..de7575e 100644
--- a/gcc/value-range-storage.cc
+++ b/gcc/value-range-storage.cc
@@ -253,9 +253,11 @@ frange_storage_slot::set_frange (const frange &r)
gcc_checking_assert (fits_p (r));
gcc_checking_assert (!r.undefined_p ());
+ m_kind = r.m_kind;
m_min = r.m_min;
m_max = r.m_max;
- m_props = r.m_props;
+ m_pos_nan = r.m_pos_nan;
+ m_neg_nan = r.m_neg_nan;
}
void
@@ -264,11 +266,12 @@ frange_storage_slot::get_frange (frange &r, tree type) const
gcc_checking_assert (r.supports_type_p (type));
r.set_undefined ();
- r.m_kind = VR_RANGE;
- r.m_props = m_props;
+ r.m_kind = m_kind;
r.m_type = type;
r.m_min = m_min;
r.m_max = m_max;
+ r.m_pos_nan = m_pos_nan;
+ r.m_neg_nan = m_neg_nan;
r.normalize_kind ();
if (flag_checking)
diff --git a/gcc/value-range-storage.h b/gcc/value-range-storage.h
index f506789..0cf95eb 100644
--- a/gcc/value-range-storage.h
+++ b/gcc/value-range-storage.h
@@ -113,12 +113,11 @@ class GTY (()) frange_storage_slot
frange_storage_slot (const frange &r) { set_frange (r); }
DISABLE_COPY_AND_ASSIGN (frange_storage_slot);
- // We can get away with just storing the properties and the
- // endpoints because the type can be gotten from the SSA, and
- // UNDEFINED is unsupported, so it can only be a VR_RANGE.
+ enum value_range_kind m_kind;
REAL_VALUE_TYPE m_min;
REAL_VALUE_TYPE m_max;
- frange_props m_props;
+ bool m_pos_nan;
+ bool m_neg_nan;
};
class obstack_vrange_allocator final: public vrange_allocator
diff --git a/gcc/value-range.cc b/gcc/value-range.cc
index adcaaa2..a8e3bb3 100644
--- a/gcc/value-range.cc
+++ b/gcc/value-range.cc
@@ -267,85 +267,24 @@ tree_compare (tree_code code, tree op1, tree op2)
return !integer_zerop (fold_build2 (code, integer_type_node, op1, op2));
}
-// Set the NAN property. Adjust the range if appopriate.
+// Flush denormal endpoints to the appropriate 0.0.
void
-frange::set_nan (fp_prop::kind k)
+frange::flush_denormals_to_zero ()
{
- if (k == fp_prop::YES)
- {
- if (!maybe_nan ())
- {
- set_undefined ();
- return;
- }
- gcc_checking_assert (!undefined_p ());
- *this = frange_nan (m_type);
- return;
- }
-
- if (k == fp_prop::NO && known_nan ())
- {
- set_undefined ();
- return;
- }
-
- // Setting VARYING on an obviously NAN range is a no-op.
- if (k == fp_prop::VARYING && real_isnan (&m_min))
+ if (undefined_p () || known_isnan ())
return;
- m_props.set_nan (k);
- normalize_kind ();
- if (flag_checking)
- verify_range ();
-}
-
-// Set the SIGNBIT property. Adjust the range if appropriate.
-
-void
-frange::set_signbit (fp_prop::kind k)
-{
- gcc_checking_assert (m_type);
-
- // No additional adjustments are needed for a NAN.
- if (known_nan ())
- {
- m_props.set_signbit (k);
- return;
- }
- // Ignore sign changes when they're set correctly.
- if (!maybe_nan ())
- {
- if (real_less (&m_max, &dconst0))
- return;
- if (real_less (&dconst0, &m_min))
- return;
- }
- // Adjust the range depending on the sign bit.
- if (k == fp_prop::YES)
- {
- // Crop the range to [-INF, 0].
- frange crop (m_type, dconstninf, dconst0);
- intersect (crop);
- if (!undefined_p ())
- m_props.set_signbit (fp_prop::YES);
- }
- else if (k == fp_prop::NO)
+ // Flush [x, -DENORMAL] to [x, -0.0].
+ if (real_isdenormal (&m_max) && real_isneg (&m_max))
{
- // Crop the range to [0, +INF].
- frange crop (m_type, dconst0, dconstinf);
- intersect (crop);
- if (!undefined_p ())
- m_props.set_signbit (fp_prop::NO);
+ m_max = dconst0;
+ if (HONOR_SIGNED_ZEROS (m_type))
+ m_max.sign = 1;
}
- else
- {
- m_props.set_signbit (fp_prop::VARYING);
- normalize_kind ();
- }
-
- if (flag_checking)
- verify_range ();
+ // Flush [+DENORMAL, x] to [+0.0, x].
+ if (real_isdenormal (&m_min) && !real_isneg (&m_min))
+ m_min = dconst0;
}
// Setter for franges.
@@ -353,51 +292,54 @@ frange::set_signbit (fp_prop::kind k)
void
frange::set (tree min, tree max, value_range_kind kind)
{
- gcc_checking_assert (TREE_CODE (min) == REAL_CST);
- gcc_checking_assert (TREE_CODE (max) == REAL_CST);
-
- if (kind == VR_UNDEFINED)
+ switch (kind)
{
+ case VR_UNDEFINED:
set_undefined ();
return;
+ case VR_VARYING:
+ case VR_ANTI_RANGE:
+ set_varying (TREE_TYPE (min));
+ return;
+ case VR_RANGE:
+ break;
+ default:
+ gcc_unreachable ();
}
- // Treat VR_ANTI_RANGE and VR_VARYING as varying.
- if (kind != VR_RANGE)
+ // Handle NANs.
+ if (real_isnan (TREE_REAL_CST_PTR (min)) || real_isnan (TREE_REAL_CST_PTR (max)))
{
- set_varying (TREE_TYPE (min));
+ gcc_checking_assert (real_identical (TREE_REAL_CST_PTR (min),
+ TREE_REAL_CST_PTR (max)));
+ tree type = TREE_TYPE (min);
+ bool sign = real_isneg (TREE_REAL_CST_PTR (min));
+ set_nan (type, sign);
return;
}
m_kind = kind;
m_type = TREE_TYPE (min);
- m_props.set_varying ();
m_min = *TREE_REAL_CST_PTR (min);
m_max = *TREE_REAL_CST_PTR (max);
-
- bool is_nan = (real_isnan (TREE_REAL_CST_PTR (min))
- || real_isnan (TREE_REAL_CST_PTR (max)));
-
- // Ranges with a NAN and a non-NAN endpoint are nonsensical.
- gcc_checking_assert (!is_nan || operand_equal_p (min, max));
-
- // Set NAN property if we're absolutely sure.
- if (is_nan && operand_equal_p (min, max))
- m_props.nan_set_yes ();
- else if (!HONOR_NANS (m_type))
- m_props.nan_set_no ();
-
- // Set SIGNBIT property for positive and negative ranges.
- if (real_less (&m_max, &dconst0))
- m_props.signbit_set_yes ();
- else if (real_less (&dconst0, &m_min))
- m_props.signbit_set_no ();
+ if (HONOR_NANS (m_type))
+ {
+ m_pos_nan = true;
+ m_neg_nan = true;
+ }
+ else
+ {
+ m_pos_nan = false;
+ m_neg_nan = false;
+ }
// Check for swapped ranges.
- gcc_checking_assert (is_nan || tree_compare (LE_EXPR, min, max));
+ gcc_checking_assert (tree_compare (LE_EXPR, min, max));
normalize_kind ();
+ flush_denormals_to_zero ();
+
if (flag_checking)
verify_range ();
}
@@ -423,18 +365,11 @@ frange::set (tree type,
bool
frange::normalize_kind ()
{
- // Undefined is viral.
- if (m_props.nan_undefined_p () || m_props.signbit_undefined_p ())
- {
- set_undefined ();
- return true;
- }
if (m_kind == VR_RANGE
&& real_isinf (&m_min, 1)
&& real_isinf (&m_max, 0))
{
- // No FP properties set means varying.
- if (m_props.varying_p ())
+ if (m_pos_nan && m_neg_nan)
{
set_varying (m_type);
return true;
@@ -442,8 +377,7 @@ frange::normalize_kind ()
}
else if (m_kind == VR_VARYING)
{
- // If a VARYING has any FP properties, it's no longer VARYING.
- if (!m_props.varying_p ())
+ if (!m_pos_nan || !m_neg_nan)
{
m_kind = VR_RANGE;
m_min = dconstninf;
@@ -451,9 +385,70 @@ frange::normalize_kind ()
return true;
}
}
+ else if (m_kind == VR_NAN && !m_pos_nan && !m_neg_nan)
+ set_undefined ();
return false;
}
+// Union or intersect the zero endpoints of two ranges. For example:
+// [-0, x] U [+0, x] => [-0, x]
+// [ x, -0] U [ x, +0] => [ x, +0]
+// [-0, x] ^ [+0, x] => [+0, x]
+// [ x, -0] ^ [ x, +0] => [ x, -0]
+//
+// UNION_P is true when performing a union, or false when intersecting.
+
+bool
+frange::combine_zeros (const frange &r, bool union_p)
+{
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
+
+ bool changed = false;
+ if (real_iszero (&m_min) && real_iszero (&r.m_min)
+ && real_isneg (&m_min) != real_isneg (&r.m_min))
+ {
+ m_min.sign = union_p;
+ changed = true;
+ }
+ if (real_iszero (&m_max) && real_iszero (&r.m_max)
+ && real_isneg (&m_max) != real_isneg (&r.m_max))
+ {
+ m_max.sign = !union_p;
+ changed = true;
+ }
+ // If the signs are swapped, the resulting range is empty.
+ if (m_min.sign == 0 && m_max.sign == 1)
+ {
+ if (maybe_isnan ())
+ m_kind = VR_NAN;
+ else
+ m_kind = VR_UNDEFINED;
+ changed = true;
+ }
+ return changed;
+}
+
+// Union two ranges when one is known to be a NAN.
+
+bool
+frange::union_nans (const frange &r)
+{
+ gcc_checking_assert (known_isnan () || r.known_isnan ());
+
+ if (known_isnan ())
+ {
+ m_kind = r.m_kind;
+ m_min = r.m_min;
+ m_max = r.m_max;
+ }
+ m_pos_nan |= r.m_pos_nan;
+ m_neg_nan |= r.m_neg_nan;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ return true;
+}
+
bool
frange::union_ (const vrange &v)
{
@@ -467,29 +462,18 @@ frange::union_ (const vrange &v)
return true;
}
- // If one side has a NAN, the union is the other side, plus the union
- // of the properties and the possibility of a NAN.
- if (known_nan ())
- {
- frange_props save = m_props;
- *this = r;
- m_props = save;
- m_props.union_ (r.m_props);
- set_nan (fp_prop::VARYING);
- if (flag_checking)
- verify_range ();
- return true;
- }
- if (r.known_nan ())
+ // Combine NAN info.
+ if (known_isnan () || r.known_isnan ())
+ return union_nans (r);
+ bool changed = false;
+ if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
{
- m_props.union_ (r.m_props);
- set_nan (fp_prop::VARYING);
- if (flag_checking)
- verify_range ();
- return true;
+ m_pos_nan |= r.m_pos_nan;
+ m_neg_nan |= r.m_neg_nan;
+ changed = true;
}
- bool changed = m_props.union_ (r.m_props);
+ // Combine endpoints.
if (real_less (&r.m_min, &m_min))
{
m_min = r.m_min;
@@ -500,13 +484,34 @@ frange::union_ (const vrange &v)
m_max = r.m_max;
changed = true;
}
- changed |= normalize_kind ();
+ if (HONOR_SIGNED_ZEROS (m_type))
+ changed |= combine_zeros (r, true);
+
+ changed |= normalize_kind ();
if (flag_checking)
verify_range ();
return changed;
}
+// Intersect two ranges when one is known to be a NAN.
+
+bool
+frange::intersect_nans (const frange &r)
+{
+ gcc_checking_assert (known_isnan () || r.known_isnan ());
+
+ m_pos_nan &= r.m_pos_nan;
+ m_neg_nan &= r.m_neg_nan;
+ if (maybe_isnan ())
+ m_kind = VR_NAN;
+ else
+ m_kind = VR_UNDEFINED;
+ if (flag_checking)
+ verify_range ();
+ return true;
+}
+
bool
frange::intersect (const vrange &v)
{
@@ -525,25 +530,18 @@ frange::intersect (const vrange &v)
return true;
}
- // If two NANs are not exactly the same, drop to an unknown NAN,
- // otherwise there's nothing to do.
- if (known_nan () && r.known_nan ())
+ // Combine NAN info.
+ if (known_isnan () || r.known_isnan ())
+ return intersect_nans (r);
+ bool changed = false;
+ if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
{
- if (m_props == r.m_props)
- return false;
-
- *this = frange_nan (m_type);
- return true;
- }
- // ?? Perhaps the intersection of a NAN and anything is a NAN ??.
- if (known_nan () || r.known_nan ())
- {
- set_varying (m_type);
- return true;
+ m_pos_nan &= r.m_pos_nan;
+ m_neg_nan &= r.m_neg_nan;
+ changed = true;
}
- bool changed = m_props.intersect (r.m_props);
-
+ // Combine endpoints.
if (real_less (&m_min, &r.m_min))
{
m_min = r.m_min;
@@ -554,14 +552,22 @@ frange::intersect (const vrange &v)
m_max = r.m_max;
changed = true;
}
- // If the endpoints are swapped, the ranges are disjoint.
+ // If the endpoints are swapped, the resulting range is empty.
if (real_less (&m_max, &m_min))
{
- set_undefined ();
+ if (maybe_isnan ())
+ m_kind = VR_NAN;
+ else
+ m_kind = VR_UNDEFINED;
+ if (flag_checking)
+ verify_range ();
return true;
}
- changed |= normalize_kind ();
+ if (HONOR_SIGNED_ZEROS (m_type))
+ changed |= combine_zeros (r, false);
+
+ changed |= normalize_kind ();
if (flag_checking)
verify_range ();
return changed;
@@ -574,7 +580,8 @@ frange::operator= (const frange &src)
m_type = src.m_type;
m_min = src.m_min;
m_max = src.m_max;
- m_props = src.m_props;
+ m_pos_nan = src.m_pos_nan;
+ m_neg_nan = src.m_neg_nan;
if (flag_checking)
verify_range ();
@@ -592,12 +599,13 @@ frange::operator== (const frange &src) const
if (varying_p ())
return types_compatible_p (m_type, src.m_type);
- if (known_nan () || src.known_nan ())
+ if (known_isnan () || src.known_isnan ())
return false;
return (real_identical (&m_min, &src.m_min)
&& real_identical (&m_max, &src.m_max)
- && m_props == src.m_props
+ && m_pos_nan == src.m_pos_nan
+ && m_neg_nan == src.m_neg_nan
&& types_compatible_p (m_type, src.m_type));
}
return false;
@@ -608,30 +616,33 @@ frange::operator== (const frange &src) const
bool
frange::contains_p (tree cst) const
{
+ gcc_checking_assert (m_kind != VR_ANTI_RANGE);
+ const REAL_VALUE_TYPE *rv = TREE_REAL_CST_PTR (cst);
+
if (undefined_p ())
return false;
if (varying_p ())
return true;
- gcc_checking_assert (m_kind == VR_RANGE);
+ if (real_isnan (rv))
+ {
+ // No NAN in range.
+ if (!m_pos_nan && !m_neg_nan)
+ return false;
+ // Both +NAN and -NAN are present.
+ if (m_pos_nan && m_neg_nan)
+ return true;
+ return m_neg_nan == rv->sign;
+ }
+ if (known_isnan ())
+ return false;
- const REAL_VALUE_TYPE *rv = TREE_REAL_CST_PTR (cst);
- if (real_compare (GE_EXPR, rv, &m_min)
- && real_compare (LE_EXPR, rv, &m_max))
+ if (real_compare (GE_EXPR, rv, &m_min) && real_compare (LE_EXPR, rv, &m_max))
{
+ // Make sure the signs are equal for signed zeros.
if (HONOR_SIGNED_ZEROS (m_type) && real_iszero (rv))
- {
- // FIXME: This is still using get_signbit() instead of
- // known_signbit() because the latter bails on possible NANs
- // (for now).
- if (get_signbit ().yes_p ())
- return real_isneg (rv);
- else if (get_signbit ().no_p ())
- return !real_isneg (rv);
- else
- return true;
- }
+ return m_min.sign == m_max.sign && m_min.sign == rv->sign;
return true;
}
return false;
@@ -648,29 +659,24 @@ frange::singleton_p (tree *result) const
if (m_kind == VR_RANGE && real_identical (&m_min, &m_max))
{
// Return false for any singleton that may be a NAN.
- if (HONOR_NANS (m_type) && maybe_nan ())
+ if (HONOR_NANS (m_type) && maybe_isnan ())
return false;
- // Return the appropriate zero if known.
- if (HONOR_SIGNED_ZEROS (m_type) && zero_p ())
+ if (MODE_COMPOSITE_P (TYPE_MODE (m_type)))
{
- bool signbit;
- if (known_signbit (signbit))
- {
- if (signbit)
- {
- if (result)
- *result = build_real (m_type, real_value_negate (&dconst0));
- }
- else
- {
- if (result)
- *result = build_real (m_type, dconst0);
- }
- return true;
- }
- return false;
+ // For IBM long doubles, if the value is +-Inf or is exactly
+ // representable in double, the other double could be +0.0
+ // or -0.0. Since this means there is more than one way to
+ // represent a value, return false to avoid propagating it.
+ // See libgcc/config/rs6000/ibm-ldouble-format for details.
+ if (real_isinf (&m_min))
+ return false;
+ REAL_VALUE_TYPE r;
+ real_convert (&r, DFmode, &m_min);
+ if (real_identical (&r, &m_min))
+ return false;
}
+
if (result)
*result = build_real (m_type, m_min);
return true;
@@ -687,57 +693,40 @@ frange::supports_type_p (const_tree type) const
void
frange::verify_range ()
{
- if (undefined_p ())
+ switch (m_kind)
{
- gcc_checking_assert (m_props.undefined_p ());
+ case VR_UNDEFINED:
+ // m_type is ignored.
return;
- }
- gcc_checking_assert (!m_props.undefined_p ());
-
- if (varying_p ())
- {
- gcc_checking_assert (m_props.varying_p ());
+ case VR_VARYING:
+ gcc_checking_assert (m_type);
+ gcc_checking_assert (m_pos_nan && m_neg_nan);
+ gcc_checking_assert (real_isinf (&m_min, 1));
+ gcc_checking_assert (real_isinf (&m_max, 0));
+ return;
+ case VR_RANGE:
+ gcc_checking_assert (m_type);
+ break;
+ case VR_NAN:
+ gcc_checking_assert (m_type);
+ gcc_checking_assert (m_pos_nan || m_neg_nan);
return;
+ default:
+ gcc_unreachable ();
}
- // We don't support the inverse of an frange (yet).
- gcc_checking_assert (m_kind == VR_RANGE);
+ // NANs cannot appear in the endpoints of a range.
+ gcc_checking_assert (!real_isnan (&m_min) && !real_isnan (&m_max));
- bool is_nan = real_isnan (&m_min) || real_isnan (&m_max);
- if (is_nan)
- {
- // If either is a NAN, both must be a NAN.
- gcc_checking_assert (real_identical (&m_min, &m_max));
- gcc_checking_assert (known_nan ());
- }
- else
- // Make sure we don't have swapped ranges.
- gcc_checking_assert (!real_less (&m_max, &m_min));
+ // Make sure we don't have swapped ranges.
+ gcc_checking_assert (!real_less (&m_max, &m_min));
- // If we're absolutely sure we have a NAN, the endpoints should
- // reflect this, otherwise we'd have more than one way to represent
- // a NAN.
- if (known_nan ())
- {
- gcc_checking_assert (real_isnan (&m_min));
- gcc_checking_assert (real_isnan (&m_max));
- }
- else
- {
- // Make sure the signbit and range agree.
- bool signbit;
- if (known_signbit (signbit))
- {
- if (signbit)
- gcc_checking_assert (real_compare (LE_EXPR, &m_max, &dconst0));
- else
- gcc_checking_assert (real_compare (GE_EXPR, &m_min, &dconst0));
- }
- }
+ // [ +0.0, -0.0 ] is nonsensical.
+ gcc_checking_assert (!(real_iszero (&m_min, 0) && real_iszero (&m_max, 1)));
// If all the properties are clear, we better not span the entire
// domain, because that would make us varying.
- if (m_props.varying_p ())
+ if (m_pos_nan && m_neg_nan)
gcc_checking_assert (!real_isinf (&m_min, 1) || !real_isinf (&m_max, 0));
}
@@ -755,16 +744,24 @@ frange::nonzero_p () const
return false;
}
-// Set range to [+0.0, +0.0].
+// Set range to [-0.0, +0.0] if honoring signed zeros, or [0.0, 0.0]
+// otherwise.
void
frange::set_zero (tree type)
{
- tree zero = build_zero_cst (type);
- set (zero, zero);
+ if (HONOR_SIGNED_ZEROS (type))
+ {
+ REAL_VALUE_TYPE dconstm0 = dconst0;
+ dconstm0.sign = 1;
+ set (type, dconstm0, dconst0);
+ clear_nan ();
+ }
+ else
+ set (type, dconst0, dconst0);
}
-// Return TRUE for any [0.0, 0.0] regardless of sign.
+// Return TRUE for any zero regardless of sign.
bool
frange::zero_p () const
@@ -777,9 +774,7 @@ frange::zero_p () const
void
frange::set_nonnegative (tree type)
{
- tree zero = build_zero_cst (type);
- tree inf = vrp_val_max (type);
- set (zero, inf);
+ set (type, dconst0, dconstinf);
}
// Here we copy between any two irange's. The ranges can be legacy or
@@ -3635,62 +3630,110 @@ range_tests_nan ()
r1 = frange_float ("10", "12");
r0 = r1;
ASSERT_EQ (r0, r1);
- r0.set_nan (fp_prop::NO);
- ASSERT_NE (r0, r1);
- r0.set_nan (fp_prop::YES);
+ r0.clear_nan ();
ASSERT_NE (r0, r1);
+ r0.update_nan ();
+ ASSERT_EQ (r0, r1);
+
+ // [10, 20] NAN ^ [30, 40] NAN = NAN.
+ r0 = frange_float ("10", "20");
+ r1 = frange_float ("30", "40");
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.known_isnan ());
+
+ // [3,5] U [5,10] NAN = [3,10] NAN.
+ r0 = frange_float ("3", "5");
+ r0.clear_nan ();
+ r1 = frange_float ("5", "10");
+ r0.union_ (r1);
+ ASSERT_TRUE (r0.maybe_isnan ());
}
// NAN ranges are not equal to each other.
- r0 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
r1 = r0;
ASSERT_FALSE (r0 == r1);
ASSERT_FALSE (r0 == r0);
ASSERT_TRUE (r0 != r0);
- // [5,6] U NAN.
+ // [5,6] U NAN = [5,6] NAN.
r0 = frange_float ("5", "6");
- r0.set_nan (fp_prop::NO);
- r1 = frange_nan (float_type_node);
+ r0.clear_nan ();
+ r1.set_nan (float_type_node);
r0.union_ (r1);
real_from_string (&q, "5");
real_from_string (&r, "6");
ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
- ASSERT_TRUE (r0.maybe_nan ());
+ ASSERT_TRUE (r0.maybe_isnan ());
// NAN U NAN = NAN
- r0 = frange_nan (float_type_node);
- r1 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
+ r1.set_nan (float_type_node);
r0.union_ (r1);
- ASSERT_TRUE (real_isnan (&r0.lower_bound ()));
- ASSERT_TRUE (real_isnan (&r1.upper_bound ()));
- ASSERT_TRUE (r0.known_nan ());
+ ASSERT_TRUE (r0.known_isnan ());
- // [INF, INF] ^ NAN = VARYING
- r0 = frange_nan (float_type_node);
+ // [INF, INF] NAN ^ NAN = NAN
+ r0.set_nan (float_type_node);
r1 = frange_float ("+Inf", "+Inf");
+ if (!HONOR_NANS (float_type_node))
+ r1.update_nan ();
r0.intersect (r1);
- ASSERT_TRUE (r0.varying_p ());
+ ASSERT_TRUE (r0.known_isnan ());
// NAN ^ NAN = NAN
- r0 = frange_nan (float_type_node);
- r1 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
+ r1.set_nan (float_type_node);
r0.intersect (r1);
- ASSERT_TRUE (r0.known_nan ());
+ ASSERT_TRUE (r0.known_isnan ());
+
+ // +NAN ^ -NAN = UNDEFINED
+ r0.set_nan (float_type_node, false);
+ r1.set_nan (float_type_node, true);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.undefined_p ());
// VARYING ^ NAN = NAN.
- r0 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
r1.set_varying (float_type_node);
r0.intersect (r1);
- ASSERT_TRUE (r0.known_nan ());
+ ASSERT_TRUE (r0.known_isnan ());
+
+ // [3,4] ^ NAN = UNDEFINED.
+ r0 = frange_float ("3", "4");
+ r0.clear_nan ();
+ r1.set_nan (float_type_node);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.undefined_p ());
+
+ // [-3, 5] ^ NAN = UNDEFINED
+ r0 = frange_float ("-3", "5");
+ r0.clear_nan ();
+ r1.set_nan (float_type_node);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.undefined_p ());
+
+ // Setting the NAN bit to yes does not make us a known NAN.
+ r0.set_varying (float_type_node);
+ r0.update_nan ();
+ ASSERT_FALSE (r0.known_isnan ());
+
+ // NAN is in a VARYING.
+ r0.set_varying (float_type_node);
+ real_nan (&r, "", 1, TYPE_MODE (float_type_node));
+ tree nan = build_real (float_type_node, r);
+ ASSERT_TRUE (r0.contains_p (nan));
- // Setting the NAN bit to yes, forces to range to [NAN, NAN].
+ // -NAN is in a VARYING.
r0.set_varying (float_type_node);
- r0.set_nan (fp_prop::YES);
- ASSERT_TRUE (r0.known_nan ());
- ASSERT_TRUE (real_isnan (&r0.lower_bound ()));
- ASSERT_TRUE (real_isnan (&r0.upper_bound ()));
+ q = real_value_negate (&r);
+ tree neg_nan = build_real (float_type_node, q);
+ ASSERT_TRUE (r0.contains_p (neg_nan));
+
+ // Clearing the NAN on a [] NAN is the empty set.
+ r0.set_nan (float_type_node);
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.undefined_p ());
}
static void
@@ -3702,49 +3745,84 @@ range_tests_signed_zeros ()
frange r0, r1;
bool signbit;
- // Since -0.0 == +0.0, a range of [-0.0, -0.0] should contain +0.0
- // and vice versa.
+ // [0,0] contains [0,0] but not [-0,-0] and vice versa.
r0 = frange (zero, zero);
r1 = frange (neg_zero, neg_zero);
ASSERT_TRUE (r0.contains_p (zero));
- ASSERT_TRUE (r0.contains_p (neg_zero));
- ASSERT_TRUE (r1.contains_p (zero));
+ ASSERT_TRUE (!r0.contains_p (neg_zero));
ASSERT_TRUE (r1.contains_p (neg_zero));
+ ASSERT_TRUE (!r1.contains_p (zero));
// Test contains_p() when we know the sign of the zero.
- r0 = frange(zero, zero);
- r0.set_signbit (fp_prop::NO);
+ r0 = frange (zero, zero);
ASSERT_TRUE (r0.contains_p (zero));
ASSERT_FALSE (r0.contains_p (neg_zero));
- r0.set_signbit (fp_prop::YES);
+ r0 = frange (neg_zero, neg_zero);
ASSERT_TRUE (r0.contains_p (neg_zero));
ASSERT_FALSE (r0.contains_p (zero));
- // The intersection of zeros that differ in sign is the empty set.
- r0 = frange (zero, zero);
- r0.set_signbit (fp_prop::YES);
+ // The intersection of zeros that differ in sign is a NAN (or
+ // undefined if not honoring NANs).
+ r0 = frange (neg_zero, neg_zero);
r1 = frange (zero, zero);
- r1.set_signbit (fp_prop::NO);
r0.intersect (r1);
- ASSERT_TRUE (r0.undefined_p ());
+ if (HONOR_NANS (float_type_node))
+ ASSERT_TRUE (r0.known_isnan ());
+ else
+ ASSERT_TRUE (r0.undefined_p ());
// The union of zeros that differ in sign is a zero with unknown sign.
r0 = frange (zero, zero);
- r0.set_signbit (fp_prop::NO);
- r1 = frange (zero, zero);
- r1.set_signbit (fp_prop::YES);
+ r1 = frange (neg_zero, neg_zero);
r0.union_ (r1);
- ASSERT_TRUE (r0.zero_p () && !r0.known_signbit (signbit));
+ ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));
+
+ // [-0, +0] has an unknown sign.
+ r0 = frange (neg_zero, zero);
+ ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));
+
+ // [-0, +0] ^ [0, 0] is [0, 0]
+ r0 = frange (neg_zero, zero);
+ r1 = frange (zero, zero);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.zero_p ());
- // NAN U [5,6] should be [5,6] with no sign info.
- r0 = frange_nan (float_type_node);
+ // NAN U [5,6] should be [5,6] NAN.
+ r0.set_nan (float_type_node);
r1 = frange_float ("5", "6");
+ r1.clear_nan ();
r0.union_ (r1);
real_from_string (&q, "5");
real_from_string (&r, "6");
ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
- ASSERT_TRUE (!r0.known_signbit (signbit));
+ ASSERT_TRUE (!r0.signbit_p (signbit));
+ ASSERT_TRUE (r0.maybe_isnan ());
+
+ r0 = frange_float ("+0", "5");
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
+
+ r0 = frange_float ("-0", "5");
+ r0.clear_nan ();
+ ASSERT_TRUE (!r0.signbit_p (signbit));
+
+ r0 = frange_float ("-0", "10");
+ r1 = frange_float ("0", "5");
+ r0.intersect (r1);
+ ASSERT_TRUE (real_iszero (&r0.lower_bound (), false));
+
+ r0 = frange_float ("-0", "5");
+ r1 = frange_float ("0", "5");
+ r0.union_ (r1);
+ ASSERT_TRUE (real_iszero (&r0.lower_bound (), true));
+
+ r0 = frange_float ("-5", "-0");
+ r0.update_nan ();
+ r1 = frange_float ("0", "0");
+ r1.update_nan ();
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.known_isnan ());
}
static void
@@ -3753,52 +3831,25 @@ range_tests_signbit ()
frange r0, r1;
bool signbit;
- // Setting the signbit drops the range to [-INF, 0].
- r0.set_varying (float_type_node);
- r0.set_signbit (fp_prop::YES);
- ASSERT_TRUE (real_isinf (&r0.lower_bound (), 1));
- ASSERT_TRUE (real_iszero (&r0.upper_bound ()));
-
- // Setting the signbit for [-5, 10] crops the range to [-5, 0] with
- // the signbit property set.
- r0 = frange_float ("-5", "10");
- r0.set_signbit (fp_prop::YES);
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.known_signbit (signbit) && signbit);
- r1 = frange_float ("-5", "0");
- ASSERT_TRUE (real_identical (&r0.lower_bound (), &r1.lower_bound ()));
- ASSERT_TRUE (real_identical (&r0.upper_bound (), &r1.upper_bound ()));
-
// Negative numbers should have the SIGNBIT set.
r0 = frange_float ("-5", "-1");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.known_signbit (signbit) && signbit);
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && signbit);
// Positive numbers should have the SIGNBIT clear.
r0 = frange_float ("1", "10");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.known_signbit (signbit) && !signbit);
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
// Numbers containing zero should have an unknown SIGNBIT.
r0 = frange_float ("0", "10");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (!r0.known_signbit (signbit));
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
// Numbers spanning both positive and negative should have an
// unknown SIGNBIT.
r0 = frange_float ("-10", "10");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (!r0.known_signbit (signbit));
+ r0.clear_nan ();
+ ASSERT_TRUE (!r0.signbit_p (signbit));
r0.set_varying (float_type_node);
- ASSERT_TRUE (!r0.known_signbit (signbit));
-
- // Ignore signbit changes when the sign bit is obviously known from
- // the range.
- r0 = frange_float ("5", "10");
- r0.set_nan (fp_prop::NO);
- r0.set_signbit (fp_prop::VARYING);
- ASSERT_TRUE (r0.known_signbit (signbit) && !signbit);
- r0 = frange_float ("-5", "-1");
- r0.set_signbit (fp_prop::NO);
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.undefined_p ());
+ ASSERT_TRUE (!r0.signbit_p (signbit));
}
static void
@@ -3815,10 +3866,10 @@ range_tests_floats ()
// A range of [-INF,+INF] is actually VARYING if no other properties
// are set.
r0 = frange_float ("-Inf", "+Inf");
- if (r0.maybe_nan ())
+ if (r0.maybe_isnan ())
ASSERT_TRUE (r0.varying_p ());
// ...unless it has some special property...
- r0.set_nan (fp_prop::NO);
+ r0.clear_nan ();
ASSERT_FALSE (r0.varying_p ());
// The endpoints of a VARYING are +-INF.
@@ -3896,9 +3947,19 @@ range_tests_floats ()
r0.intersect (r1);
ASSERT_EQ (r0, frange_float ("15", "20"));
+ // [10,20] NAN ^ [21,25] NAN = [NAN]
+ r0 = frange_float ("10", "20");
+ r0.update_nan ();
+ r1 = frange_float ("21", "25");
+ r1.update_nan ();
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.known_isnan ());
+
// [10,20] ^ [21,25] = []
r0 = frange_float ("10", "20");
+ r0.clear_nan ();
r1 = frange_float ("21", "25");
+ r1.clear_nan ();
r0.intersect (r1);
ASSERT_TRUE (r0.undefined_p ());
}
diff --git a/gcc/value-range.h b/gcc/value-range.h
index f9a01ee..795b1f0 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -35,6 +35,8 @@ enum value_range_kind
VR_RANGE,
/* Range is ~[MIN, MAX]. */
VR_ANTI_RANGE,
+ /* Range is a NAN. */
+ VR_NAN,
/* Range is a nice guy. */
VR_LAST
};
@@ -263,69 +265,10 @@ public:
virtual void accept (const vrange_visitor &v) const override;
};
-// Floating point property to represent possible values of a NAN, INF, etc.
-
-class fp_prop
-{
-public:
- enum kind {
- UNDEFINED = 0x0, // Prop is impossible.
- YES = 0x1, // Prop is definitely set.
- NO = 0x2, // Prop is definitely not set.
- VARYING = (YES | NO) // Prop may hold.
- };
- fp_prop (kind f) : m_kind (f) { }
- bool varying_p () const { return m_kind == VARYING; }
- bool undefined_p () const { return m_kind == UNDEFINED; }
- bool yes_p () const { return m_kind == YES; }
- bool no_p () const { return m_kind == NO; }
-private:
- unsigned char m_kind : 2;
-};
-
-// Accessors for individual FP properties.
-
-#define FP_PROP_ACCESSOR(NAME) \
- void NAME##_set_varying () { u.bits.NAME = fp_prop::VARYING; } \
- void NAME##_set_yes () { u.bits.NAME = fp_prop::YES; } \
- void NAME##_set_no () { u.bits.NAME = fp_prop::NO; } \
- bool NAME##_varying_p () const { return u.bits.NAME == fp_prop::VARYING; } \
- bool NAME##_undefined_p () const { return u.bits.NAME == fp_prop::UNDEFINED; } \
- bool NAME##_yes_p () const { return u.bits.NAME == fp_prop::YES; } \
- bool NAME##_no_p () const { return u.bits.NAME == fp_prop::NO; } \
- fp_prop get_##NAME () const \
- { return fp_prop ((fp_prop::kind) u.bits.NAME); } \
- void set_##NAME (fp_prop::kind f) { u.bits.NAME = f; }
-
-// Aggregate of all the FP properties in an frange packed into one
-// structure to save space. Using explicit fp_prop's in the frange,
-// would take one byte per property because of padding. Instead, we
-// can save all properties into one byte.
-
-class frange_props
-{
-public:
- frange_props () { set_varying (); }
- void set_varying () { u.bytes = 0xff; }
- void set_undefined () { u.bytes = 0; }
- bool varying_p () { return u.bytes == 0xff; }
- bool undefined_p () { return u.bytes == 0; }
- bool union_ (const frange_props &other);
- bool intersect (const frange_props &other);
- bool operator== (const frange_props &other) const;
- FP_PROP_ACCESSOR(nan)
- FP_PROP_ACCESSOR(signbit)
-private:
- union {
- struct {
- unsigned char nan : 2;
- unsigned char signbit : 2;
- } bits;
- unsigned char bytes;
- } u;
-};
-
// A floating point range.
+//
+// The representation is a type with a couple of endpoints, unioned
+// with the set of { -NAN, +NAN }.
class frange : public vrange
{
@@ -348,6 +291,8 @@ public:
virtual void set (tree, tree, value_range_kind = VR_RANGE) override;
void set (tree type, const REAL_VALUE_TYPE &, const REAL_VALUE_TYPE &,
value_range_kind = VR_RANGE);
+ void set_nan (tree type);
+ void set_nan (tree type, bool sign);
virtual void set_varying (tree type) override;
virtual void set_undefined () override;
virtual bool union_ (const vrange &) override;
@@ -366,41 +311,42 @@ public:
bool operator!= (const frange &r) const { return !(*this == r); }
const REAL_VALUE_TYPE &lower_bound () const;
const REAL_VALUE_TYPE &upper_bound () const;
+ void update_nan ();
+ void clear_nan ();
// fpclassify like API
- bool known_finite () const;
- bool maybe_inf () const;
- bool known_inf () const;
- bool maybe_nan () const;
- bool known_nan () const;
- bool known_signbit (bool &signbit) const;
-
- // Accessors for FP properties.
- void set_nan (fp_prop::kind f);
- void set_signbit (fp_prop::kind);
+ bool known_isfinite () const;
+ bool known_isnan () const;
+ bool known_isinf () const;
+ bool maybe_isnan () const;
+ bool maybe_isinf () const;
+ bool signbit_p (bool &signbit) const;
private:
- fp_prop get_nan () const { return m_props.get_nan (); }
- fp_prop get_signbit () const { return m_props.get_signbit (); }
void verify_range ();
bool normalize_kind ();
+ bool union_nans (const frange &);
+ bool intersect_nans (const frange &);
+ bool combine_zeros (const frange &, bool union_p);
+ void flush_denormals_to_zero ();
- frange_props m_props;
tree m_type;
REAL_VALUE_TYPE m_min;
REAL_VALUE_TYPE m_max;
+ bool m_pos_nan;
+ bool m_neg_nan;
};
inline const REAL_VALUE_TYPE &
frange::lower_bound () const
{
- gcc_checking_assert (!undefined_p ());
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
return m_min;
}
inline const REAL_VALUE_TYPE &
frange::upper_bound () const
{
- gcc_checking_assert (!undefined_p ());
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
return m_max;
}
@@ -1080,30 +1026,6 @@ vrp_val_min (const_tree type)
return NULL_TREE;
}
-// Supporting methods for frange.
-
-inline bool
-frange_props::operator== (const frange_props &other) const
-{
- return u.bytes == other.u.bytes;
-}
-
-inline bool
-frange_props::union_ (const frange_props &other)
-{
- unsigned char saved = u.bytes;
- u.bytes |= other.u.bytes;
- return u.bytes != saved;
-}
-
-inline bool
-frange_props::intersect (const frange_props &other)
-{
- unsigned char saved = u.bytes;
- u.bytes &= other.u.bytes;
- return u.bytes != saved;
-}
-
inline
frange::frange ()
{
@@ -1141,6 +1063,7 @@ frange::frange (tree min, tree max, value_range_kind kind)
inline tree
frange::type () const
{
+ gcc_checking_assert (!undefined_p ());
return m_type;
}
@@ -1151,17 +1074,42 @@ frange::set_varying (tree type)
m_type = type;
m_min = dconstninf;
m_max = dconstinf;
- m_props.set_varying ();
+ m_pos_nan = true;
+ m_neg_nan = true;
}
inline void
frange::set_undefined ()
{
m_kind = VR_UNDEFINED;
- m_type = NULL;
- m_props.set_undefined ();
- memset (&m_min, 0, sizeof (m_min));
- memset (&m_max, 0, sizeof (m_max));
+ if (flag_checking)
+ verify_range ();
+}
+
+// Set the NAN bit and adjust the range.
+
+inline void
+frange::update_nan ()
+{
+ gcc_checking_assert (!undefined_p ());
+ m_pos_nan = true;
+ m_neg_nan = true;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+}
+
+// Clear the NAN bit and adjust the range.
+
+inline void
+frange::clear_nan ()
+{
+ gcc_checking_assert (!undefined_p ());
+ m_pos_nan = false;
+ m_neg_nan = false;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
}
// Set R to maximum representable value for TYPE.
@@ -1185,35 +1133,48 @@ real_min_representable (REAL_VALUE_TYPE *r, tree type)
*r = real_value_negate (r);
}
-// Build a NAN of type TYPE.
+// Build a signless NAN of type TYPE.
-inline frange
-frange_nan (tree type)
+inline void
+frange::set_nan (tree type)
{
- REAL_VALUE_TYPE r;
+ m_kind = VR_NAN;
+ m_type = type;
+ m_pos_nan = true;
+ m_neg_nan = true;
+ if (flag_checking)
+ verify_range ();
+}
+
+// Build a NAN of type TYPE with SIGN.
- gcc_assert (real_nan (&r, "", 1, TYPE_MODE (type)));
- return frange (type, r, r);
+inline void
+frange::set_nan (tree type, bool sign)
+{
+ m_kind = VR_NAN;
+ m_type = type;
+ m_neg_nan = sign;
+ m_pos_nan = !sign;
+ if (flag_checking)
+ verify_range ();
}
// Return TRUE if range is known to be finite.
inline bool
-frange::known_finite () const
+frange::known_isfinite () const
{
if (undefined_p () || varying_p () || m_kind == VR_ANTI_RANGE)
return false;
- return (!real_isnan (&m_min)
- && !real_isinf (&m_min)
- && !real_isinf (&m_max));
+ return (!maybe_isnan () && !real_isinf (&m_min) && !real_isinf (&m_max));
}
// Return TRUE if range may be infinite.
inline bool
-frange::maybe_inf () const
+frange::maybe_isinf () const
{
- if (undefined_p () || m_kind == VR_ANTI_RANGE)
+ if (undefined_p () || m_kind == VR_ANTI_RANGE || m_kind == VR_NAN)
return false;
if (varying_p ())
return true;
@@ -1223,7 +1184,7 @@ frange::maybe_inf () const
// Return TRUE if range is known to be the [-INF,-INF] or [+INF,+INF].
inline bool
-frange::known_inf () const
+frange::known_isinf () const
{
return (m_kind == VR_RANGE
&& real_identical (&m_min, &m_max)
@@ -1233,32 +1194,50 @@ frange::known_inf () const
// Return TRUE if range is possibly a NAN.
inline bool
-frange::maybe_nan () const
+frange::maybe_isnan () const
{
- return !get_nan ().no_p ();
+ return m_pos_nan || m_neg_nan;
}
// Return TRUE if range is a +NAN or -NAN.
inline bool
-frange::known_nan () const
+frange::known_isnan () const
{
- return get_nan ().yes_p ();
+ return m_kind == VR_NAN;
}
// If the signbit for the range is known, set it in SIGNBIT and return
// TRUE.
inline bool
-frange::known_signbit (bool &signbit) const
+frange::signbit_p (bool &signbit) const
{
- // FIXME: Signed NANs are not supported yet.
- if (maybe_nan ())
+ if (undefined_p ())
return false;
- if (get_signbit ().varying_p ())
+
+ // NAN with unknown sign.
+ if (m_pos_nan && m_neg_nan)
return false;
- signbit = get_signbit ().yes_p ();
- return true;
+ // No NAN.
+ if (!m_pos_nan && !m_neg_nan)
+ {
+ if (m_min.sign == m_max.sign)
+ {
+ signbit = m_min.sign;
+ return true;
+ }
+ return false;
+ }
+ // NAN with known sign.
+ bool nan_sign = m_neg_nan;
+ if (known_isnan ()
+ || (nan_sign == m_min.sign && nan_sign == m_max.sign))
+ {
+ signbit = nan_sign;
+ return true;
+ }
+ return false;
}
#endif // GCC_VALUE_RANGE_H