aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMartin Liska <mliska@suse.cz>2022-09-20 13:53:30 +0200
committerMartin Liska <mliska@suse.cz>2022-09-20 13:53:30 +0200
commit6df29b782e87c6c800be0425023d8438fdc67b92 (patch)
tree48eebe497e384d66a7f5cf861b4b1b963785a2cd
parentfdb97cd0b7d15efa39ba79dca44be93debb0ef12 (diff)
parent63e3cc294d835b43701eeef9410d1b8fc8922869 (diff)
downloadgcc-6df29b782e87c6c800be0425023d8438fdc67b92.zip
gcc-6df29b782e87c6c800be0425023d8438fdc67b92.tar.gz
gcc-6df29b782e87c6c800be0425023d8438fdc67b92.tar.bz2
Merge branch 'master' into devel/sphinx
-rw-r--r--ChangeLog4
-rw-r--r--MAINTAINERS1
-rwxr-xr-xcontrib/filter-clang-warnings.py6
-rw-r--r--gcc/ChangeLog482
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/ada/ChangeLog502
-rw-r--r--gcc/ada/gcc-interface/trans.cc1
-rw-r--r--gcc/analyzer/ChangeLog5
-rw-r--r--gcc/c-family/ChangeLog6
-rw-r--r--gcc/c-family/c-common.cc2
-rw-r--r--gcc/c-family/c-common.h1
-rw-r--r--gcc/c/ChangeLog15
-rw-r--r--gcc/c/c-decl.cc8
-rw-r--r--gcc/c/c-typeck.cc43
-rw-r--r--gcc/config/aarch64/aarch64-ldpstp.md11
-rw-r--r--gcc/config/aarch64/aarch64-simd.md217
-rw-r--r--gcc/config/aarch64/aarch64.cc94
-rw-r--r--gcc/config/aarch64/aarch64.md11
-rw-r--r--gcc/config/aarch64/iterators.md2
-rw-r--r--gcc/config/csky/csky.h2
-rw-r--r--gcc/config/gcn/mkoffload.cc2
-rw-r--r--gcc/config/i386/i386-builtins.cc185
-rw-r--r--gcc/config/i386/i386-expand.cc43
-rw-r--r--gcc/config/i386/mmx.md154
-rw-r--r--gcc/config/i386/sse.md80
-rw-r--r--gcc/config/i386/x86-tune-sched.cc14
-rw-r--r--gcc/config/loongarch/gnu-user.h6
-rw-r--r--gcc/config/mips/mips.cc2
-rw-r--r--gcc/config/nvptx/mkoffload.cc18
-rw-r--r--gcc/config/rs6000/rs6000-builtin.cc13
-rw-r--r--gcc/config/rs6000/rs6000-c.cc60
-rw-r--r--gcc/config/rs6000/rs6000.cc160
-rw-r--r--gcc/config/rs6000/rs6000.opt18
-rw-r--r--gcc/config/xtensa/xtensa.cc2
-rw-r--r--gcc/config/xtensa/xtensa.h1
-rw-r--r--gcc/config/xtensa/xtensa.md21
-rw-r--r--gcc/cp/ChangeLog153
-rw-r--r--gcc/cp/call.cc22
-rw-r--r--gcc/cp/constexpr.cc31
-rw-r--r--gcc/cp/cp-tree.h32
-rw-r--r--gcc/cp/cvt.cc4
-rw-r--r--gcc/cp/decl.cc17
-rw-r--r--gcc/cp/decl2.cc47
-rw-r--r--gcc/cp/except.cc4
-rw-r--r--gcc/cp/init.cc2
-rw-r--r--gcc/cp/lambda.cc1
-rw-r--r--gcc/cp/module.cc5
-rw-r--r--gcc/cp/name-lookup.cc2
-rw-r--r--gcc/cp/parser.cc7
-rw-r--r--gcc/cp/pt.cc35
-rw-r--r--gcc/cp/semantics.cc38
-rw-r--r--gcc/cp/tree.cc22
-rw-r--r--gcc/cp/typeck.cc24
-rw-r--r--gcc/cp/typeck2.cc33
-rw-r--r--gcc/d/ChangeLog5
-rw-r--r--gcc/d/d-builtins.cc1
-rw-r--r--gcc/doc/extend.texi2
-rw-r--r--gcc/doc/invoke.texi7
-rw-r--r--gcc/fortran/ChangeLog28
-rw-r--r--gcc/fortran/f95-lang.cc2
-rw-r--r--gcc/fortran/libgfortran.h1
-rw-r--r--gcc/fortran/resolve.cc1
-rw-r--r--gcc/fortran/simplify.cc2
-rw-r--r--gcc/fortran/trans-openmp.cc20
-rw-r--r--gcc/gimple-fold.cc1
-rw-r--r--gcc/gimple-range-fold.cc2
-rw-r--r--gcc/gimplify.cc2525
-rw-r--r--gcc/ginclude/float.h4
-rw-r--r--gcc/ginclude/stdatomic.h2
-rw-r--r--gcc/go/ChangeLog5
-rw-r--r--gcc/go/go-lang.cc3
-rw-r--r--gcc/jit/ChangeLog5
-rw-r--r--gcc/jit/dummy-frontend.cc3
-rw-r--r--gcc/lto/ChangeLog5
-rw-r--r--gcc/lto/lto-lang.cc1
-rw-r--r--gcc/match.pd6
-rw-r--r--gcc/omp-low.cc23
-rw-r--r--gcc/range-op-float.cc175
-rw-r--r--gcc/reg-stack.cc3
-rw-r--r--gcc/targhooks.cc17
-rw-r--r--gcc/testsuite/ChangeLog246
-rw-r--r--gcc/testsuite/c-c++-common/Waddress-7.c22
-rw-r--r--gcc/testsuite/c-c++-common/goacc/mdc-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/gomp/target-50.c23
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C9
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/initlist-array17.C37
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C29
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/auto-fn65.C10
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C16
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C12
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/explicit19.C12
-rw-r--r--gcc/testsuite/g++.dg/ext/integer-pack6.C13
-rw-r--r--gcc/testsuite/g++.dg/ext/pr106877.C13
-rw-r--r--gcc/testsuite/g++.dg/gcov/gcov.exp4
-rw-r--r--gcc/testsuite/g++.dg/goacc/mdc.C2
-rw-r--r--gcc/testsuite/g++.dg/goacc/member-array-acc.C13
-rw-r--r--gcc/testsuite/g++.dg/gomp/map-3.C9
-rw-r--r--gcc/testsuite/g++.dg/gomp/member-array-omp.C13
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-3.C4
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-lambda-1.C6
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-this-2.C2
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-this-3.C4
-rw-r--r--gcc/testsuite/g++.dg/gomp/target-this-4.C4
-rw-r--r--gcc/testsuite/g++.dg/modules/typename-friend_a.C11
-rw-r--r--gcc/testsuite/g++.dg/modules/typename-friend_b.C6
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1.C17
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1a.C16
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1b.C17
-rw-r--r--gcc/testsuite/g++.dg/template/evaluated1c.C17
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr106922.C91
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr106936.C14
-rw-r--r--gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C7
-rw-r--r--gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C3
-rw-r--r--gcc/testsuite/g++.target/powerpc/pr105485.C9
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr106878.c15
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/pr106958.c13
-rw-r--r--gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c9
-rw-r--r--gcc/testsuite/gcc.dg/c2x-float-11.c9
-rw-r--r--gcc/testsuite/gcc.dg/c2x-float-2.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr106938.c36
-rw-r--r--gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c6
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c2
-rw-r--r--gcc/testsuite/gcc.misc-tests/gcov.exp4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_1.c21
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_2.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_3.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv2di_1.c103
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c40
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_1.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_2.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_3.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/vect_unary_2.c5
-rw-r--r--gcc/testsuite/gcc.target/i386/pr105735-1.c88
-rw-r--r--gcc/testsuite/gcc.target/i386/pr105735-2.c28
-rw-r--r--gcc/testsuite/gcc.target/i386/pr106905.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr106910-1.c77
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c76
-rw-r--r--gcc/testsuite/gcc.target/ia64/pr106905.c20
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr104482.c16
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr106550.c14
-rw-r--r--gcc/testsuite/gcc.target/powerpc/pr106550_1.c22
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c13
-rw-r--r--gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c2
-rw-r--r--gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c6
-rw-r--r--gcc/testsuite/gfortran.dg/ieee/modes_1.f9095
-rw-r--r--gcc/testsuite/gfortran.dg/ieee/rounding_2.f9020
-rw-r--r--gcc/testsuite/gfortran.dg/pr104314.f909
-rw-r--r--gcc/testsuite/gfortran.dg/pr106857.f9012
-rw-r--r--gcc/testsuite/gfortran.dg/pr106934.f907
-rw-r--r--gcc/testsuite/lib/g++.exp10
-rw-r--r--gcc/testsuite/lib/gcc.exp21
-rw-r--r--gcc/testsuite/lib/wrapper.exp7
-rw-r--r--gcc/tree-cfg.cc33
-rw-r--r--gcc/tree-scalar-evolution.cc93
-rw-r--r--gcc/tree-ssa-pre.cc18
-rw-r--r--gcc/tree-ssa-reassoc.cc25
-rw-r--r--gcc/tree-ssa-uninit.cc8
-rw-r--r--gcc/tree-ssa.cc6
-rw-r--r--gcc/tree-vect-loop.cc6
-rw-r--r--gcc/tree.cc2
-rw-r--r--gcc/value-query.cc17
-rw-r--r--gcc/value-range-pretty-print.cc48
-rw-r--r--gcc/value-range-pretty-print.h2
-rw-r--r--gcc/value-range-storage.cc9
-rw-r--r--gcc/value-range-storage.h7
-rw-r--r--gcc/value-range.cc695
-rw-r--r--gcc/value-range.h237
-rw-r--r--libgcc/ChangeLog18
-rw-r--r--libgcc/config/avr/libf7/ChangeLog6
-rw-r--r--libgcc/config/avr/libf7/libf7-asm.sx50
-rw-r--r--libgcc/unwind-dw2-btree.h954
-rw-r--r--libgcc/unwind-dw2-fde.c196
-rw-r--r--libgcc/unwind-dw2-fde.h2
-rw-r--r--libgfortran/ChangeLog18
-rw-r--r--libgfortran/config/fpu-387.h7
-rw-r--r--libgfortran/config/fpu-aarch64.h7
-rw-r--r--libgfortran/config/fpu-aix.h22
-rw-r--r--libgfortran/config/fpu-generic.h11
-rw-r--r--libgfortran/config/fpu-glibc.h18
-rw-r--r--libgfortran/config/fpu-sysv.h7
-rw-r--r--libgfortran/ieee/ieee_arithmetic.F9010
-rw-r--r--libgfortran/ieee/ieee_exceptions.F9063
-rw-r--r--libgomp/ChangeLog44
-rw-r--r--libgomp/env.c6
-rw-r--r--libgomp/libgomp.texi15
-rw-r--r--libgomp/testsuite/libgomp.c-c++-common/icv-6.c26
-rw-r--r--libgomp/testsuite/libgomp.c-c++-common/requires-4.c1
-rw-r--r--libgomp/testsuite/libgomp.c-c++-common/requires-5.c1
-rw-r--r--libgomp/testsuite/libgomp.c-c++-common/requires-6.c2
-rw-r--r--libgomp/testsuite/libgomp.c-c++-common/reverse-offload-1.c1
-rw-r--r--libgomp/testsuite/libgomp.c/reverse-offload-sm30.c15
-rw-r--r--libgomp/testsuite/libgomp.fortran/reverse-offload-1.f901
-rw-r--r--libgomp/testsuite/libgomp.oacc-c++/deep-copy-17.C101
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-15.c68
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-16.c231
-rw-r--r--libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-arrayofstruct.c (renamed from gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c)2
-rw-r--r--libstdc++-v3/ChangeLog260
-rw-r--r--libstdc++-v3/acinclude.m42
-rw-r--r--libstdc++-v3/config/abi/pre/gnu-versioned-namespace.ver11
-rw-r--r--libstdc++-v3/config/abi/pre/gnu.ver6
-rwxr-xr-xlibstdc++-v3/configure2
-rw-r--r--libstdc++-v3/doc/doxygen/user.cfg.in1
-rw-r--r--libstdc++-v3/doc/html/manual/abi.html2
-rw-r--r--libstdc++-v3/doc/html/manual/bugs.html3
-rw-r--r--libstdc++-v3/doc/html/manual/debug_mode_using.html7
-rw-r--r--libstdc++-v3/doc/html/manual/using_macros.html5
-rw-r--r--libstdc++-v3/doc/xml/manual/abi.xml2
-rw-r--r--libstdc++-v3/doc/xml/manual/intro.xml6
-rw-r--r--libstdc++-v3/include/backward/auto_ptr.h4
-rw-r--r--libstdc++-v3/include/bits/alloc_traits.h61
-rw-r--r--libstdc++-v3/include/bits/allocator.h67
-rw-r--r--libstdc++-v3/include/bits/basic_string.h4
-rw-r--r--libstdc++-v3/include/bits/basic_string.tcc70
-rw-r--r--libstdc++-v3/include/bits/memoryfwd.h2
-rw-r--r--libstdc++-v3/include/bits/ranges_algo.h38
-rw-r--r--libstdc++-v3/include/bits/ranges_util.h38
-rw-r--r--libstdc++-v3/include/bits/shared_ptr_atomic.h42
-rw-r--r--libstdc++-v3/include/bits/stl_algo.h26
-rw-r--r--libstdc++-v3/include/bits/stl_algobase.h4
-rw-r--r--libstdc++-v3/include/bits/stl_list.h5
-rw-r--r--libstdc++-v3/include/bits/stl_tempbuf.h5
-rw-r--r--libstdc++-v3/include/bits/stl_uninitialized.h17
-rw-r--r--libstdc++-v3/include/bits/stream_iterator.h3
-rw-r--r--libstdc++-v3/include/bits/streambuf_iterator.h1
-rw-r--r--libstdc++-v3/include/bits/unique_ptr.h14
-rw-r--r--libstdc++-v3/include/c_global/cstdlib24
-rw-r--r--libstdc++-v3/include/debug/assertions.h16
-rw-r--r--libstdc++-v3/include/debug/debug.h2
-rw-r--r--libstdc++-v3/include/debug/formatter.h3
-rw-r--r--libstdc++-v3/include/debug/safe_iterator.h2
-rw-r--r--libstdc++-v3/include/ext/alloc_traits.h3
-rw-r--r--libstdc++-v3/include/std/bit2
-rw-r--r--libstdc++-v3/include/std/functional22
-rw-r--r--libstdc++-v3/include/std/future1
-rw-r--r--libstdc++-v3/include/std/iterator7
-rw-r--r--libstdc++-v3/include/std/memory12
-rw-r--r--libstdc++-v3/include/std/ranges1125
-rw-r--r--libstdc++-v3/include/tr1/random.h115
-rw-r--r--libstdc++-v3/python/libstdcxx/v6/printers.py56
-rw-r--r--libstdc++-v3/testsuite/17_intro/names.cc2
-rw-r--r--libstdc++-v3/testsuite/19_diagnostics/error_code/cons/lwg3629.cc4
-rw-r--r--libstdc++-v3/testsuite/19_diagnostics/error_condition/cons/lwg3629.cc4
-rw-r--r--libstdc++-v3/testsuite/20_util/is_complete_or_unbounded/memoization_neg.cc2
-rw-r--r--libstdc++-v3/testsuite/29_atomics/atomic_ref/compare_exchange_padding.cc11
-rw-r--r--libstdc++-v3/testsuite/30_threads/promise/members/set_exception_neg.cc18
-rw-r--r--libstdc++-v3/testsuite/libstdc++-prettyprinters/debug.cc15
-rw-r--r--libstdc++-v3/testsuite/libstdc++-prettyprinters/simple.cc15
-rw-r--r--libstdc++-v3/testsuite/libstdc++-prettyprinters/simple11.cc15
-rw-r--r--libstdc++-v3/testsuite/std/ranges/adaptors/chunk/1.cc80
-rw-r--r--libstdc++-v3/testsuite/std/ranges/adaptors/chunk_by/1.cc58
-rw-r--r--libstdc++-v3/testsuite/std/ranges/adaptors/join.cc30
-rw-r--r--libstdc++-v3/testsuite/std/ranges/adaptors/slide/1.cc105
-rw-r--r--libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/37986.cc2
-rw-r--r--libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/requirements/typedefs.cc49
-rw-r--r--libstdc++-v3/testsuite/util/testsuite_abi.cc3
270 files changed, 10619 insertions, 2474 deletions
diff --git a/ChangeLog b/ChangeLog
index c947372..7212978 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2022-09-15 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+
+ * MAINTAINERS (Write After Approval): Add myself.
+
2022-09-07 Jeff Law <jeffreyalaw@gmail.com>
* MAINTAINERS: Update my email address.
diff --git a/MAINTAINERS b/MAINTAINERS
index e89eb34..be14685 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -641,6 +641,7 @@ YunQiang Su <yunqiang.su@cipunited.com>
Robert Suchanek <rsuchy84@gmail.com>
Andrew Sutton <andrew.n.sutton@gmail.com>
Gabriele Svelto <gabriele.svelto@st.com>
+Torbjörn Svensson <torbjorn.svensson@foss.st.com>
Toma Tabacu <toma.tabacu@gmail.com>
Omar Tahir <omar.tahir@arm.com>
Sriraman Tallam <tmsriram@google.com>
diff --git a/contrib/filter-clang-warnings.py b/contrib/filter-clang-warnings.py
index 942cd30..3c68be0 100755
--- a/contrib/filter-clang-warnings.py
+++ b/contrib/filter-clang-warnings.py
@@ -39,7 +39,7 @@ def skip_warning(filename, message):
'-Wignored-attributes', '-Wgnu-zero-variadic-macro-arguments',
'-Wformat-security', '-Wundefined-internal',
'-Wunknown-warning-option', '-Wc++20-extensions',
- '-Wbitwise-instead-of-logical'],
+ '-Wbitwise-instead-of-logical', 'egrep is obsolescent'],
'insn-modes.cc': ['-Wshift-count-overflow'],
'insn-emit.cc': ['-Wtautological-compare'],
'insn-attrtab.cc': ['-Wparentheses-equality'],
@@ -57,8 +57,8 @@ def skip_warning(filename, message):
'lex.cc': ['-Wc++20-attribute-extensions'],
}
- for name, ignores in ignores.items():
- for i in ignores:
+ for name, ignore in ignores.items():
+ for i in ignore:
if name in filename and i in message:
return True
return False
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index e670cae..6dded16 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,485 @@
+2022-09-19 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+
+ * targhooks.cc (default_zero_call_used_regs): Improve sorry
+ message.
+
+2022-09-18 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (omp_segregate_mapping_groups): Update comment.
+ (gimplify_adjust_omp_clauses): Move ATTACH and
+ ATTACH_ZERO_LENGTH_ARRAY_SECTION nodes to the end of the clause list
+ for offloaded OpenMP regions.
+
+2022-09-18 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/106831
+ * value-range.cc (frange::singleton_p): Avoid propagating long
+ doubles that may have multiple representations.
+
+2022-09-18 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (frange_add_zeros): Replace set_signbit with
+ union of zero.
+ * value-query.cc (range_query::get_tree_range): Remove set_signbit
+ use.
+ * value-range-pretty-print.cc (vrange_printer::print_frange_prop):
+ Remove.
+ (vrange_printer::print_frange_nan): New.
+ * value-range-pretty-print.h (print_frange_prop): Remove.
+ (print_frange_nan): New.
+ * value-range-storage.cc (frange_storage_slot::set_frange): Set
+ kind and NAN fields.
+ (frange_storage_slot::get_frange): Restore kind and NAN fields.
+ * value-range-storage.h (class frange_storage_slot): Add kind and
+ NAN fields.
+ * value-range.cc (frange::update_nan): Remove.
+ (frange::set_signbit): Remove.
+ (frange::set): Adjust for NAN fields.
+ (frange::normalize_kind): Remove m_props.
+ (frange::combine_zeros): New.
+ (frange::union_nans): New.
+ (frange::union_): Handle new NAN fields.
+ (frange::intersect_nans): New.
+ (frange::intersect): Handle new NAN fields.
+ (frange::operator=): Same.
+ (frange::operator==): Same.
+ (frange::contains_p): Same.
+ (frange::singleton_p): Remove special case for signed zeros.
+ (frange::verify_range): Adjust for new NAN fields.
+ (frange::set_zero): Handle signed zeros.
+ (frange::set_nonnegative): Same.
+ (range_tests_nan): Adjust tests.
+ (range_tests_signed_zeros): Same.
+ (range_tests_signbit): Same.
+ (range_tests_floats): Same.
+ * value-range.h (class fp_prop): Remove.
+ (FP_PROP_ACCESSOR): Remove.
+ (class frange_props): Remove
+ (frange::lower_bound): NANs don't have endpoints.
+ (frange::upper_bound): Same.
+ (frange_props::operator==): Remove.
+ (frange_props::union_): Remove.
+ (frange_props::intersect): Remove.
+ (frange::update_nan): New.
+ (frange::clear_nan): New.
+ (frange::undefined_p): New.
+ (frange::set_nan): New.
+ (frange::known_finite): Adjust for new NAN representation.
+ (frange::maybe_isnan): Same.
+ (frange::known_isnan): Same.
+ (frange::signbit_p): Same.
+ * gimple-range-fold.cc (range_of_builtin_int_call): Rename
+ known_signbit_p into signbit_p.
+
+2022-09-17 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+
+ * config/csky/csky.h (FUNCTION_ARG_REGNO_P): Cast REGNO to (int)
+ to prevent warning.
+
+2022-09-17 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106958
+ * tree-ssa-reassoc.cc (optimize_range_tests_cmp_bitwise): If
+ id >= l, cast op to type1, otherwise to pointer_sized_int_node.
+ If type has pointer type, cast exp to pointer_sized_int_node
+ even when id < l.
+
+2022-09-16 liuhongt <hongtao.liu@intel.com>
+
+ PR target/106910
+ * config/i386/i386-builtins.cc
+ (ix86_builtin_vectorized_function): Modernized with
+ corresponding expanders.
+ * config/i386/sse.md (lrint<mode><sseintvecmodelower>2): New
+ expander.
+ (floor<mode>2): Ditto.
+ (lfloor<mode><sseintvecmodelower>2): Ditto.
+ (ceil<mode>2): Ditto.
+ (lceil<mode><sseintvecmodelower>2): Ditto.
+ (btrunc<mode>2): Ditto.
+ (lround<mode><sseintvecmodelower>2): Ditto.
+ (exp2<mode>2): Ditto.
+
+2022-09-15 Joseph Myers <joseph@codesourcery.com>
+
+ * ginclude/float.h (INFINITY): Define only if
+ [__FLT_HAS_INFINITY__].
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * tree-ssa-pre.cc (translate_vuse_through_block): Only
+ keep the VUSE if its def dominates PHIBLOCK.
+ (prune_clobbered_mems): Rewrite logic so we check whether
+ a value dies in a block when the VUSE def doesn't dominate it.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * tree.cc (build_common_tree_nodes): Initialize void_list_node
+ here.
+
+2022-09-15 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/106550
+ * config/rs6000/rs6000.cc (rs6000_emit_set_long_const): Use pli.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (frange_add_zeros): New.
+ (build_le): Call frange_add_zeros.
+ (build_ge): Same.
+ (foperator_equal::op1_range): Same.
+ (foperator_not_equal::op1_range): Same.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (build_le): Accept frange instead of number.
+ (build_lt): Same.
+ (build_ge): Same.
+ (build_gt): Same.
+ (foperator_lt::op1_range): Pass full range to build_*.
+ (foperator_lt::op2_range): Same.
+ (foperator_le::op1_range): Same.
+ (foperator_le::op2_range): Same.
+ (foperator_gt::op1_range): Same.
+ (foperator_gt::op2_range): Same.
+ (foperator_ge::op1_range): Same.
+ (foperator_ge::op2_range): Same.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * value-range.cc (frange::set): Use set_nan.
+ * value-range.h (frange::set_nan): Inline code originally in
+ set().
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * range-op-float.cc (frange_set_nan): Remove.
+ (build_lt): Use set_nan, update_nan, clear_nan.
+ (build_gt): Same.
+ (foperator_equal::op1_range): Same.
+ (foperator_not_equal::op1_range): Same.
+ (foperator_lt::op1_range): Same.
+ (foperator_lt::op2_range): Same.
+ (foperator_le::op1_range): Same.
+ (foperator_le::op2_range): Same.
+ (foperator_gt::op1_range): Same.
+ (foperator_gt::op2_range): Same.
+ (foperator_ge::op1_range): Same.
+ (foperator_ge::op2_range): Same.
+ (foperator_unordered::op1_range): Same.
+ (foperator_ordered::op1_range): Same.
+ * value-query.cc (range_query::get_tree_range): Same.
+ * value-range.cc (frange::set_nan): Same.
+ (frange::update_nan): Same.
+ (frange::union_): Same.
+ (frange::intersect): Same.
+ (range_tests_nan): Same.
+ (range_tests_signed_zeros): Same.
+ (range_tests_signbit): Same.
+ (range_tests_floats): Same.
+ * value-range.h (class frange): Add update_nan and clear_nan.
+ (frange::set_nan): New.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ * value-query.cc (range_query::get_tree_range): Remove check for overflow.
+ * value-range-pretty-print.cc (vrange_printer::visit): Move read
+ of type until after undefined_p is checked.
+ * value-range.cc (frange::set): Remove asserts for REAL_CST.
+ (frange::contains_p): Tidy up.
+ (range_tests_nan): Add comment.
+ * value-range.h (frange::type): Check for undefined_p.
+ (frange::set_undefined): Remove set of endpoints.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (omp_group_last): Allow GOMP_MAP_ATTACH_DETACH after
+ GOMP_MAP_STRUCT (for reindexing).
+ (omp_gather_mapping_groups): Reimplement using...
+ (omp_gather_mapping_groups_1): This new function. Stop processing at
+ GATHER_SENTINEL.
+ (omp_group_base): Allow GOMP_MAP_TO_PSET without any following node.
+ (omp_index_mapping_groups): Reimplement using...
+ (omp_index_mapping_groups_1): This new function. Handle
+ REINDEX_SENTINEL.
+ (omp_reindex_mapping_groups, omp_mapped_by_containing_struct): New
+ functions.
+ (omp_tsort_mapping_groups_1): Adjust handling of base group being the
+ same as current group. Use omp_mapped_by_containing_struct.
+ (omp_build_struct_sibling_lists): Use omp_mapped_by_containing_struct
+ and omp_reindex_mapping_groups. Robustify group deletion for reordered
+ lists.
+ (gimplify_scan_omp_clauses): Update calls to
+ omp_build_struct_sibling_lists.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (gimplify_omp_var_data): Remove GOVD_MAP_HAS_ATTACHMENTS.
+ (GOMP_FIRSTPRIVATE_IMPLICIT): Renumber.
+ (insert_struct_comp_map): Refactor function into...
+ (build_omp_struct_comp_nodes): This new function. Remove list handling
+ and improve self-documentation.
+ (extract_base_bit_offset): Remove BASE_REF, OFFSETP parameters. Move
+ code to strip outer parts of address out of function, but strip no-op
+ conversions.
+ (omp_mapping_group): Add DELETED field for use during reindexing.
+ (omp_strip_components_and_deref, omp_strip_indirections): New functions.
+ (omp_group_last, omp_group_base): Add GOMP_MAP_STRUCT handling.
+ (omp_gather_mapping_groups): Initialise DELETED field for new groups.
+ (omp_index_mapping_groups): Notice DELETED groups when (re)indexing.
+ (omp_siblist_insert_node_after, omp_siblist_move_node_after,
+ omp_siblist_move_nodes_after, omp_siblist_move_concat_nodes_after): New
+ helper functions.
+ (omp_accumulate_sibling_list): New function to build up GOMP_MAP_STRUCT
+ node groups for sibling lists. Outlined from gimplify_scan_omp_clauses.
+ (omp_build_struct_sibling_lists): New function.
+ (gimplify_scan_omp_clauses): Remove struct_map_to_clause,
+ struct_seen_clause, struct_deref_set. Call
+ omp_build_struct_sibling_lists as pre-pass instead of handling sibling
+ lists in the function's main processing loop.
+ (gimplify_adjust_omp_clauses_1): Remove GOVD_MAP_HAS_ATTACHMENTS
+ handling, unused now.
+ * omp-low.cc (scan_sharing_clauses): Handle pointer-type indirect
+ struct references, and references to pointers to structs also.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106938
+ * tree-cfg.cc (execute_fixup_cfg): Purge dead abnormal
+ edges for all last stmts in a block. Do EH cleanup
+ only on the last stmt in a block.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ PR tree-optimization/106936
+ * value-query.cc (range_query::get_value_range): Remove assert.
+
+2022-09-14 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+
+ * config/mips/mips.cc (mips_option_override): Drop unused variable.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * gimplify.cc (is_or_contains_p, omp_target_reorder_clauses): Delete
+ functions.
+ (omp_tsort_mark): Add enum.
+ (omp_mapping_group): Add struct.
+ (debug_mapping_group, omp_get_base_pointer, omp_get_attachment,
+ omp_group_last, omp_gather_mapping_groups, omp_group_base,
+ omp_index_mapping_groups, omp_containing_struct,
+ omp_tsort_mapping_groups_1, omp_tsort_mapping_groups,
+ omp_segregate_mapping_groups, omp_reorder_mapping_groups): New
+ functions.
+ (gimplify_scan_omp_clauses): Call above functions instead of
+ omp_target_reorder_clauses, unless we've seen an error.
+ * omp-low.cc (scan_sharing_clauses): Avoid strict test if we haven't
+ sorted mapping groups.
+
+2022-09-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106878
+ * tree-cfg.cc (verify_gimple_assign_binary): Disallow pointer,
+ reference or OFFSET_TYPE BIT_IOR_EXPR, BIT_XOR_EXPR or, unless
+ the second argument is INTEGER_CST, BIT_AND_EXPR.
+ * match.pd ((type) X op CST -> (type) (X op ((type-x) CST)),
+ (type) (((type2) X) op Y) -> (X op (type) Y)): Punt for
+ POINTER_TYPE_P or OFFSET_TYPE.
+ * tree-ssa-reassoc.cc (optimize_range_tests_cmp_bitwise): For
+ pointers cast them to pointer sized integers first.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106934
+ * tree-ssa.cc (non_rewritable_mem_ref_base): Avoid BIT_FIELD_REFs
+ of bitfields.
+ (maybe_rewrite_mem_ref_base): Likewise.
+
+2022-09-14 liuhongt <hongtao.liu@intel.com>
+
+ PR tree-optimization/106905
+ * tree-vect-loop.cc (vectorizable_nonlinear_induction): Return
+ false when !vect_use_loop_mask_for_alignment_p (loop_vinfo) &&
+ LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0.
+
+2022-09-13 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR target/106877
+ * reg-stack.cc (move_for_stack_reg): Check for any_malformed_asm
+ in gcc_assert.
+
+2022-09-13 Max Filippov <jcmvbkbc@gmail.com>
+
+ Revert:
+ 2022-09-12 Takayuki 'January June' Suwa <jjsuwa_sys3175@yahoo.co.jp>
+
+ * config/xtensa/xtensa.cc (TARGET_CONSTANT_OK_FOR_CPROP_P):
+ New macro definition.
+ (xtensa_constant_ok_for_cprop_p):
+ Implement the hook as mentioned above.
+
+2022-09-13 Kewen Lin <linkw@linux.ibm.com>
+
+ PR target/104482
+ * config/rs6000/rs6000-c.cc (altivec_resolve_overloaded_builtin): Fix
+ the equality check for argument number, and move this hunk ahead.
+
+2022-09-13 Kewen.Lin <linkw@gcc.gnu.org>
+
+ PR target/105485
+ * config/rs6000/rs6000-builtin.cc (rs6000_gimple_fold_builtin): Add
+ the handling for unresolved overloaded builtin function.
+ (rs6000_expand_builtin): Likewise.
+
+2022-09-13 Kewen Lin <linkw@linux.ibm.com>
+
+ * config/rs6000/rs6000.cc (class rs6000_cost_data): Add new members
+ m_nstores, m_reduc_factor, m_gather_load and member function
+ determine_suggested_unroll_factor.
+ (rs6000_cost_data::update_target_cost_per_stmt): Update for m_nstores,
+ m_reduc_factor and m_gather_load.
+ (rs6000_cost_data::determine_suggested_unroll_factor): New function.
+ (rs6000_cost_data::finish_cost): Use determine_suggested_unroll_factor.
+ * config/rs6000/rs6000.opt (rs6000-vect-unroll-limit): New parameter.
+ (rs6000-vect-unroll-issue): Likewise.
+ (rs6000-vect-unroll-reduc-threshold): Likewise.
+ * doc/invoke.texi (rs6000-vect-unroll-limit): Document new parameter.
+
+2022-09-13 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/106909
+ * gimple-fold.cc (gimple_fold_call): Clear the ctrl-altering
+ flag of a unreachable call.
+
+2022-09-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106913
+ * tree-ssa-uninit.cc (warn_uninitialized_vars): Do not set
+ ft_reachable on EXIT_BLOCK.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc (aarch64_classify_vector_mode): Use
+ TARGET_FLOAT instead of TARGET_SIMD.
+ (aarch64_vectorize_related_mode): Restrict ADVSIMD handling to
+ TARGET_SIMD.
+ (aarch64_hard_regno_mode_ok): Don't allow tuples of 2 64-bit vectors
+ in GPRs.
+ (aarch64_classify_address): Treat little-endian structure moves
+ like big-endian for TARGET_FLOAT && !TARGET_SIMD.
+ (aarch64_secondary_memory_needed): New function.
+ (aarch64_secondary_reload): Handle 128-bit Advanced SIMD vectors
+ in the same way as TF, TI and TD.
+ (aarch64_rtx_mult_cost): Restrict ADVSIMD handling to TARGET_SIMD.
+ (aarch64_rtx_costs): Likewise.
+ (aarch64_register_move_cost): Treat a pair of 64-bit vectors
+ separately from a single 128-bit vector. Handle the cost implied
+ by aarch64_secondary_memory_needed.
+ (aarch64_simd_valid_immediate): Restrict ADVSIMD handling to
+ TARGET_SIMD.
+ (aarch64_expand_vec_perm_const_1): Likewise.
+ (TARGET_SECONDARY_MEMORY_NEEDED): New macro.
+ * config/aarch64/iterators.md (VTX): New iterator.
+ * config/aarch64/aarch64.md (arches): Add fp_q as a synonym of simd.
+ (arch_enabled): Adjust accordingly.
+ (@aarch64_reload_mov<TX:mode>): Extend to...
+ (@aarch64_reload_mov<VTX:mode>): ...this.
+ * config/aarch64/aarch64-simd.md (mov<mode>): Require TARGET_FLOAT
+ rather than TARGET_SIMD.
+ (movmisalign<mode>): Likewise.
+ (load_pair<DREG:mode><DREG2:mode>): Likewise.
+ (vec_store_pair<DREG:mode><DREG2:mode>): Likewise.
+ (load_pair<VQ:mode><VQ2:mode>): Likewise.
+ (vec_store_pair<VQ:mode><VQ2:mode>): Likewise.
+ (@aarch64_split_simd_mov<mode>): Likewise.
+ (aarch64_get_low<mode>): Likewise.
+ (aarch64_get_high<mode>): Likewise.
+ (aarch64_get_half<mode>): Likewise. Canonicalize to a move for
+ lowpart extracts.
+ (*aarch64_simd_mov<VDMOV:mode>): Require TARGET_FLOAT rather than
+ TARGET_SIMD. Use different w<-w and r<-w instructions for
+ !TARGET_SIMD. Disable immediate moves for !TARGET_SIMD but
+ add an alternative specifically for w<-Z.
+ (*aarch64_simd_mov<VQMOV:mode>): Require TARGET_FLOAT rather than
+ TARGET_SIMD. Likewise for the associated define_splits. Disable
+ FPR moves and immediate moves for !TARGET_SIMD but add an alternative
+ specifically for w<-Z.
+ (aarch64_simd_mov_from_<mode>high): Require TARGET_FLOAT rather than
+ TARGET_SIMD. Restrict the existing alternatives to TARGET_SIMD
+ but add a new r<-w one for !TARGET_SIMD.
+ (*aarch64_get_high<mode>): New pattern.
+ (load_pair_lanes<mode>): Require TARGET_FLOAT rather than TARGET_SIMD.
+ (store_pair_lanes<mode>): Likewise.
+ (*aarch64_combine_internal<mode>): Likewise. Restrict existing
+ w<-w, w<-r and w<-m alternatives to TARGET_SIMD but add a new w<-r
+ alternative for !TARGET_SIMD.
+ (*aarch64_combine_internal_be<mode>): Likewise.
+ (aarch64_combinez<mode>): Require TARGET_FLOAT rather than TARGET_SIMD.
+ Remove bogus arch attribute.
+ (*aarch64_combinez_be<mode>): Likewise.
+ (@aarch64_vec_concat<mode>): Require TARGET_FLOAT rather than
+ TARGET_SIMD.
+ (aarch64_combine<mode>): Likewise.
+ (aarch64_rev_reglist<mode>): Likewise.
+ (mov<mode>): Likewise.
+ (*aarch64_be_mov<VSTRUCT_2D:mode>): Extend to TARGET_FLOAT &&
+ !TARGET_SIMD, regardless of endianness. Extend associated
+ define_splits in the same way, both for this pattern and the
+ ones below.
+ (*aarch64_be_mov<VSTRUCT_2Qmode>): Likewise. Restrict w<-w
+ alternative to TARGET_SIMD.
+ (*aarch64_be_movoi): Likewise.
+ (*aarch64_be_movci): Likewise.
+ (*aarch64_be_movxi): Likewise.
+ (*aarch64_be_mov<VSTRUCT_4QD:mode>): Extend to TARGET_FLOAT
+ && !TARGET_SIMD, regardless of endianness. Restrict w<-w alternative
+ to TARGET_SIMD for tuples of 128-bit vectors.
+ (*aarch64_be_mov<VSTRUCT_4QD:mode>): Likewise.
+ * config/aarch64/aarch64-ldpstp.md: Remove TARGET_SIMD condition
+ from DREG STP peephole. Change TARGET_SIMD to TARGET_FLOAT in
+ the VQ and VP_2E LDP and STP peepholes.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-simd.md (movv8di): Remove TARGET_SIMD
+ condition. Likewise for the related define_split. Tweak formatting.
+
+2022-09-12 Takayuki 'January June' Suwa <jjsuwa_sys3175@yahoo.co.jp>
+
+ * config/xtensa/xtensa.cc (TARGET_CONSTANT_OK_FOR_CPROP_P):
+ New macro definition.
+ (xtensa_constant_ok_for_cprop_p):
+ Implement the hook as mentioned above.
+
+2022-09-12 Joseph Myers <joseph@codesourcery.com>
+
+ * ginclude/stdatomic.h [defined __STDC_VERSION__ &&
+ __STDC_VERSION__ > 201710L] (ATOMIC_VAR_INIT): Do not define.
+
+2022-09-12 Tobias Burnus <tobias@codesourcery.com>
+
+ * config/nvptx/mkoffload.cc (process): Replace a fatal_error by
+ a warning + not enabling offloading if -misa=sm_30 prevents
+ reverse offload.
+ (main): Use tool_name as progname for diagnostic.
+ * config/gcn/mkoffload.cc (main): Likewise.
+
+2022-09-12 Aldy Hernandez <aldyh@redhat.com>
+
+ * value-range.cc (frange::set_signbit): Avoid changing sign when
+ already in the correct sign.
+
+2022-09-12 Max Filippov <jcmvbkbc@gmail.com>
+
+ * config/xtensa/xtensa.cc (xtensa_function_value_regno_p):
+ Recognize all 4 return registers.
+ * config/xtensa/xtensa.h (GP_RETURN_REG_COUNT): New definition.
+ * config/xtensa/xtensa.md (untyped_call): New pattern.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ * doc/extend.texi (Floating Types): Fix "_float128" typo.
+
2022-09-10 Takayuki 'January June' Suwa <jjsuwa_sys3175@yahoo.co.jp>
* config/xtensa/xtensa.cc (xtensa_constantsynth):
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index eaf5982..54f97aa 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20220912
+20220920
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index b1fbd1e..a1c4375 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,505 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * gcc-interface/trans.cc (gigi): Do not initialize void_list_node.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.cc (gnat_to_gnu_entity): Relax assertion when
+ front-end unnesting is enabled.
+
+2022-09-12 Justin Squirek <squirek@adacore.com>
+
+ * sem_util.adb
+ (Innermost_Master_Scope_Depth): Detect and handle case where scope
+ depth is not set on an enclosing scope.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * bindgen.adb: When the binder is invoked for the host, generate a
+ "with CUDA.Internal;" with clause.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * doc/gnat_rm/implementation_defined_pragmas.rst
+ (Pragma Unreferenced): Sync description with
+ Sem_Warn.Has_Junk_Name routine.
+ * gnat_rm.texi: Regenerate.
+ * gnat_ugn.texi: Regenerate.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_attr.adb (Analyze_Attribute [Valid_Scalars]): Move check for
+ unchecked union before checks for private and public types.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * bindgen.adb: When the binder is invoked for the host, it
+ declares imported subprograms corresponding to the Adainit and
+ Adafinal routines on the device. Declare string constants and
+ expression functions for the Ada source names and the link names
+ of these routines. Generate these subprogram declarations (and
+ accompanying Import pragmas) in Gen_CUDA_Defs. Generate
+ CUDA_Execute pragmas to call these subprograms from the host in
+ Gen_Adafinal and Gen_CUDA_Init. When the binder is invoked for the
+ device, include a CUDA_Global aspect declaration in the
+ declarations of Adainit and Adafinal and use the aforementioned
+ link names in the Export pragmas generated for those two routines.
+ * debug.adb: Update comments about "d_c" and "d_d" switches.
+ * opt.ads: Declare new Boolean variable,
+ Enable_CUDA_Device_Expansion. This complements the existing
+ Enable_CUDA_Expansion variable, which is used to enable host-side
+ CUDA expansion. The new variable enables device-side CUDA
+ expansion. It is currently never set during compilation; it is
+ only set via a binder switch.
+ * switch-b.adb
+ (scan_debug_switches): Add new use of the "-d_d" binder switch.
+	The new switch and the variable Opt.Enable_CUDA_Device_Expansion
+	follow the existing pattern of the "-d_c" switch and the variable
+	Opt.Enable_CUDA_Expansion. Flag error if both "-d_c" and "-d_d"
+ are specified.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+	* contracts.adb (Build_Subprogram_Contract_Wrapper): Remove useless
+ local variable. In the case of a function, replace the extended
+ return statement by a block statement declaring a renaming of the
+ call to the local subprogram after removing side effects manually.
+ (Expand_Subprogram_Contract): Adjust description accordingly.
+ * exp_ch6.adb (Expand_Ctrl_Function_Call): Rewrite obsolete
+ comment and do not apply the transformation twice.
+ * sem_attr.adb (Analyze_Attribute_Old_Result): Now expect a block
+ statement instead of an extended return statement.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * erroutc.adb (Set_Msg_Insertion_Name): Special-case printing with
+ acronyms.
+
+2022-09-12 Yannick Moy <moy@adacore.com>
+
+ * libgnat/s-imagei.adb (Image_Integer): Add justification.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * sem_prag.adb (Get_SPARK_Mode_Type): Fix header box; replace
+ chained IF with a CASE statement.
+
+2022-09-12 Yannick Moy <moy@adacore.com>
+
+ * sem_prag.adb (Analyze_Pragma): Accept SPARK_Mode=>Auto as
+ configuration pragma.
+ (Get_SPARK_Mode): Make the value for Auto explicit.
+ * snames.ads-tmpl (Name_Auto): Add name.
+
+2022-09-12 Joffrey Huguet <huguet@adacore.com>
+
+ * doc/gnat_rm/the_gnat_library.rst: Remove paragraphs about SPARK
+ containers.
+ * gnat_rm.texi, gnat_ugn.texi: Regenerate.
+
+2022-09-12 Yannick Moy <moy@adacore.com>
+
+ * libgnat/s-maccod.ads: Mark package as SPARK_Mode Off.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * fe.h (Unnest_Subprogram_Mode): Declare.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * contracts.adb
+ (Analyze_Package_Contract): Do not analyze the contract of a
+ temporary package created just to check conformance of an actual
+ package.
+
+2022-09-12 Joffrey Huguet <huguet@adacore.com>
+
+ * Makefile.rtl: Remove SPARK containers filenames.
+ * impunit.adb: Remove SPARK containers packages names.
+ * libgnat/a-cfdlli.adb, libgnat/a-cfdlli.ads: Remove content and
+ add pragma Compile_Time_Error with suitable message.
+ * libgnat/a-cfhama.adb, libgnat/a-cfhama.ads: Likewise.
+ * libgnat/a-cfhase.adb, libgnat/a-cfhase.ads: Likewise.
+ * libgnat/a-cfidll.adb, libgnat/a-cfidll.ads: Likewise.
+ * libgnat/a-cfinse.adb, libgnat/a-cfinse.ads: Likewise.
+ * libgnat/a-cfinve.adb, libgnat/a-cfinve.ads: Likewise.
+ * libgnat/a-cforma.adb, libgnat/a-cforma.ads: Likewise.
+ * libgnat/a-cforse.adb, libgnat/a-cforse.ads: Likewise.
+ * libgnat/a-cofove.adb, libgnat/a-cofove.ads: Likewise.
+ * libgnat/a-cofuma.adb, libgnat/a-cofuma.ads: Likewise.
+ * libgnat/a-cofuse.adb, libgnat/a-cofuse.ads: Likewise.
+ * libgnat/a-cofuve.adb, libgnat/a-cofuve.ads: Likewise.
+ * libgnat/a-cofuba.adb, libgnat/a-cofuba.ads: Remove package.
+
+2022-09-12 Piotr Trojanek <trojanek@adacore.com>
+
+ * exp_attr.adb (Expand_N_Attribute_Reference [Attribute_Old]):
+ Adapt to object declaration being rewritten into object renaming.
+
+2022-09-12 Justin Squirek <squirek@adacore.com>
+
+ * contracts.adb, contracts.ads
+ (Analyze_Pragmas_In_Declarations): Added to aid in the new
+ expansion model so that pragmas relating to contracts can get
+ processed early before the rest of the subprogram containing them.
+ (Build_Subprogram_Contract_Wrapper): Created to do the majority of
+ expansion for postconditions. It builds a local wrapper with the
+ statements and declarations within a given subprogram.
+ (Is_Prologue_Renaming): Moved out from Process_Preconditions to be
+ used generally within the contracts package.
+ (Build_Entry_Contract_Wrapper): Moved from exp_ch7.
+ (Expand_Subprogram_Contract): Add new local variable Decls to
+ store expanded declarations needed for evaluation of contracts.
+ Call new wrapper building procedure and modify comments to match
+ new expansion model.
+ (Get_Postcond_Enabled): Deleted.
+ (Get_Result_Object_For_Postcond): Deleted.
+ (Get_Return_Success_For_Postcond): Deleted.
+ (Process_Contract_Cases): Add new parameter to store declarations.
+ (Process_Postconditions): Add new parameter to store declarations.
+ (Process_Preconditions): Add new parameter to store declarations.
+ Add code to move entry-call prologue renamings
+ * einfo.ads: Document new field Wrapped_Statements and modify
+ comment for Postconditions_Proc.
+ * exp_attr.adb
+ (Analyze_Attribute): Modify expansion of the 'Old attribute to
+ recognize new expansion model and use Wrapped_Statements instead
+ of Postconditions_Proc.
+ * exp_ch6.adb
+ (Add_Return): Remove special expansion for postconditions.
+ (Expand_Call): Modify condition checking for calls to access
+ subprogram wrappers to handle new expansion models.
+ (Expand_Call_Helper): Remove special expansion for postconditions.
+ (Expand_Non_Function_Return): Remove special expansion for
+ postconditions.
+ (Expand_Simple_Function_Return): Remove special expansion for
+ postconditions.
+ * exp_ch7.adb
+ (Build_Finalizer): Deleted, but replaced by code in
+ Build_Finalizer_Helper
+ (Build_Finalizer_Helper): Renamed to Build_Finalizer, and special
+ handling of 'Old objects removed.
+ * exp_ch9.adb
+ (Build_Contract_Wrapper): Renamed and moved to contracts package.
+ * exp_prag.adb
+ (Expand_Pragma_Contract_Cases): Delay analysis of contracts since
+ they now instead get analyzed as part of the wrapper generation
+ instead of after analysis of their corresponding subprogram's
+ body.
+ (Expand_Pragma_Check): Label expanded if-statements which come
+ from the expansion of assertion statements as
+ Comes_From_Check_Or_Contract.
+ * freeze.adb
+ (Freeze_Entity): Add special case to avoid freezing when a freeze
+ node gets generated as part of the expansion of a postcondition
+ check.
+ * gen_il-gen-gen_nodes.adb: Add new flag
+ Comes_From_Check_Or_Contract.
+ * gen_il-fields.ads: Add new field Wrapped_Statements. Add new
+ flag Comes_From_Check_Or_Contract.
+ * gen_il-gen-gen_entities.adb: Add new field Wrapped_Statements.
+ * ghost.adb
+ (Is_OK_Declaration): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ (Is_OK_Statement): Simplify condition due to the loss of
+ Original_Node as a result of the new expansion model of contracts
+ and use new flag Comes_From_Check_Or_Contract in its place.
+ * inline.adb
+ (Declare_Postconditions_Result): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ (Expand_Inlined_Call): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ * lib.adb, lib.ads
+ (ipu): Created to aid in debugging.
+ * lib-xref.adb
+ (Generate_References): Remove special handling for postcondition
+ procedures.
+ * sem_attr.adb
+ (Analyze_Attribute_Old_Result): Add new context in which 'Old can
+ appear due to the changes in expansion. Replace
+ Name_uPostconditions with Name_uWrapped_Statements.
+ (Result): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ * sem_ch11.adb
+ (Analyze_Handled_Statements): Remove check to exclude warnings on
+ useless assignments within postcondition procedures since
+ postconditions no longer get isolated into separate subprograms.
+ * sem_ch6.adb
+ (Analyze_Generic_Subprogram_Body): Modify expansion of generic
+ subprogram bodies so that contracts (and their associated pragmas)
+ get analyzed first.
+ (Analyze_Subprogram_Body_Helper): Remove global HSS variable due
+ to the HSS of the body potentially changing during the expansion
+ of contracts. In cases where it was used instead directly call
+ Handled_Statement_Sequence. Modify expansion of subprogram bodies
+ so that contracts (and their associated pragmas) get analyzed
+ first.
+ (Check_Missing_Return): Create local HSS variable instead of using
+ a global one.
+ (Move_Pragmas): Use new pragma table instead of an explicit list.
+ * sem_elab.adb
+ (Is_Postconditions_Proc): Deleted since the new scheme of
+ expansion no longer divides postcondition checks to a separate
+ subprogram and so cannot be easily identified (similar to
+ pre-condition checks).
+ (Info_Call): Remove info printing for _Postconditions subprograms.
+ (Is_Assertion_Pragma_Target): Remove check for postconditions
+ procedure
+ (Is_Bridge_Target): Remove check for postconditions procedure.
+ (Get_Invocation_Attributes): Remove unneeded local variables and
+ check for postconditions procedure.
+ (Output_Call): Remove info printing for _Postconditions
+ subprograms.
+ * sem_prag.adb, sem_prag.ads: Add new Pragma table for pragmas
+ significant to subprograms, along with tech-debt comment.
+ (Check_Arg_Is_Local_Name): Modified to recognize the new
+ _Wrapped_Statements internal subprogram and the new expansion
+ model.
+ (Relocate_Pragmas_To_Body): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ * sem_res.adb
+ (Resolve_Entry_Call): Add conditional to detect both contract
+ based wrappers of entries, but also wrappers generated as part of
+ general contract expansion (e.g. local postconditions
+ subprograms).
+ * sem_util.adb
+ (Accessibility_Level): Verify 'Access is not taken based on a
+ component of a function result.
+ (Has_Significant_Contracts): Replace Name_uPostconditions with
+ Name_uWrapped_Statements.
+ (Same_Or_Aliased_Subprogram): Add conditional to detect and obtain
+ the original subprogram based on the new concept of
+ "postcondition" wrappers.
+ * sinfo.ads: Add documentation for new flag
+ Comes_From_Check_Or_Contract.
+ * snames.ads-tmpl: Remove Name_uPostconditions and add
+ Name_uWrapped_Statements
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_unst.adb (Unnest_Subprograms.Search_Subprograms): Skip the
+ subprogram bodies that are not to be unnested.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_aggr.adb
+ (Resolve_Array_Aggregate): Generate an appropriate error message
+ in the case where an error in the source code leads to an
+ N_Iterated_Element_Association node in a bad context.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_ch4.adb
+ (Analyze_Selected_Component): Initialize the local variable Comp
+ to avoid having CodePeer generate an uninitialized variable
+ warning.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_ch4.adb
+ (Analyze_Selected_Component): Avoid initializing the local
+ variable Comp if the variable is not going to be subsequently
+ referenced. This is a correctness issue because the call to
+ First_Entity can fail.
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * sem_ch9.adb
+ (Satisfies_Lock_Free_Requirements): If Ceiling_Locking locking
+ policy has been specified, then either return False (if Lock_Free
+ was not explicitly specified) or generate a warning that ceiling
+ locking will not be implemented for this protected unit (if
+ Lock_Free was explicitly specified). Generate an error message (in
+ addition to returning False) if an explicit Lock_Free aspect
+ specification is rejected because atomic primitives are not
+ supported on the given target.
+ * doc/gnat_rm/implementation_defined_pragmas.rst: Clarify that the
+ Lock_Free aspect for a protected unit takes precedence over the
+ Ceiling_Locking locking policy in the case where both apply.
+ * gnat_rm.texi: Regenerate.
+
+2022-09-12 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_ch9.adb (Build_Protected_Spec): Tidy up and propagate the
+ Comes_From_Source flag onto the new formal parameters.
+ * sem_ch6.adb (Analyze_Subprogram_Body_Helper): Do not check
+ references for subprograms generated for protected subprograms.
+
+2022-09-12 Gary Dismukes <dismukes@adacore.com>
+
+ * sem_res.adb
+ (Resolve_Equality_Op): Add handling for equality ops with
+ user-defined literal operands.
+ * sem_util.ads
+ (Is_User_Defined_Literal): Update spec comment to indicate
+ inclusion of named number cases.
+ * sem_util.adb
+ (Corresponding_Primitive_Op): Rather than following the chain of
+ ancestor subprograms via Alias and Overridden_Operation links, we
+ check for matching profiles between primitive subprograms of the
+ descendant type and the ancestor subprogram (by calling a new
+ nested function Profile_Matches_Ancestor). This prevents the
+ compiler from hanging due to circular linkages via those fields
+ that can occur between inherited and overriding subprograms
+ (which might indicate a latent bug, but one that may be rather
+ delicate to resolve).
+ (Profile_Matches_Ancestor): New nested subprogram to compare the
+ profile of a primitive subprogram with the profile of a candidate
+ ancestor subprogram.
+ (Is_User_Defined_Literal): Also return True in cases where the
+ node N denotes a named number (E_Name_Integer and E_Named_Real).
+
+2022-09-12 Steve Baird <baird@adacore.com>
+
+ * debug.adb: remove a comment.
+
+2022-09-12 Bob Duff <duff@adacore.com>
+
+ * checks.adb
+ (Selected_Length_Checks): In the message for an aggregate that has
+ too few or too many elements, add "!!" to make sure the warning
+ gets printed in with'ed units. Note that we have to put "!!"
+ before the "??", because Compile_Time_Constraint_Error detects
+ warnings by comparing the last character of the message with '?'
+ (which is bit dubious, but we're not changing that here).
+ (Length_Mismatch_Info_Message): Use Unat for some things that
+ can't be negative. Specify Decimal instead of Auto in calls to
+ UI_Image.
+ * sem_util.adb
+ (Compile_Time_Constraint_Error): Minor.
+ * uintp.adb
+ (Image_Uint): It's always better to initialize objects on their
+ declaration.
+
+2022-09-12 Patrick Bernardi <bernardi@adacore.com>
+
+ * libgnat/system-vxworks7-x86_64-kernel.ads: Set
+ Support_Atomic_Primitives to false.
+ * libgnat/system-vxworks7-x86_64-rtp-smp.ads: Ditto.
+
+2022-09-12 Patrick Bernardi <bernardi@adacore.com>
+
+ * libgnat/system-qnx-arm.ads: Set Support_Atomic_Primitives to
+ false.
+ * libgnat/system-vxworks7-aarch64.ads: Ditto.
+ * libgnat/system-vxworks7-aarch64-rtp-smp.ads: Ditto.
+ * libgnat/system-vxworks7-arm.ads: Ditto.
+ * libgnat/system-vxworks7-arm-rtp-smp.ads: Ditto.
+ * libgnat/system-vxworks7-x86-kernel.ads: Ditto.
+ * libgnat/system-vxworks7-x86-rtp-smp.ads: Ditto.
+
+2022-09-12 Bob Duff <duff@adacore.com>
+
+ * par-tchk.adb, par-util.adb, prep.adb, prepcomp.adb, scng.adb:
+ Use "in" instead of chains of "=" connected with "or else".
+ Likewise for "not in", "/=", "and then". Misc cleanup.
+ * par-ch10.adb, par-ch12.adb, par-ch13.adb, par-ch4.adb: Likewise.
+ * par-ch8.adb, par-ch9.adb, par-endh.adb, par-sync.adb: Likewise.
+ * par.adb
+ (Pf_Rec): Remove filler, which was added August 25, 1993 to get
+ around a compiler limitation that no longer exists. Minor cleanup.
+	Remove useless qualifications.
+ * par-ch3.adb: Remove redundant return statements.
+ (Component_Scan_Loop): Remove loop name; there are no nested
+ loops, so it's unnecessary and possibly misleading, and it causes
+ too-long lines.
+ * par-ch5.adb: DRY: Remove comments that repeat the comments in
+ par.adb.
+ (P_Sequence_Of_Statements): It is better to initialize things on
+ the declaration. And constants are better than variables.
+ (Test_Statement_Required): Remove unnecessary insertion of a null
+ statement.
+ * par-ch6.adb, par-ch7.adb: DRY: Remove comments that repeat the
+ comments in par.adb.
+
+2022-09-12 Javier Miranda <miranda@adacore.com>
+
+ Revert:
+ 2022-09-06 Javier Miranda <miranda@adacore.com>
+
+ * debug.adb
+ (Debug_Flag_Underscore_X): Switch added temporarily to allow
+ disabling extra formal checks.
+ * exp_attr.adb
+ (Expand_N_Attribute_Reference [access types]): Add extra formals
+ to the subprogram referenced in the prefix of 'Unchecked_Access,
+ 'Unrestricted_Access or 'Access; required to check that its extra
+ formals match the extra formals of the corresponding subprogram
+ type.
+ * exp_ch3.adb
+ (Stream_Operation_OK): Declaration moved to the public part of the
+ package.
+ (Validate_Tagged_Type_Extra_Formals): New subprogram.
+ (Expand_Freeze_Record_Type): Improve the code that takes care of
+ adding the extra formals of dispatching primitives; extended to
+ add also the extra formals to renamings of dispatching primitives.
+ * exp_ch3.ads
+ (Stream_Operation_OK): Declaration moved from the package body.
+ * exp_ch6.adb
+ (Has_BIP_Extra_Formal): Subprogram declaration moved to the public
+ part of the package. In addition, a parameter has been added to
+ disable an assertion that requires its use with frozen entities.
+ (Expand_Call_Helper): Enforce assertion checking extra formals on
+ thunks.
+ (Is_Build_In_Place_Function): Return False for entities with
+ foreign convention.
+ (Make_Build_In_Place_Call_In_Object_Declaration): Occurrences of
+ Is_Return_Object replaced by the local variable
+ Is_OK_Return_Object that evaluates to False for scopes with
+ foreign convention.
+ (Might_Have_Tasks): Fix check of class-wide limited record types.
+ (Needs_BIP_Task_Actuals): Remove assertion to allow calling this
+ function in more contexts; in addition it returns False for
+ functions returning objects with foreign convention.
+ (Needs_BIP_Finalization_Master): Likewise.
+ (Needs_BIP_Alloc_Form): Likewise.
+	* exp_ch6.ads
+	(Has_BIP_Extra_Formal): Declaration moved from the package body. In
+	addition, a parameter has been added to disable assertion that
+	requires its use with frozen entities.
+ * freeze.adb
+ (Check_Itype): Add extra formals to anonymous access subprogram
+ itypes.
+ (Freeze_Expression): Improve code that disables the addition of
+ extra formals to functions with foreign convention.
+ (Check_Extra_Formals): Moved to package Sem_Ch6 as
+ Extra_Formals_OK.
+ (Freeze_Subprogram): Add extra formals to non-dispatching
+ subprograms.
+ * sem_ch3.adb
+ (Access_Subprogram_Declaration): Defer the addition of extra
+ formals to the freezing point so that we know the convention.
+ (Check_Anonymous_Access_Component): Likewise.
+ (Derive_Subprogram): Fix documentation.
+ * sem_ch6.adb
+ (Check_Anonymous_Return): Fix check of access to class-wide
+ limited record types.
+ (Check_Untagged_Equality): Placed in alphabetical order.
+ (Extra_Formals_OK): Subprogram moved from freeze.adb.
+ (Extra_Formals_Match_OK): New subprogram.
+ (Has_BIP_Formals): New subprogram.
+ (Has_Extra_Formals): New subprograms.
+ (Needs_Accessibility_Check_Extra): New subprogram.
+ (Needs_Constrained_Extra): New subprogram.
+ (Parent_Subprogram): New subprogram.
+ (Add_Extra_Formal): Minor code cleanup.
+ (Create_Extra_Formals): Enforce matching extra formals on
+ overridden and aliased entities.
+ (Has_Reliable_Extra_Formals): New subprogram.
+ * sem_ch6.ads
+ (Extra_Formals_OK): Subprogram moved from freeze.adb.
+ (Extra_Formals_Match_OK): New subprogram.
+ * sem_eval.adb
+ (Compile_Time_Known_Value): Improve predicate to avoid assertion
+ failure; found working on this ticket; this change does not affect
+ the behavior of the compiler because this subprogram has an
+ exception handler that returns False when the assertion fails.
+ * sem_util.adb
+ (Needs_Result_Accessibility_Level): Do not return False for
+ dispatching operations compiled with Ada_Version < 2012 since they
+ they may be overridden by primitives compiled with Ada_Version >=
+ Ada_2012.
+
2022-09-06 Eric Botcazou <ebotcazou@adacore.com>
* gcc-interface/decl.cc (gnat_to_gnu_param): Set DECL_ARTIFICIAL.
diff --git a/gcc/ada/gcc-interface/trans.cc b/gcc/ada/gcc-interface/trans.cc
index f2e0cb2..2d93947 100644
--- a/gcc/ada/gcc-interface/trans.cc
+++ b/gcc/ada/gcc-interface/trans.cc
@@ -413,7 +413,6 @@ gigi (Node_Id gnat_root,
save_gnu_tree (gnat_literal, t, false);
/* Declare the building blocks of function nodes. */
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
void_ftype = build_function_type_list (void_type_node, NULL_TREE);
ptr_void_ftype = build_pointer_type (void_ftype);
diff --git a/gcc/analyzer/ChangeLog b/gcc/analyzer/ChangeLog
index ea6d5ee..3af1a38 100644
--- a/gcc/analyzer/ChangeLog
+++ b/gcc/analyzer/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-12 Martin Liska <mliska@suse.cz>
+
+ * region-model.cc (region_model::maybe_complain_about_infoleak):
+ Remove unused fields.
+
2022-09-11 Tim Lange <mail@tim-lange.me>
PR analyzer/106845
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 4144df5..ba3d76d 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,9 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * c-common.h (build_void_list_node): Remove.
+ * c-common.cc (c_common_nodes_and_builtins): Do not initialize
+ void_list_node.
+
2022-09-09 Jan-Benedict Glaw <jbglaw@lug-owl.de>
* c-format.cc (convert_format_name_to_system_name): Fix warning.
diff --git a/gcc/c-family/c-common.cc b/gcc/c-family/c-common.cc
index 0a5b7e1..c0f15f4 100644
--- a/gcc/c-family/c-common.cc
+++ b/gcc/c-family/c-common.cc
@@ -4505,8 +4505,6 @@ c_common_nodes_and_builtins (void)
TYPE_NAME (void_type_node) = void_name;
}
- void_list_node = build_void_list_node ();
-
/* Make a type to be the domain of a few array types
whose domains don't really matter.
200 is small enough that it always fits in size_t
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index ce971a2..2f592f5 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -853,7 +853,6 @@ extern tree identifier_global_tag (tree);
extern bool names_builtin_p (const char *);
extern tree c_linkage_bindings (tree);
extern void record_builtin_type (enum rid, const char *, tree);
-extern tree build_void_list_node (void);
extern void start_fname_decls (void);
extern void finish_fname_decls (void);
extern const char *fname_as_string (int);
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 41dc86b..b7fe1a4 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,18 @@
+2022-09-19 Marek Polacek <polacek@redhat.com>
+
+ PR c/106947
+ * c-typeck.cc (maybe_warn_for_null_address): Don't emit stray
+ notes.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * c-decl.cc (build_void_list_node): Remove.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * c-typeck.cc (c_finish_omp_clauses): Remove whole mapping node group
+ on error.
+
2022-09-07 Joseph Myers <joseph@codesourcery.com>
* c-parser.cc (c_parser_static_assert_declaration_no_semi)
diff --git a/gcc/c/c-decl.cc b/gcc/c/c-decl.cc
index 34f8fed..b09c639 100644
--- a/gcc/c/c-decl.cc
+++ b/gcc/c/c-decl.cc
@@ -10676,14 +10676,6 @@ record_builtin_type (enum rid rid_index, const char *name, tree type)
debug_hooks->type_decl (decl, false);
}
-/* Build the void_list_node (void_type_node having been created). */
-tree
-build_void_list_node (void)
-{
- tree t = build_tree_list (NULL_TREE, void_type_node);
- return t;
-}
-
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */
struct c_parm *
diff --git a/gcc/c/c-typeck.cc b/gcc/c/c-typeck.cc
index 9ada5d2..33d1e84 100644
--- a/gcc/c/c-typeck.cc
+++ b/gcc/c/c-typeck.cc
@@ -11738,18 +11738,19 @@ maybe_warn_for_null_address (location_t loc, tree op, tree_code code)
|| from_macro_expansion_at (loc))
return;
+ bool w;
if (code == EQ_EXPR)
- warning_at (loc, OPT_Waddress,
- "the comparison will always evaluate as %<false%> "
- "for the address of %qE will never be NULL",
- op);
+ w = warning_at (loc, OPT_Waddress,
+ "the comparison will always evaluate as %<false%> "
+ "for the address of %qE will never be NULL",
+ op);
else
- warning_at (loc, OPT_Waddress,
- "the comparison will always evaluate as %<true%> "
- "for the address of %qE will never be NULL",
- op);
+ w = warning_at (loc, OPT_Waddress,
+ "the comparison will always evaluate as %<true%> "
+ "for the address of %qE will never be NULL",
+ op);
- if (DECL_P (op))
+ if (w && DECL_P (op))
inform (DECL_SOURCE_LOCATION (op), "%qD declared here", op);
}
@@ -14238,12 +14239,19 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
break;
}
+ tree *grp_start_p = NULL, grp_sentinel = NULL_TREE;
+
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool need_complete = false;
bool need_implicitly_determined = false;
+ /* We've reached the end of a list of expanded nodes. Reset the group
+ start pointer. */
+ if (c == grp_sentinel)
+ grp_start_p = NULL;
+
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
@@ -15001,6 +15009,9 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
+ grp_start_p = pc;
+ grp_sentinel = OMP_CLAUSE_CHAIN (c);
+
if (handle_omp_array_sections (c, ort))
remove = true;
else
@@ -15644,7 +15655,19 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
}
if (remove)
- *pc = OMP_CLAUSE_CHAIN (c);
+ {
+ if (grp_start_p)
+ {
+ /* If we found a clause to remove, we want to remove the whole
+ expanded group, otherwise gimplify
+ (omp_resolve_clause_dependencies) can get confused. */
+ *grp_start_p = grp_sentinel;
+ pc = grp_start_p;
+ grp_start_p = NULL;
+ }
+ else
+ *pc = OMP_CLAUSE_CHAIN (c);
+ }
else
pc = &OMP_CLAUSE_CHAIN (c);
}
diff --git a/gcc/config/aarch64/aarch64-ldpstp.md b/gcc/config/aarch64/aarch64-ldpstp.md
index ba76a1b..f8446e2 100644
--- a/gcc/config/aarch64/aarch64-ldpstp.md
+++ b/gcc/config/aarch64/aarch64-ldpstp.md
@@ -83,8 +83,7 @@
(match_operand:DREG 1 "register_operand" ""))
(set (match_operand:DREG2 2 "memory_operand" "")
(match_operand:DREG2 3 "register_operand" ""))]
- "TARGET_SIMD
- && aarch64_operands_ok_for_ldpstp (operands, false, <DREG:MODE>mode)"
+ "aarch64_operands_ok_for_ldpstp (operands, false, <DREG:MODE>mode)"
[(parallel [(set (match_dup 0) (match_dup 1))
(set (match_dup 2) (match_dup 3))])]
{
@@ -96,7 +95,7 @@
(match_operand:VQ 1 "memory_operand" ""))
(set (match_operand:VQ2 2 "register_operand" "")
(match_operand:VQ2 3 "memory_operand" ""))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_ok_for_ldpstp (operands, true, <VQ:MODE>mode)
&& (aarch64_tune_params.extra_tuning_flags
& AARCH64_EXTRA_TUNE_NO_LDP_STP_QREGS) == 0"
@@ -111,7 +110,7 @@
(match_operand:VQ 1 "register_operand" ""))
(set (match_operand:VQ2 2 "memory_operand" "")
(match_operand:VQ2 3 "register_operand" ""))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_ok_for_ldpstp (operands, false, <VQ:MODE>mode)
&& (aarch64_tune_params.extra_tuning_flags
& AARCH64_EXTRA_TUNE_NO_LDP_STP_QREGS) == 0"
@@ -306,7 +305,7 @@
(set (match_operand:VP_2E 6 "memory_operand" "")
(match_operand:VP_2E 7 "aarch64_reg_or_zero" ""))
(match_dup 8)]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_adjust_ok_for_ldpstp (operands, false, <MODE>mode)"
[(const_int 0)]
{
@@ -327,7 +326,7 @@
(set (match_operand:VP_2E 6 "register_operand" "")
(match_operand:VP_2E 7 "memory_operand" ""))
(match_dup 8)]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_operands_adjust_ok_for_ldpstp (operands, true, <MODE>mode)"
[(const_int 0)]
{
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 587a45d..dc80f82 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -21,7 +21,7 @@
(define_expand "mov<mode>"
[(set (match_operand:VALL_F16 0 "nonimmediate_operand")
(match_operand:VALL_F16 1 "general_operand"))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
"
/* Force the operand into a register if it is not an
immediate whose use can be replaced with xzr.
@@ -52,7 +52,7 @@
(define_expand "movmisalign<mode>"
[(set (match_operand:VALL_F16 0 "nonimmediate_operand")
(match_operand:VALL_F16 1 "general_operand"))]
- "TARGET_SIMD && !STRICT_ALIGNMENT"
+ "TARGET_FLOAT && !STRICT_ALIGNMENT"
{
/* This pattern is not permitted to fail during expansion: if both arguments
are non-registers (e.g. memory := constant, which can be created by the
@@ -116,10 +116,10 @@
(define_insn "*aarch64_simd_mov<VDMOV:mode>"
[(set (match_operand:VDMOV 0 "nonimmediate_operand"
- "=w, m, m, w, ?r, ?w, ?r, w")
+ "=w, m, m, w, ?r, ?w, ?r, w, w")
(match_operand:VDMOV 1 "general_operand"
- "m, Dz, w, w, w, r, r, Dn"))]
- "TARGET_SIMD
+ "m, Dz, w, w, w, r, r, Dn, Dz"))]
+ "TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
{
@@ -128,26 +128,34 @@
case 0: return "ldr\t%d0, %1";
case 1: return "str\txzr, %0";
case 2: return "str\t%d1, %0";
- case 3: return "mov\t%0.<Vbtype>, %1.<Vbtype>";
- case 4: return "umov\t%0, %1.d[0]";
+ case 3:
+ if (TARGET_SIMD)
+ return "mov\t%0.<Vbtype>, %1.<Vbtype>";
+ return "fmov\t%d0, %d1";
+ case 4:
+ if (TARGET_SIMD)
+ return "umov\t%0, %1.d[0]";
+ return "fmov\t%x0, %d1";
case 5: return "fmov\t%d0, %1";
case 6: return "mov\t%0, %1";
case 7:
return aarch64_output_simd_mov_immediate (operands[1], 64);
+ case 8: return "fmov\t%d0, xzr";
default: gcc_unreachable ();
}
}
[(set_attr "type" "neon_load1_1reg<q>, store_8, neon_store1_1reg<q>,\
neon_logic<q>, neon_to_gp<q>, f_mcr,\
- mov_reg, neon_move<q>")]
+ mov_reg, neon_move<q>, f_mcr")
+ (set_attr "arch" "*,*,*,*,*,*,*,simd,*")]
)
(define_insn "*aarch64_simd_mov<VQMOV:mode>"
[(set (match_operand:VQMOV 0 "nonimmediate_operand"
- "=w, Umn, m, w, ?r, ?w, ?r, w")
+ "=w, Umn, m, w, ?r, ?w, ?r, w, w")
(match_operand:VQMOV 1 "general_operand"
- "m, Dz, w, w, w, r, r, Dn"))]
- "TARGET_SIMD
+ "m, Dz, w, w, w, r, r, Dn, Dz"))]
+ "TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
{
@@ -167,14 +175,17 @@
return "#";
case 7:
return aarch64_output_simd_mov_immediate (operands[1], 128);
+ case 8:
+ return "fmov\t%d0, xzr";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "neon_load1_1reg<q>, store_16, neon_store1_1reg<q>,\
neon_logic<q>, multiple, multiple,\
- multiple, neon_move<q>")
- (set_attr "length" "4,4,4,4,8,8,8,4")]
+ multiple, neon_move<q>, fmov")
+ (set_attr "length" "4,4,4,4,8,8,8,4,4")
+ (set_attr "arch" "*,*,*,simd,*,*,*,simd,*")]
)
;; When storing lane zero we can use the normal STR and its more permissive
@@ -195,7 +206,7 @@
(match_operand:DREG 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:DREG2 2 "register_operand" "=w")
(match_operand:DREG2 3 "memory_operand" "m"))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
@@ -209,7 +220,7 @@
(match_operand:DREG 1 "register_operand" "w"))
(set (match_operand:DREG2 2 "memory_operand" "=m")
(match_operand:DREG2 3 "register_operand" "w"))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
@@ -223,7 +234,7 @@
(match_operand:VQ 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:VQ2 2 "register_operand" "=w")
(match_operand:VQ2 3 "memory_operand" "m"))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
@@ -237,10 +248,11 @@
(match_operand:VQ 1 "register_operand" "w"))
(set (match_operand:VQ2 2 "memory_operand" "=m")
(match_operand:VQ2 3 "register_operand" "w"))]
- "TARGET_SIMD && rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (<VQ:MODE>mode)))"
+ "TARGET_FLOAT
+ && rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (<VQ:MODE>mode)))"
"stp\\t%q1, %q3, %z0"
[(set_attr "type" "neon_stp_q")]
)
@@ -248,8 +260,9 @@
(define_split
[(set (match_operand:VQMOV 0 "register_operand" "")
- (match_operand:VQMOV 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed
+ (match_operand:VQMOV 1 "register_operand" ""))]
+ "TARGET_FLOAT
+ && reload_completed
&& GP_REGNUM_P (REGNO (operands[0]))
&& GP_REGNUM_P (REGNO (operands[1]))"
[(const_int 0)]
@@ -261,7 +274,8 @@
(define_split
[(set (match_operand:VQMOV 0 "register_operand" "")
(match_operand:VQMOV 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed
+ "TARGET_FLOAT
+ && reload_completed
&& ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1])))
|| (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))"
[(const_int 0)]
@@ -273,7 +287,7 @@
(define_expand "@aarch64_split_simd_mov<mode>"
[(set (match_operand:VQMOV 0)
(match_operand:VQMOV 1))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
rtx dst = operands[0];
rtx src = operands[1];
@@ -306,13 +320,20 @@
(vec_select:<VHALF>
(match_operand:VQMOV 1 "register_operand")
(match_operand 2 "ascending_int_parallel")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
+ {
+ if (vect_par_cnst_lo_half (operands[2], <MODE>mode))
+ {
+ emit_move_insn (operands[0], gen_lowpart (<VHALF>mode, operands[1]));
+ DONE;
+ }
+ }
)
(define_expand "aarch64_get_low<mode>"
[(match_operand:<VHALF> 0 "register_operand")
(match_operand:VQMOV 1 "register_operand")]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
rtx lo = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, false);
emit_insn (gen_aarch64_get_half<mode> (operands[0], operands[1], lo));
@@ -323,7 +344,7 @@
(define_expand "aarch64_get_high<mode>"
[(match_operand:<VHALF> 0 "register_operand")
(match_operand:VQMOV 1 "register_operand")]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
rtx hi = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
emit_insn (gen_aarch64_get_half<mode> (operands[0], operands[1], hi));
@@ -350,15 +371,17 @@
)
(define_insn "aarch64_simd_mov_from_<mode>high"
- [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r")
+ [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r,?r")
(vec_select:<VHALF>
- (match_operand:VQMOV_NO2E 1 "register_operand" "w,w")
+ (match_operand:VQMOV_NO2E 1 "register_operand" "w,w,w")
(match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half" "")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
"@
- dup\\t%d0, %1.d[1]
- umov\t%0, %1.d[1]"
- [(set_attr "type" "neon_dup<q>,neon_to_gp<q>")
+ dup\t%d0, %1.d[1]
+ umov\t%0, %1.d[1]
+ fmov\t%0, %1.d[1]"
+ [(set_attr "type" "neon_dup<q>,neon_to_gp<q>,f_mrc")
+ (set_attr "arch" "simd,simd,*")
(set_attr "length" "4")]
)
@@ -4226,12 +4249,22 @@
[(set_attr "type" "neon_to_gp<q>, neon_dup<q>, neon_store1_one_lane<q>")]
)
+(define_insn "*aarch64_get_high<mode>"
+ [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r")
+ (vec_select:<VEL>
+ (match_operand:VQ_2E 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))]
+ "TARGET_FLOAT && ENDIAN_LANE_N (<nunits>, INTVAL (operands[2])) == 1"
+ "fmov\t%0, %1.d[1]"
+ [(set_attr "type" "f_mrc")]
+)
+
(define_insn "load_pair_lanes<mode>"
[(set (match_operand:<VDBL> 0 "register_operand" "=w")
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "memory_operand" "Utq")
(match_operand:VDCSIF 2 "memory_operand" "m")))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& aarch64_mergeable_load_pair_p (<VDBL>mode, operands[1], operands[2])"
"ldr\\t%<single_dtype>0, %1"
[(set_attr "type" "neon_load1_1reg<dblq>")]
@@ -4261,7 +4294,7 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "register_operand" "w, r")
(match_operand:VDCSIF 2 "register_operand" "w, r")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
"@
stp\t%<single_type>1, %<single_type>2, %y0
stp\t%<single_wx>1, %<single_wx>2, %y0"
@@ -4276,39 +4309,44 @@
;; the register alternatives either don't accept or themselves disparage.
(define_insn "*aarch64_combine_internal<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, ?w, ?r")
- (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, Utv, w, ?r")))]
- "TARGET_SIMD
+ (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")
+ (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, w, ?r")))]
+ "TARGET_FLOAT
&& !BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
"@
ins\t%0.<single_type>[1], %2.<single_type>[0]
ins\t%0.<single_type>[1], %<single_wx>2
+ fmov\t%0.d[1], %2
ld1\t{%0.<single_type>}[1], %2
stp\t%<single_type>1, %<single_type>2, %y0
stp\t%<single_wx>1, %<single_wx>2, %y0"
- [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, neon_load1_one_lane<dblq>, neon_stp, store_16")]
+ [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr,
+ neon_load1_one_lane<dblq>, neon_stp, store_16")
+ (set_attr "arch" "simd,simd,*,simd,*,*")]
)
(define_insn "*aarch64_combine_internal_be<mode>"
- [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, Umn, Umn")
+ [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
(vec_concat:<VDBL>
- (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, Utv, ?w, ?r")
- (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, ?w, ?r")))]
- "TARGET_SIMD
+ (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, ?w, ?r")
+ (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")))]
+ "TARGET_FLOAT
&& BYTES_BIG_ENDIAN
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
"@
ins\t%0.<single_type>[1], %2.<single_type>[0]
ins\t%0.<single_type>[1], %<single_wx>2
+ fmov\t%0.d[1], %2
ld1\t{%0.<single_type>}[1], %2
stp\t%<single_type>2, %<single_type>1, %y0
stp\t%<single_wx>2, %<single_wx>1, %y0"
- [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, neon_load1_one_lane<dblq>, neon_stp, store_16")]
+ [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr, neon_load1_one_lane<dblq>, neon_stp, store_16")
+ (set_attr "arch" "simd,simd,*,simd,*,*")]
)
;; In this insn, operand 1 should be low, and operand 2 the high part of the
@@ -4319,13 +4357,12 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")
(match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")))]
- "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "TARGET_FLOAT && !BYTES_BIG_ENDIAN"
"@
fmov\\t%<single_type>0, %<single_type>1
fmov\t%<single_type>0, %<single_wx>1
ldr\\t%<single_type>0, %1"
- [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")
- (set_attr "arch" "simd,fp,simd")]
+ [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
)
(define_insn "*aarch64_combinez_be<mode>"
@@ -4333,13 +4370,12 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")
(match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "TARGET_FLOAT && BYTES_BIG_ENDIAN"
"@
fmov\\t%<single_type>0, %<single_type>1
fmov\t%<single_type>0, %<single_wx>1
ldr\\t%<single_type>0, %1"
- [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")
- (set_attr "arch" "simd,fp,simd")]
+ [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
)
;; Form a vector whose first half (in array order) comes from operand 1
@@ -4350,7 +4386,7 @@
(vec_concat:<VDBL>
(match_operand:VDCSIF 1 "general_operand")
(match_operand:VDCSIF 2 "general_operand")))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
int lo = BYTES_BIG_ENDIAN ? 2 : 1;
int hi = BYTES_BIG_ENDIAN ? 1 : 2;
@@ -4368,7 +4404,7 @@
}
else
{
- /* Use *aarch64_combine_general<mode>. */
+ /* Use *aarch64_combine_internal<mode>. */
operands[lo] = force_reg (<MODE>mode, operands[lo]);
if (!aarch64_simd_nonimmediate_operand (operands[hi], <MODE>mode))
{
@@ -4390,7 +4426,7 @@
[(match_operand:<VDBL> 0 "register_operand")
(match_operand:VDC 1 "general_operand")
(match_operand:VDC 2 "general_operand")]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
if (BYTES_BIG_ENDIAN)
std::swap (operands[1], operands[2]);
@@ -7063,7 +7099,7 @@
(define_expand "mov<mode>"
[(set (match_operand:VSTRUCT_QD 0 "nonimmediate_operand")
(match_operand:VSTRUCT_QD 1 "general_operand"))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
if (can_create_pseudo_p ())
{
@@ -7075,7 +7111,7 @@
(define_expand "mov<mode>"
[(set (match_operand:VSTRUCT 0 "nonimmediate_operand")
(match_operand:VSTRUCT 1 "general_operand"))]
- "TARGET_SIMD"
+ "TARGET_FLOAT"
{
if (can_create_pseudo_p ())
{
@@ -7087,7 +7123,7 @@
(define_expand "movv8di"
[(set (match_operand:V8DI 0 "nonimmediate_operand")
(match_operand:V8DI 1 "general_operand"))]
- "TARGET_SIMD"
+ ""
{
if (can_create_pseudo_p () && MEM_P (operands[0]))
operands[1] = force_reg (V8DImode, operands[1]);
@@ -7255,7 +7291,8 @@
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand" "=w,m,w")
(match_operand:VSTRUCT_2D 1 "general_operand" " w,w,m"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"@
@@ -7269,7 +7306,8 @@
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand" "=w,m,w")
(match_operand:VSTRUCT_2Q 1 "general_operand" " w,w,m"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"@
@@ -7277,13 +7315,15 @@
stp\\t%q1, %R1, %0
ldp\\t%q0, %R0, %1"
[(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
+ (set_attr "arch" "simd,*,*")
(set_attr "length" "8,4,4")]
)
(define_insn "*aarch64_be_movoi"
[(set (match_operand:OI 0 "nonimmediate_operand" "=w,m,w")
(match_operand:OI 1 "general_operand" " w,w,m"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], OImode)
|| register_operand (operands[1], OImode))"
"@
@@ -7291,57 +7331,66 @@
stp\\t%q1, %R1, %0
ldp\\t%q0, %R0, %1"
[(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
+ (set_attr "arch" "simd,*,*")
(set_attr "length" "8,4,4")]
)
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_3QD 0 "nonimmediate_operand" "=w,o,w")
(match_operand:VSTRUCT_3QD 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"#"
[(set_attr "type" "multiple")
+ (set_attr "arch" "fp<q>,*,*")
(set_attr "length" "12,8,8")]
)
(define_insn "*aarch64_be_movci"
[(set (match_operand:CI 0 "nonimmediate_operand" "=w,o,w")
(match_operand:CI 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], CImode)
|| register_operand (operands[1], CImode))"
"#"
[(set_attr "type" "multiple")
- (set_attr "length" "12,4,4")]
+ (set_attr "arch" "simd,*,*")
+ (set_attr "length" "12,8,8")]
)
(define_insn "*aarch64_be_mov<mode>"
[(set (match_operand:VSTRUCT_4QD 0 "nonimmediate_operand" "=w,o,w")
(match_operand:VSTRUCT_4QD 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
"#"
[(set_attr "type" "multiple")
+ (set_attr "arch" "fp<q>,*,*")
(set_attr "length" "16,8,8")]
)
(define_insn "*aarch64_be_movxi"
[(set (match_operand:XI 0 "nonimmediate_operand" "=w,o,w")
(match_operand:XI 1 "general_operand" " w,w,o"))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN
+ "TARGET_FLOAT
+ && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
&& (register_operand (operands[0], XImode)
|| register_operand (operands[1], XImode))"
"#"
[(set_attr "type" "multiple")
- (set_attr "length" "16,4,4")]
+ (set_attr "arch" "simd,*,*")
+ (set_attr "length" "16,8,8")]
)
(define_split
[(set (match_operand:VSTRUCT_2QD 0 "register_operand")
(match_operand:VSTRUCT_2QD 1 "register_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
aarch64_simd_emit_reg_reg_move (operands, <VSTRUCT_ELT>mode, 2);
@@ -7351,7 +7400,7 @@
(define_split
[(set (match_operand:OI 0 "register_operand")
(match_operand:OI 1 "register_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
aarch64_simd_emit_reg_reg_move (operands, TImode, 2);
@@ -7361,7 +7410,7 @@
(define_split
[(set (match_operand:VSTRUCT_3QD 0 "nonimmediate_operand")
(match_operand:VSTRUCT_3QD 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], <MODE>mode)
@@ -7370,7 +7419,7 @@
aarch64_simd_emit_reg_reg_move (operands, <VSTRUCT_ELT>mode, 3);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
int elt_size = GET_MODE_SIZE (<MODE>mode).to_constant () / <nregs>;
machine_mode pair_mode = elt_size == 16 ? V2x16QImode : V2x8QImode;
@@ -7397,7 +7446,7 @@
(define_split
[(set (match_operand:CI 0 "nonimmediate_operand")
(match_operand:CI 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], CImode)
@@ -7406,7 +7455,7 @@
aarch64_simd_emit_reg_reg_move (operands, TImode, 3);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
emit_move_insn (simplify_gen_subreg (OImode, operands[0], CImode, 0),
simplify_gen_subreg (OImode, operands[1], CImode, 0));
@@ -7425,7 +7474,7 @@
(define_split
[(set (match_operand:VSTRUCT_4QD 0 "nonimmediate_operand")
(match_operand:VSTRUCT_4QD 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], <MODE>mode)
@@ -7434,7 +7483,7 @@
aarch64_simd_emit_reg_reg_move (operands, <VSTRUCT_ELT>mode, 4);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
int elt_size = GET_MODE_SIZE (<MODE>mode).to_constant () / <nregs>;
machine_mode pair_mode = elt_size == 16 ? V2x16QImode : V2x8QImode;
@@ -7455,7 +7504,7 @@
(define_split
[(set (match_operand:XI 0 "nonimmediate_operand")
(match_operand:XI 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "TARGET_FLOAT && reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], XImode)
@@ -7464,7 +7513,7 @@
aarch64_simd_emit_reg_reg_move (operands, TImode, 4);
DONE;
}
- else if (BYTES_BIG_ENDIAN)
+ else if (!TARGET_SIMD || BYTES_BIG_ENDIAN)
{
emit_move_insn (simplify_gen_subreg (OImode, operands[0], XImode, 0),
simplify_gen_subreg (OImode, operands[1], XImode, 0));
@@ -7479,7 +7528,7 @@
(define_split
[(set (match_operand:V8DI 0 "nonimmediate_operand")
(match_operand:V8DI 1 "general_operand"))]
- "TARGET_SIMD && reload_completed"
+ "reload_completed"
[(const_int 0)]
{
if (register_operand (operands[0], V8DImode)
@@ -7489,15 +7538,15 @@
DONE;
}
else if ((register_operand (operands[0], V8DImode)
- && memory_operand (operands[1], V8DImode))
- || (memory_operand (operands[0], V8DImode)
- && register_operand (operands[1], V8DImode)))
+ && memory_operand (operands[1], V8DImode))
+ || (memory_operand (operands[0], V8DImode)
+ && register_operand (operands[1], V8DImode)))
{
for (int offset = 0; offset < 64; offset += 16)
- emit_move_insn (simplify_gen_subreg (TImode, operands[0],
- V8DImode, offset),
- simplify_gen_subreg (TImode, operands[1],
- V8DImode, offset));
+ emit_move_insn (simplify_gen_subreg (TImode, operands[0],
+ V8DImode, offset),
+ simplify_gen_subreg (TImode, operands[1],
+ V8DImode, offset));
DONE;
}
else
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 786ede7..467979a 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -3492,7 +3492,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_OImode:
case E_CImode:
case E_XImode:
- return TARGET_SIMD ? VEC_ADVSIMD | VEC_STRUCT : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD | VEC_STRUCT : 0;
/* Structures of 64-bit Advanced SIMD vectors. */
case E_V2x8QImode:
@@ -3519,7 +3519,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_V4x4HFmode:
case E_V4x2SFmode:
case E_V4x1DFmode:
- return TARGET_SIMD ? VEC_ADVSIMD | VEC_STRUCT | VEC_PARTIAL : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD | VEC_STRUCT | VEC_PARTIAL : 0;
/* Structures of 128-bit Advanced SIMD vectors. */
case E_V2x16QImode:
@@ -3546,7 +3546,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_V4x8HFmode:
case E_V4x4SFmode:
case E_V4x2DFmode:
- return TARGET_SIMD ? VEC_ADVSIMD | VEC_STRUCT : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD | VEC_STRUCT : 0;
/* 64-bit Advanced SIMD vectors. */
case E_V8QImode:
@@ -3566,7 +3566,7 @@ aarch64_classify_vector_mode (machine_mode mode)
case E_V8BFmode:
case E_V4SFmode:
case E_V2DFmode:
- return TARGET_SIMD ? VEC_ADVSIMD : 0;
+ return TARGET_FLOAT ? VEC_ADVSIMD : 0;
default:
return 0;
@@ -3854,7 +3854,8 @@ aarch64_vectorize_related_mode (machine_mode vector_mode,
}
/* Prefer to use 1 128-bit vector instead of 2 64-bit vectors. */
- if ((vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD
+ && (vec_flags & VEC_ADVSIMD)
&& known_eq (nunits, 0U)
&& known_eq (GET_MODE_BITSIZE (vector_mode), 64U)
&& maybe_ge (GET_MODE_BITSIZE (element_mode)
@@ -3952,7 +3953,7 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
if (GP_REGNUM_P (regno))
{
- if (vec_flags & VEC_ANY_SVE)
+ if (vec_flags & (VEC_ANY_SVE | VEC_STRUCT))
return false;
if (known_le (GET_MODE_SIZE (mode), 8))
return true;
@@ -10602,7 +10603,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
|| mode == TImode
|| mode == TFmode
|| mode == TDmode
- || (BYTES_BIG_ENDIAN && advsimd_struct_p));
+ || ((!TARGET_SIMD || BYTES_BIG_ENDIAN)
+ && advsimd_struct_p));
/* If we are dealing with ADDR_QUERY_LDP_STP_N that means the incoming mode
corresponds to the actual size of the memory being loaded/stored and the
mode of the corresponding addressing mode is half of that. */
@@ -10632,6 +10634,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
/* On LE, for AdvSIMD, don't support anything other than POST_INC or
REG addressing. */
if (advsimd_struct_p
+ && TARGET_SIMD
&& !BYTES_BIG_ENDIAN
&& (code != POST_INC && code != REG))
return false;
@@ -10694,7 +10697,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
&& aarch64_offset_7bit_signed_scaled_p (DImode, offset + 48));
/* A 7bit offset check because OImode will emit a ldp/stp
- instruction (only big endian will get here).
+ instruction (only !TARGET_SIMD or big endian will get here).
For ldp/stp instructions, the offset is scaled for the size of a
single element of the pair. */
if (aarch64_advsimd_partial_struct_mode_p (mode)
@@ -10705,7 +10708,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
return aarch64_offset_7bit_signed_scaled_p (TImode, offset);
/* Three 9/12 bit offsets checks because CImode will emit three
- ldr/str instructions (only big endian will get here). */
+ ldr/str instructions (only !TARGET_SIMD or big endian will
+ get here). */
if (aarch64_advsimd_partial_struct_mode_p (mode)
&& known_eq (GET_MODE_SIZE (mode), 24))
return (aarch64_offset_7bit_signed_scaled_p (DImode, offset)
@@ -12428,18 +12432,16 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
/* Use aarch64_sve_reload_mem for SVE memory reloads that cannot use
LDR and STR. See the comment at the head of aarch64-sve.md for
more details about the big-endian handling. */
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
if (reg_class_subset_p (rclass, FP_REGS)
&& !((REG_P (x) && HARD_REGISTER_P (x))
|| aarch64_simd_valid_immediate (x, NULL))
- && mode != VNx16QImode)
+ && mode != VNx16QImode
+ && (vec_flags & VEC_SVE_DATA)
+ && ((vec_flags & VEC_PARTIAL) || BYTES_BIG_ENDIAN))
{
- unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if ((vec_flags & VEC_SVE_DATA)
- && ((vec_flags & VEC_PARTIAL) || BYTES_BIG_ENDIAN))
- {
- sri->icode = CODE_FOR_aarch64_sve_reload_mem;
- return NO_REGS;
- }
+ sri->icode = CODE_FOR_aarch64_sve_reload_mem;
+ return NO_REGS;
}
/* If we have to disable direct literal pool loads and stores because the
@@ -12456,9 +12458,13 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
/* Without the TARGET_SIMD instructions we cannot move a Q register
to a Q register directly. We need a scratch. */
if (REG_P (x)
- && (mode == TFmode || mode == TImode || mode == TDmode)
+ && (mode == TFmode
+ || mode == TImode
+ || mode == TDmode
+ || (vec_flags == VEC_ADVSIMD && known_eq (GET_MODE_SIZE (mode), 16)))
&& mode == GET_MODE (x)
- && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
+ && !TARGET_SIMD
+ && FP_REGNUM_P (REGNO (x))
&& reg_class_subset_p (rclass, FP_REGS))
{
sri->icode = code_for_aarch64_reload_mov (mode);
@@ -12480,6 +12486,28 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
return NO_REGS;
}
+/* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
+
+static bool
+aarch64_secondary_memory_needed (machine_mode mode, reg_class_t class1,
+ reg_class_t class2)
+{
+ if (!TARGET_SIMD
+ && reg_classes_intersect_p (class1, FP_REGS)
+ && reg_classes_intersect_p (class2, FP_REGS))
+ {
+ /* We can't do a 128-bit FPR-to-FPR move without TARGET_SIMD,
+ so we can't easily split a move involving tuples of 128-bit
+ vectors. Force the copy through memory instead.
+
+ (Tuples of 64-bit vectors are fine.) */
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ if (vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
+ return true;
+ }
+ return false;
+}
+
static bool
aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
@@ -13023,7 +13051,7 @@ aarch64_rtx_mult_cost (rtx x, enum rtx_code code, int outer, bool speed)
if (VECTOR_MODE_P (mode))
{
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD && (vec_flags & VEC_ADVSIMD))
{
/* The select-operand-high-half versions of the instruction have the
same cost as the three vector version - don't add the costs of the
@@ -13969,7 +13997,7 @@ cost_minus:
{
/* SUBL2 and SUBW2. */
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD && (vec_flags & VEC_ADVSIMD))
{
/* The select-operand-high-half versions of the sub instruction
have the same cost as the regular three vector version -
@@ -14056,7 +14084,7 @@ cost_plus:
{
/* ADDL2 and ADDW2. */
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_ADVSIMD)
+ if (TARGET_SIMD && (vec_flags & VEC_ADVSIMD))
{
/* The select-operand-high-half versions of the add instruction
have the same cost as the regular three vector version -
@@ -14981,7 +15009,9 @@ aarch64_register_move_cost (machine_mode mode,
return aarch64_register_move_cost (mode, from, GENERAL_REGS)
+ aarch64_register_move_cost (mode, GENERAL_REGS, to);
- if (known_eq (GET_MODE_SIZE (mode), 16))
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ if (vec_flags != (VEC_ADVSIMD | VEC_STRUCT | VEC_PARTIAL)
+ && known_eq (GET_MODE_SIZE (mode), 16))
{
/* 128-bit operations on general registers require 2 instructions. */
if (from == GENERAL_REGS && to == GENERAL_REGS)
@@ -15009,6 +15039,16 @@ aarch64_register_move_cost (machine_mode mode,
else if (to == GENERAL_REGS)
return regmove_cost->FP2GP;
+ if (!TARGET_SIMD && vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
+ {
+ /* Needs a round-trip through memory, which can use LDP/STP for pairs.
+ The cost must be greater than 2 units to indicate that direct
+ moves aren't possible. */
+ auto per_vector = (aarch64_tune_params.memmov_cost.load_fp
+ + aarch64_tune_params.memmov_cost.store_fp);
+ return MIN (CEIL (per_vector, 2), 4);
+ }
+
return regmove_cost->FP2FP;
}
@@ -21115,6 +21155,9 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
if (vec_flags == 0 || vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
return false;
+ if ((vec_flags & VEC_ADVSIMD) && !TARGET_SIMD)
+ return false;
+
if (vec_flags & VEC_SVE_PRED)
return aarch64_sve_pred_valid_immediate (op, info);
@@ -24048,7 +24091,7 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
std::swap (d->op0, d->op1);
}
- if ((d->vec_flags == VEC_ADVSIMD
+ if (((d->vec_flags == VEC_ADVSIMD && TARGET_SIMD)
|| d->vec_flags == VEC_SVE_DATA
|| d->vec_flags == (VEC_SVE_DATA | VEC_PARTIAL)
|| d->vec_flags == VEC_SVE_PRED)
@@ -27482,6 +27525,9 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
+#undef TARGET_SECONDARY_MEMORY_NEEDED
+#define TARGET_SECONDARY_MEMORY_NEEDED aarch64_secondary_memory_needed
+
#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index efcbecb..3f8e40a 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -370,8 +370,11 @@
;; Attributes of the architecture required to support the instruction (or
;; alternative). This attribute is used to compute attribute "enabled", use type
;; "any" to enable an alternative in all cases.
+;;
+;; As a convenience, "fp_q" means "fp" + the ability to move between
+;; Q registers and is equivalent to "simd".
-(define_enum "arches" [ any rcpc8_4 fp simd sve fp16])
+(define_enum "arches" [ any rcpc8_4 fp fp_q simd sve fp16])
(define_enum_attr "arch" "arches" (const_string "any"))
@@ -399,7 +402,7 @@
(and (eq_attr "arch" "fp")
(match_test "TARGET_FLOAT"))
- (and (eq_attr "arch" "simd")
+ (and (eq_attr "arch" "fp_q, simd")
(match_test "TARGET_SIMD"))
(and (eq_attr "arch" "fp16")
@@ -6819,8 +6822,8 @@
)
(define_expand "@aarch64_reload_mov<mode>"
- [(set (match_operand:TX 0 "register_operand" "=w")
- (match_operand:TX 1 "register_operand" "w"))
+ [(set (match_operand:VTX 0 "register_operand" "=w")
+ (match_operand:VTX 1 "register_operand" "w"))
(clobber (match_operand:DI 2 "register_operand" "=&r"))
]
"TARGET_FLOAT"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 0dd9dc6..9354dbe 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -313,6 +313,8 @@
(define_mode_iterator TX [TI TF TD])
+(define_mode_iterator VTX [TI TF TD V16QI V8HI V4SI V2DI V8HF V4SF V2DF V8BF])
+
;; Advanced SIMD opaque structure modes.
(define_mode_iterator VSTRUCT [OI CI XI])
diff --git a/gcc/config/csky/csky.h b/gcc/config/csky/csky.h
index f786ad5..a9d1369 100644
--- a/gcc/config/csky/csky.h
+++ b/gcc/config/csky/csky.h
@@ -422,7 +422,7 @@ typedef struct
The int cast is to prevent a complaint about unsigned comparison to
zero, since CSKY_FIRST_PARM_REGNUM is zero. */
#define FUNCTION_ARG_REGNO_P(REGNO) \
- (((REGNO) >= CSKY_FIRST_PARM_REGNUM \
+ (((int)(REGNO) >= CSKY_FIRST_PARM_REGNUM \
&& (REGNO) < (CSKY_NPARM_REGS + CSKY_FIRST_PARM_REGNUM)) \
|| FUNCTION_VARG_REGNO_P(REGNO))
diff --git a/gcc/config/gcn/mkoffload.cc b/gcc/config/gcn/mkoffload.cc
index 24d3273..6403780 100644
--- a/gcc/config/gcn/mkoffload.cc
+++ b/gcc/config/gcn/mkoffload.cc
@@ -805,7 +805,7 @@ main (int argc, char **argv)
FILE *cfile = stdout;
const char *outname = 0;
- progname = "mkoffload";
+ progname = tool_name;
diagnostic_initialize (global_dc, 0);
obstack_init (&files_to_cleanup);
diff --git a/gcc/config/i386/i386-builtins.cc b/gcc/config/i386/i386-builtins.cc
index 6a04fb5..af2faee 100644
--- a/gcc/config/i386/i386-builtins.cc
+++ b/gcc/config/i386/i386-builtins.cc
@@ -1540,21 +1540,16 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
switch (fn)
{
- CASE_CFN_EXP2:
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_EXP2PS);
- }
- break;
-
CASE_CFN_IFLOOR:
CASE_CFN_LFLOOR:
- CASE_CFN_LLFLOOR:
/* The round insn does not trap on denormals. */
if (flag_trapping_math || !TARGET_SSE4_1)
break;
+ /* PR106910, currently vectorizer doesn't go direct internal fn way
+ when out_n != in_n, so let's still keep this.
+ Otherwise, it relies on expander of
+ lceilmn2/lfloormn2/lroundmn2/lrintmn2. */
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
@@ -1564,20 +1559,10 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS_SFIX512);
- }
break;
CASE_CFN_ICEIL:
CASE_CFN_LCEIL:
- CASE_CFN_LLCEIL:
/* The round insn does not trap on denormals. */
if (flag_trapping_math || !TARGET_SSE4_1)
break;
@@ -1591,20 +1576,10 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_CEILPD_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS_SFIX512);
- }
break;
CASE_CFN_IRINT:
CASE_CFN_LRINT:
- CASE_CFN_LLRINT:
if (out_mode == SImode && in_mode == DFmode)
{
if (out_n == 4 && in_n == 2)
@@ -1614,20 +1589,10 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CVTPS2DQ512);
- }
break;
CASE_CFN_IROUND:
CASE_CFN_LROUND:
- CASE_CFN_LLROUND:
/* The round insn does not trap on denormals. */
if (flag_trapping_math || !TARGET_SSE4_1)
break;
@@ -1641,150 +1606,8 @@ ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
else if (out_n == 16 && in_n == 8)
return ix86_get_builtin (IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX512);
}
- if (out_mode == SImode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_ROUNDPS_AZ_SFIX512);
- }
break;
- CASE_CFN_FLOOR:
- /* The round insn does not trap on denormals. */
- if (flag_trapping_math || !TARGET_SSE4_1)
- break;
-
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPD);
- else if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPD256);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPD512);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPS512);
- }
- if (out_mode == HFmode && in_mode == HFmode)
- {
- /* V8HF/V16HF is supported in ix86_vector_mode_supported_p
- under TARGET_AVX512FP16, TARGET_AVX512VL is needed here. */
- if (out_n < 32 && !TARGET_AVX512VL)
- break;
-
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPH);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPH256);
- else if (out_n == 32 && in_n == 32)
- return ix86_get_builtin (IX86_BUILTIN_FLOORPH512);
- }
- break;
-
- CASE_CFN_CEIL:
- /* The round insn does not trap on denormals. */
- if (flag_trapping_math || !TARGET_SSE4_1)
- break;
-
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_CEILPD);
- else if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CEILPD256);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPD512);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CEILPS512);
- }
- if (out_mode == HFmode && in_mode == HFmode)
- {
- /* V8HF/V16HF is supported in ix86_vector_mode_supported_p
- under TARGET_AVX512FP16, TARGET_AVX512VL is needed here. */
- if (out_n < 32 && !TARGET_AVX512VL)
- break;
-
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_CEILPH);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_CEILPH256);
- else if (out_n == 32 && in_n == 32)
- return ix86_get_builtin (IX86_BUILTIN_CEILPH512);
- }
- break;
-
- CASE_CFN_TRUNC:
- /* The round insn does not trap on denormals. */
- if (flag_trapping_math || !TARGET_SSE4_1)
- break;
-
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPD);
- else if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPD256);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPD512);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPS);
- else if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPS256);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPS512);
- }
- if (out_mode == HFmode && in_mode == HFmode)
- {
- /* V8HF/V16HF is supported in ix86_vector_mode_supported_p
- under TARGET_AVX512FP16, TARGET_AVX512VL is needed here. */
- if (out_n < 32 && !TARGET_AVX512VL)
- break;
-
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPH);
- else if (out_n == 16 && in_n == 16)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPH256);
- else if (out_n == 32 && in_n == 32)
- return ix86_get_builtin (IX86_BUILTIN_TRUNCPH512);
- }
- break;
-
- CASE_CFN_FMA:
- if (out_mode == DFmode && in_mode == DFmode)
- {
- if (out_n == 2 && in_n == 2)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPD);
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPD256);
- }
- if (out_mode == SFmode && in_mode == SFmode)
- {
- if (out_n == 4 && in_n == 4)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPS);
- if (out_n == 8 && in_n == 8)
- return ix86_get_builtin (IX86_BUILTIN_VFMADDPS256);
- }
- break;
default:
break;
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index d7b49c9..5334363 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -15109,9 +15109,24 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
return ix86_vector_duplicate_value (mode, target, val);
else
{
- machine_mode hvmode = (mode == V16HImode ? V8HImode
- : mode == V16HFmode ? V8HFmode
- : V16QImode);
+ machine_mode hvmode;
+ switch (mode)
+ {
+ case V16HImode:
+ hvmode = V8HImode;
+ break;
+ case V16HFmode:
+ hvmode = V8HFmode;
+ break;
+ case V16BFmode:
+ hvmode = V8BFmode;
+ break;
+ case V32QImode:
+ hvmode = V16QImode;
+ break;
+ default:
+ gcc_unreachable ();
+ }
rtx x = gen_reg_rtx (hvmode);
ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
@@ -15130,10 +15145,24 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
return ix86_vector_duplicate_value (mode, target, val);
else
{
- machine_mode hvmode = (mode == V32HImode ? V16HImode
- : mode == V32HFmode ? V16HFmode
- : mode == V32BFmode ? V16BFmode
- : V32QImode);
+ machine_mode hvmode;
+ switch (mode)
+ {
+ case V32HImode:
+ hvmode = V16HImode;
+ break;
+ case V32HFmode:
+ hvmode = V16HFmode;
+ break;
+ case V32BFmode:
+ hvmode = V16BFmode;
+ break;
+ case V64QImode:
+ hvmode = V32QImode;
+ break;
+ default:
+ gcc_unreachable ();
+ }
rtx x = gen_reg_rtx (hvmode);
ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index dda4b43..222a041 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -1629,6 +1629,160 @@
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
+;; Parallel single-precision floating point rounding operations.
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "nearbyintv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_MXCSR | ROUND_NO_EXC);")
+
+(define_expand "rintv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_MXCSR);")
+
+(define_expand "ceilv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_CEIL | ROUND_NO_EXC);")
+
+(define_expand "lceilv2sfv2si2"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_ceilv2sf2 (tmp, operands[1]));
+ emit_insn (gen_fix_truncv2sfv2si2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "floorv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+ "operands[2] = GEN_INT (ROUND_FLOOR | ROUND_NO_EXC);")
+
+(define_expand "lfloorv2sfv2si2"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_floorv2sf2 (tmp, operands[1]));
+ emit_insn (gen_fix_truncv2sfv2si2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "btruncv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_TRUNC | ROUND_NO_EXC);")
+
+(define_insn "*mmx_roundv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand" "=Yr,*x,v")
+ (unspec:V2SF
+ [(match_operand:V2SF 1 "register_operand" "Yr,x,v")
+ (match_operand:SI 2 "const_0_to_15_operand")]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ "%vroundps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,noavx,avx")
+ (set_attr "type" "ssecvt")
+ (set_attr "prefix_data16" "1,1,*")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "orig,orig,vex")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "lrintv2sfv2si2"
+ [(set (match_operand:V2SI 0 "register_operand" "=v")
+ (unspec:V2SI
+ [(match_operand:V2SF 1 "register_operand" "v")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_MMX_WITH_SSE"
+ "%vcvtps2dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (match_test "TARGET_AVX")
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "TI")])
+
+(define_expand "roundv2sf2"
+ [(set (match_dup 3)
+ (plus:V2SF
+ (match_operand:V2SF 1 "register_operand")
+ (match_dup 2)))
+ (set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF
+ [(match_dup 3) (match_dup 4)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ const struct real_format *fmt;
+ REAL_VALUE_TYPE pred_half, half_minus_pred_half;
+ rtx half, vec_half;
+
+ /* load nextafter (0.5, 0.0) */
+ fmt = REAL_MODE_FORMAT (SFmode);
+ real_2expN (&half_minus_pred_half, -(fmt->p) - 1, SFmode);
+ real_arithmetic (&pred_half, MINUS_EXPR, &dconsthalf, &half_minus_pred_half);
+ half = const_double_from_real_value (pred_half, SFmode);
+
+ vec_half = ix86_build_const_vector (V2SFmode, true, half);
+ vec_half = force_reg (V2SFmode, vec_half);
+
+ operands[2] = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_copysignv2sf3 (operands[2], vec_half, operands[1]));
+
+ operands[3] = gen_reg_rtx (V2SFmode);
+ operands[4] = GEN_INT (ROUND_TRUNC);
+})
+
+(define_expand "lroundv2sfv2si2"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math
+ && TARGET_MMX_WITH_SSE"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_roundv2sf2 (tmp, operands[1]));
+ emit_insn (gen_fix_truncv2sfv2si2 (operands[0], tmp));
+ DONE;
+})
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
;; Parallel half-precision floating point arithmetic
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index d535c0a..b60c0d3 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -321,6 +321,11 @@
[(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+(define_mode_iterator VF1_VF2_AVX512DQ
+ [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512DQ") (V4DF "TARGET_AVX512DQ && TARGET_AVX512VL")
+ (V2DF "TARGET_AVX512DQ && TARGET_AVX512VL")])
+
(define_mode_iterator VFH
[(V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
@@ -23177,6 +23182,14 @@
"TARGET_SSE4_1"
"operands[2] = GEN_INT (ROUND_MXCSR);")
+;; Note vcvtpd2qq require avx512dq for all vector lengths.
+(define_expand "lrint<mode><sseintvecmodelower>2"
+ [(set (match_operand:<sseintvecmode> 0 "register_operand")
+ (unspec:<sseintvecmode>
+ [(match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE2")
+
(define_insn "<sse4_1>_round<ssemodesuffix><avxsizesuffix>"
[(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x")
(unspec:VF_128_256
@@ -23316,6 +23329,55 @@
(set_attr "prefix" "orig,orig,vex,evex")
(set_attr "mode" "<MODE>")])
+(define_expand "floor<mode>2"
+ [(set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
+ [(match_operand:VFH 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_FLOOR | ROUND_NO_EXC);")
+
+(define_expand "lfloor<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_floor<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "ceil<mode>2"
+ [(set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
+ [(match_operand:VFH 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_CEIL | ROUND_NO_EXC);")
+
+(define_expand "lceil<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_ceil<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "btrunc<mode>2"
+ [(set (match_operand:VFH 0 "register_operand")
+ (unspec:VFH
+ [(match_operand:VFH 1 "vector_operand")
+ (match_dup 2)]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+ "operands[2] = GEN_INT (ROUND_TRUNC | ROUND_NO_EXC);")
+
(define_expand "round<mode>2"
[(set (match_dup 3)
(plus:VF
@@ -23350,6 +23412,17 @@
operands[4] = GEN_INT (ROUND_TRUNC);
})
+(define_expand "lround<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand")
+ (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
+ "TARGET_SSE4_1 && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_round<mode>2 (tmp, operands[1]));
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
(define_expand "round<mode>2_sfix"
[(match_operand:<sseintvecmode> 0 "register_operand")
(match_operand:VF1 1 "register_operand")]
@@ -23868,6 +23941,13 @@
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
+(define_expand "exp2<mode>2"
+ [(set (match_operand:VF_512 0 "register_operand")
+ (unspec:VF_512
+ [(match_operand:VF_512 1 "vector_operand")]
+ UNSPEC_EXP2))]
+ "TARGET_AVX512ER")
+
(define_insn "avx512er_exp2<mode><mask_name><round_saeonly_name>"
[(set (match_operand:VF_512 0 "register_operand" "=v")
(unspec:VF_512
diff --git a/gcc/config/i386/x86-tune-sched.cc b/gcc/config/i386/x86-tune-sched.cc
index 1ffaeef..e2765f8 100644
--- a/gcc/config/i386/x86-tune-sched.cc
+++ b/gcc/config/i386/x86-tune-sched.cc
@@ -73,10 +73,24 @@ ix86_issue_rate (void)
case PROCESSOR_SANDYBRIDGE:
case PROCESSOR_HASWELL:
case PROCESSOR_TREMONT:
+ case PROCESSOR_SKYLAKE:
+ case PROCESSOR_SKYLAKE_AVX512:
+ case PROCESSOR_CASCADELAKE:
+ case PROCESSOR_CANNONLAKE:
case PROCESSOR_ALDERLAKE:
case PROCESSOR_GENERIC:
return 4;
+ case PROCESSOR_ICELAKE_CLIENT:
+ case PROCESSOR_ICELAKE_SERVER:
+ case PROCESSOR_TIGERLAKE:
+ case PROCESSOR_COOPERLAKE:
+ case PROCESSOR_ROCKETLAKE:
+ return 5;
+
+ case PROCESSOR_SAPPHIRERAPIDS:
+ return 6;
+
default:
return 1;
}
diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h
index 664dc92..c5b1afe 100644
--- a/gcc/config/loongarch/gnu-user.h
+++ b/gcc/config/loongarch/gnu-user.h
@@ -40,8 +40,10 @@ along with GCC; see the file COPYING3. If not see
#undef GNU_USER_TARGET_LINK_SPEC
#define GNU_USER_TARGET_LINK_SPEC \
"%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \
- "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \
- "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}"
+ "%{!shared: %{static} " \
+ "%{!static: %{!static-pie: %{rdynamic:-export-dynamic} " \
+ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} " \
+ "%{static-pie: -static -pie --no-dynamic-linker -z text}}"
/* Similar to standard Linux, but adding -ffast-math support. */
diff --git a/gcc/config/mips/mips.cc b/gcc/config/mips/mips.cc
index 4772495..387376b 100644
--- a/gcc/config/mips/mips.cc
+++ b/gcc/config/mips/mips.cc
@@ -20018,7 +20018,7 @@ mips_set_tune (const struct mips_cpu_info *info)
static void
mips_option_override (void)
{
- int i, start, regno, mode;
+ int i, regno, mode;
if (OPTION_SET_P (mips_isa_option))
mips_isa_option_info = &mips_cpu_info_table[mips_isa_option];
diff --git a/gcc/config/nvptx/mkoffload.cc b/gcc/config/nvptx/mkoffload.cc
index 834b205..854cd72 100644
--- a/gcc/config/nvptx/mkoffload.cc
+++ b/gcc/config/nvptx/mkoffload.cc
@@ -324,9 +324,19 @@ process (FILE *in, FILE *out, uint32_t omp_requires)
{
if (sm_ver && sm_ver[0] == '3' && sm_ver[1] == '0'
&& sm_ver[2] == '\n')
- fatal_error (input_location,
- "%<omp requires reverse_offload%> requires at least "
- "%<sm_35%> for %<-misa=%>");
+ {
+ warning_at (input_location, 0,
+ "%<omp requires reverse_offload%> requires at "
+ "least %<sm_35%> for "
+ "%<-foffload-options=nvptx-none=-march=%> - disabling"
+ " offload-code generation for this device type");
+ /* As now an empty file is compiled and there is no call to
+ GOMP_offload_register_ver, this device type is effectively
+ disabled. */
+ fflush (out);
+ ftruncate (fileno (out), 0);
+ return;
+ }
sm_ver2 = sm_ver;
version2 = version;
}
@@ -526,7 +536,7 @@ main (int argc, char **argv)
FILE *out = stdout;
const char *outname = 0;
- progname = "mkoffload";
+ progname = tool_name;
diagnostic_initialize (global_dc, 0);
if (atexit (mkoffload_cleanup) != 0)
diff --git a/gcc/config/rs6000/rs6000-builtin.cc b/gcc/config/rs6000/rs6000-builtin.cc
index 6dfb0db..3ce729c 100644
--- a/gcc/config/rs6000/rs6000-builtin.cc
+++ b/gcc/config/rs6000/rs6000-builtin.cc
@@ -1260,6 +1260,11 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
enum tree_code bcode;
gimple *g;
+ /* For an unresolved overloaded builtin, return early here since there
+ is no builtin info for it and we are unable to fold it. */
+ if (fn_code > RS6000_OVLD_NONE)
+ return false;
+
size_t uns_fncode = (size_t) fn_code;
enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
const char *fn_name1 = rs6000_builtin_info[uns_fncode].bifname;
@@ -3256,6 +3261,14 @@ rs6000_expand_builtin (tree exp, rtx target, rtx /* subtarget */,
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
enum rs6000_gen_builtins fcode
= (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
+
+ /* Emit error message if it's an unresolved overloaded builtin. */
+ if (fcode > RS6000_OVLD_NONE)
+ {
+ error ("unresolved overload for builtin %qF", fndecl);
+ return const0_rtx;
+ }
+
size_t uns_fcode = (size_t)fcode;
enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
diff --git a/gcc/config/rs6000/rs6000-c.cc b/gcc/config/rs6000/rs6000-c.cc
index 4d051b9..ca9cc42 100644
--- a/gcc/config/rs6000/rs6000-c.cc
+++ b/gcc/config/rs6000/rs6000-c.cc
@@ -1749,6 +1749,36 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
unsigned int nargs = vec_safe_length (arglist);
+ /* If the number of arguments did not match the prototype, return NULL
+ and the generic code will issue the appropriate error message. Skip
+ this test for functions where we don't fully describe all the possible
+ overload signatures in rs6000-overload.def (because they aren't relevant
+ to the expansion here). If we don't, we get confusing error messages. */
+ /* As an example, for vec_splats we have:
+
+; There are no actual builtins for vec_splats. There is special handling for
+; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call
+; is replaced by a constructor. The single overload here causes
+; __builtin_vec_splats to be registered with the front end so that can happen.
+[VEC_SPLATS, vec_splats, __builtin_vec_splats]
+ vsi __builtin_vec_splats (vsi);
+ ABS_V4SI SPLATS_FAKERY
+
+ So even though __builtin_vec_splats accepts all vector types, the
+ infrastructure cheats and just records one prototype. We end up getting
+ an error message that refers to this specific prototype even when we
+ are handling a different argument type. That is completely confusing
+ to the user, so it's best to let these cases be handled individually
+ in the resolve_vec_splats, etc., helper functions. */
+
+ if (expected_args != nargs
+ && !(fcode == RS6000_OVLD_VEC_PROMOTE
+ || fcode == RS6000_OVLD_VEC_SPLATS
+ || fcode == RS6000_OVLD_VEC_EXTRACT
+ || fcode == RS6000_OVLD_VEC_INSERT
+ || fcode == RS6000_OVLD_VEC_STEP))
+ return NULL;
+
for (n = 0;
!VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
fnargs = TREE_CHAIN (fnargs), n++)
@@ -1809,36 +1839,6 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
types[n] = type;
}
- /* If the number of arguments did not match the prototype, return NULL
- and the generic code will issue the appropriate error message. Skip
- this test for functions where we don't fully describe all the possible
- overload signatures in rs6000-overload.def (because they aren't relevant
- to the expansion here). If we don't, we get confusing error messages. */
- /* As an example, for vec_splats we have:
-
-; There are no actual builtins for vec_splats. There is special handling for
-; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call
-; is replaced by a constructor. The single overload here causes
-; __builtin_vec_splats to be registered with the front end so that can happen.
-[VEC_SPLATS, vec_splats, __builtin_vec_splats]
- vsi __builtin_vec_splats (vsi);
- ABS_V4SI SPLATS_FAKERY
-
- So even though __builtin_vec_splats accepts all vector types, the
- infrastructure cheats and just records one prototype. We end up getting
- an error message that refers to this specific prototype even when we
- are handling a different argument type. That is completely confusing
- to the user, so it's best to let these cases be handled individually
- in the resolve_vec_splats, etc., helper functions. */
-
- if (n != expected_args
- && !(fcode == RS6000_OVLD_VEC_PROMOTE
- || fcode == RS6000_OVLD_VEC_SPLATS
- || fcode == RS6000_OVLD_VEC_EXTRACT
- || fcode == RS6000_OVLD_VEC_INSERT
- || fcode == RS6000_OVLD_VEC_STEP))
- return NULL;
-
/* Some overloads require special handling. */
tree returned_expr = NULL;
resolution res = unresolved;
diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
index bcf634a..5f347e9 100644
--- a/gcc/config/rs6000/rs6000.cc
+++ b/gcc/config/rs6000/rs6000.cc
@@ -5141,16 +5141,23 @@ protected:
vect_cost_model_location, unsigned int);
void density_test (loop_vec_info);
void adjust_vect_cost_per_loop (loop_vec_info);
+ unsigned int determine_suggested_unroll_factor (loop_vec_info);
/* Total number of vectorized stmts (loop only). */
unsigned m_nstmts = 0;
/* Total number of loads (loop only). */
unsigned m_nloads = 0;
+ /* Total number of stores (loop only). */
+ unsigned m_nstores = 0;
+ /* Reduction factor for suggesting unroll factor (loop only). */
+ unsigned m_reduc_factor = 0;
/* Possible extra penalized cost on vector construction (loop only). */
unsigned m_extra_ctor_cost = 0;
/* For each vectorized loop, this var holds TRUE iff a non-memory vector
instruction is needed by the vectorization. */
bool m_vect_nonmem = false;
+ /* If this loop gets vectorized with emulated gather load. */
+ bool m_gather_load = false;
};
/* Test for likely overcommitment of vector hardware resources. If a
@@ -5301,9 +5308,34 @@ rs6000_cost_data::update_target_cost_per_stmt (vect_cost_for_stmt kind,
{
m_nstmts += orig_count;
- if (kind == scalar_load || kind == vector_load
- || kind == unaligned_load || kind == vector_gather_load)
- m_nloads += orig_count;
+ if (kind == scalar_load
+ || kind == vector_load
+ || kind == unaligned_load
+ || kind == vector_gather_load)
+ {
+ m_nloads += orig_count;
+ if (stmt_info && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ m_gather_load = true;
+ }
+ else if (kind == scalar_store
+ || kind == vector_store
+ || kind == unaligned_store
+ || kind == vector_scatter_store)
+ m_nstores += orig_count;
+ else if ((kind == scalar_stmt
+ || kind == vector_stmt
+ || kind == vec_to_scalar)
+ && stmt_info
+ && vect_is_reduction (stmt_info))
+ {
+ /* Loop body contains normal int or fp operations and epilogue
+ contains vector reduction. For simplicity, we assume int
+ operation takes one cycle and fp operation takes one more. */
+ tree lhs = gimple_get_lhs (stmt_info->stmt);
+ bool is_float = FLOAT_TYPE_P (TREE_TYPE (lhs));
+ unsigned int basic_cost = is_float ? 2 : 1;
+ m_reduc_factor = MAX (basic_cost * orig_count, m_reduc_factor);
+ }
/* Power processors do not currently have instructions for strided
and elementwise loads, and instead we must generate multiple
@@ -5395,6 +5427,90 @@ rs6000_cost_data::adjust_vect_cost_per_loop (loop_vec_info loop_vinfo)
}
}
+/* Determine suggested unroll factor by considering some below factors:
+
+ - unroll option/pragma which can disable unrolling for this loop;
+ - simple hardware resource model for non memory vector insns;
+ - aggressive heuristics when iteration count is unknown:
+ - reduction case to break cross iteration dependency;
+ - emulated gather load;
+ - estimated iteration count when iteration count is unknown;
+*/
+
+
+unsigned int
+rs6000_cost_data::determine_suggested_unroll_factor (loop_vec_info loop_vinfo)
+{
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+
+ /* Don't unroll if it's specified explicitly not to be unrolled. */
+ if (loop->unroll == 1
+ || (OPTION_SET_P (flag_unroll_loops) && !flag_unroll_loops)
+ || (OPTION_SET_P (flag_unroll_all_loops) && !flag_unroll_all_loops))
+ return 1;
+
+ unsigned int nstmts_nonldst = m_nstmts - m_nloads - m_nstores;
+ /* Don't unroll if no vector instructions excepting for memory access. */
+ if (nstmts_nonldst == 0)
+ return 1;
+
+ /* Consider breaking cross iteration dependency for reduction. */
+ unsigned int reduc_factor = m_reduc_factor > 1 ? m_reduc_factor : 1;
+
+ /* Use this simple hardware resource model that how many non ld/st
+ vector instructions can be issued per cycle. */
+ unsigned int issue_width = rs6000_vect_unroll_issue;
+ unsigned int uf = CEIL (reduc_factor * issue_width, nstmts_nonldst);
+ uf = MIN ((unsigned int) rs6000_vect_unroll_limit, uf);
+ /* Make sure it is power of 2. */
+ uf = 1 << ceil_log2 (uf);
+
+ /* If the iteration count is known, the costing would be exact enough,
+ don't worry it could be worse. */
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ return uf;
+
+ /* Inspired by SPEC2017 parest_r, we want to aggressively unroll the
+ loop if either condition is satisfied:
+ - reduction factor exceeds the threshold;
+ - emulated gather load adopted. */
+ if (reduc_factor > (unsigned int) rs6000_vect_unroll_reduc_threshold
+ || m_gather_load)
+ return uf;
+
+ /* Check if we can conclude it's good to unroll from the estimated
+ iteration count. */
+ HOST_WIDE_INT est_niter = get_estimated_loop_iterations_int (loop);
+ unsigned int vf = vect_vf_for_cost (loop_vinfo);
+ unsigned int unrolled_vf = vf * uf;
+ if (est_niter == -1 || est_niter < unrolled_vf)
+ /* When the estimated iteration of this loop is unknown, it's possible
+ that we are able to vectorize this loop with the original VF but fail
+ to vectorize it with the unrolled VF any more if the actual iteration
+ count is in between. */
+ return 1;
+ else
+ {
+ unsigned int epil_niter_unr = est_niter % unrolled_vf;
+ unsigned int epil_niter = est_niter % vf;
+ /* Even if we have partial vector support, it can be still inefficent
+ to calculate the length when the iteration count is unknown, so
+ only expect it's good to unroll when the epilogue iteration count
+ is not bigger than VF (only one time length calculation). */
+ if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
+ && epil_niter_unr <= vf)
+ return uf;
+ /* Without partial vector support, conservatively unroll this when
+ the epilogue iteration count is less than the original one
+ (epilogue execution time wouldn't be longer than before). */
+ else if (!LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
+ && epil_niter_unr <= epil_niter)
+ return uf;
+ }
+
+ return 1;
+}
+
void
rs6000_cost_data::finish_cost (const vector_costs *scalar_costs)
{
@@ -5411,6 +5527,9 @@ rs6000_cost_data::finish_cost (const vector_costs *scalar_costs)
&& LOOP_VINFO_VECT_FACTOR (loop_vinfo) == 2
&& LOOP_REQUIRES_VERSIONING (loop_vinfo))
m_costs[vect_body] += 10000;
+
+ m_suggested_unroll_factor
+ = determine_suggested_unroll_factor (loop_vinfo);
}
vector_costs::finish_cost (scalar_costs);
@@ -10178,6 +10297,41 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
gen_rtx_IOR (DImode, copy_rtx (temp),
GEN_INT (ud1)));
}
+ else if (TARGET_PREFIXED)
+ {
+ if (can_create_pseudo_p ())
+ {
+ /* pli A,L + pli B,H + rldimi A,B,32,0. */
+ temp = gen_reg_rtx (DImode);
+ rtx temp1 = gen_reg_rtx (DImode);
+ emit_move_insn (temp, GEN_INT ((ud4 << 16) | ud3));
+ emit_move_insn (temp1, GEN_INT ((ud2 << 16) | ud1));
+
+ emit_insn (gen_rotldi3_insert_3 (dest, temp, GEN_INT (32), temp1,
+ GEN_INT (0xffffffff)));
+ }
+ else
+ {
+ /* pli A,H + sldi A,32 + paddi A,A,L. */
+ emit_move_insn (dest, GEN_INT ((ud4 << 16) | ud3));
+
+ emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
+
+ bool can_use_paddi = REGNO (dest) != FIRST_GPR_REGNO;
+
+ /* Use paddi for the low 32 bits. */
+ if (ud2 != 0 && ud1 != 0 && can_use_paddi)
+ emit_move_insn (dest, gen_rtx_PLUS (DImode, dest,
+ GEN_INT ((ud2 << 16) | ud1)));
+
+ /* Use oris, ori for low 32 bits. */
+ if (ud2 != 0 && (ud1 == 0 || !can_use_paddi))
+ emit_move_insn (dest,
+ gen_rtx_IOR (DImode, dest, GEN_INT (ud2 << 16)));
+ if (ud1 != 0 && (ud2 == 0 || !can_use_paddi))
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ }
else
{
temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt
index b227bf9..b63a5d4 100644
--- a/gcc/config/rs6000/rs6000.opt
+++ b/gcc/config/rs6000/rs6000.opt
@@ -620,6 +620,14 @@ mieee128-constant
Target Var(TARGET_IEEE128_CONSTANT) Init(1) Save
Generate (do not generate) code that uses the LXVKQ instruction.
+; Documented parameters
+
+-param=rs6000-vect-unroll-limit=
+Target Joined UInteger Var(rs6000_vect_unroll_limit) Init(4) IntegerRange(1, 64) Param
+Used to limit unroll factor which indicates how much the autovectorizer may
+unroll a loop. The default value is 4.
+
+; Undocumented parameters
-param=rs6000-density-pct-threshold=
Target Undocumented Joined UInteger Var(rs6000_density_pct_threshold) Init(85) IntegerRange(0, 100) Param
When costing for loop vectorization, we probably need to penalize the loop body
@@ -657,3 +665,13 @@ Like parameter rs6000-density-load-pct-threshold, we also check if the total
number of load statements exceeds the threshold specified by this parameter,
and penalize only if it's satisfied. The default value is 20.
+-param=rs6000-vect-unroll-issue=
+Target Undocumented Joined UInteger Var(rs6000_vect_unroll_issue) Init(4) IntegerRange(1, 128) Param
+Indicate how many non-memory-access vector instructions can be issued per
+cycle; it's used in unroll factor determination for the autovectorizer. The
+default value is 4.
+
+-param=rs6000-vect-unroll-reduc-threshold=
+Target Undocumented Joined UInteger Var(rs6000_vect_unroll_reduc_threshold) Init(1) Param
+When the reduction factor computed for a loop exceeds the threshold specified
+by this parameter, prefer to unroll this loop. The default value is 1.
diff --git a/gcc/config/xtensa/xtensa.cc b/gcc/config/xtensa/xtensa.cc
index ac52c01..f1b3331 100644
--- a/gcc/config/xtensa/xtensa.cc
+++ b/gcc/config/xtensa/xtensa.cc
@@ -4472,7 +4472,7 @@ xtensa_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
static bool
xtensa_function_value_regno_p (const unsigned int regno)
{
- return (regno == GP_RETURN);
+ return (regno >= GP_RETURN && regno < GP_RETURN + GP_RETURN_REG_COUNT);
}
/* The static chain is passed in memory. Provide rtx giving 'mem'
diff --git a/gcc/config/xtensa/xtensa.h b/gcc/config/xtensa/xtensa.h
index 0f3006d..16e3d55 100644
--- a/gcc/config/xtensa/xtensa.h
+++ b/gcc/config/xtensa/xtensa.h
@@ -488,6 +488,7 @@ enum reg_class
point, and values of coprocessor and user-defined modes. */
#define GP_RETURN (GP_REG_FIRST + 2 + WINDOW_SIZE)
#define GP_OUTGOING_RETURN (GP_REG_FIRST + 2)
+#define GP_RETURN_REG_COUNT 4
/* Symbolic macros for the first/last argument registers. */
#define GP_ARG_FIRST (GP_REG_FIRST + 2)
diff --git a/gcc/config/xtensa/xtensa.md b/gcc/config/xtensa/xtensa.md
index f722ea5..608110c 100644
--- a/gcc/config/xtensa/xtensa.md
+++ b/gcc/config/xtensa/xtensa.md
@@ -2305,6 +2305,27 @@
(set_attr "mode" "none")
(set_attr "length" "3")])
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "")
+ (const_int 0))
+ (match_operand 1 "")
+ (match_operand 2 "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ emit_insn (gen_blockage ());
+ DONE;
+})
+
(define_insn "entry"
[(set (reg:SI A1_REG)
(unspec_volatile:SI [(match_operand:SI 0 "const_int_operand" "i")]
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 0f37423..dc4ce202 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,156 @@
+2022-09-17 Patrick Palka <ppalka@redhat.com>
+
+ * module.cc (friend_from_decl_list): Don't consider
+ CLASSTYPE_TEMPLATE_INFO for a TYPENAME_TYPE friend.
+ (trees_in::read_class_def): Don't add to
+ CLASSTYPE_BEFRIENDING_CLASSES for a TYPENAME_TYPE friend.
+
+2022-09-16 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/92505
+	* constexpr.cc (cxx_eval_component_reference): Check non_constant_p
+	sooner. In C++14 or later, reject a DECL_MUTABLE_P member access
+	only if CONSTRUCTOR_MUTABLE_POISON is also set.
+
+2022-09-16 Jason Merrill <jason@redhat.com>
+
+ PR c++/106858
+ * parser.cc (cp_parser_omp_var_list_no_open): Pass the
+ initial token location down.
+ * semantics.cc (finish_omp_clauses): Check
+ invalid_nonstatic_memfn_p.
+ * typeck.cc (invalid_nonstatic_memfn_p): Handle null TREE_TYPE.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * decl.cc (cxx_init_decl_processing): Inline last
+ build_void_list_node call.
+ (build_void_list_node): Remove.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * semantics.cc (finish_omp_clauses): Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * cp-tree.h (mark_used): Remove single-parameter overload. Add
+ default argument to the two-parameter overload.
+ * decl2.cc (mark_used): Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * cp-tree.h (cxx_constant_value): Define two-parameter version
+ that omits the object parameter.
+ * decl.cc (build_explicit_specifier): Omit NULL_TREE object
+ argument to cxx_constant_value.
+ * except.cc (build_noexcept_spec): Likewise.
+ * pt.cc (expand_integer_pack): Likewise.
+ (fold_targs_r): Likewise.
+ * semantics.cc (finish_if_stmt_cond): Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * decl.cc (build_explicit_specifier): Pass complain to
+ cxx_constant_value.
+ * except.cc (build_noexcept_spec): Likewise.
+ * pt.cc (expand_integer_pack): Likewise.
+ (tsubst_function_decl): Propagate error_mark_node returned
+ from build_explicit_specifier.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ * call.cc (build_conditional_expr): Adjust calls to
+ '_sfinae'-suffixed functions.
+ (build_temp): Likewise.
+ (convert_like_internal): Likewise.
+ (convert_arg_to_ellipsis): Likewise.
+ (build_over_call): Likewise.
+ (build_cxx_call): Likewise.
+ (build_new_method_call): Likewise.
+ * constexpr.cc (cxx_eval_outermost_constant_expr): Likewise.
+ (cxx_constant_value_sfinae): Rename to ...
+ (cxx_constant_value): ... this. Document its default arguments.
+ (fold_non_dependent_expr): Adjust function comment.
+ * cp-tree.h (instantiate_non_dependent_expr_sfinae): Rename to ...
+ (instantiate_non_dependent_expr): ... this. Give its 'complain'
+ parameter a default argument.
+ (get_target_expr_sfinae, get_target_expr): Likewise.
+ (require_complete_type_sfinae, require_complete_type): Likewise.
+ (abstract_virtuals_error_sfinae, abstract_virtuals_error):
+ Likewise.
+ (cxx_constant_value_sfinae, cxx_constant_value): Likewise.
+ * cvt.cc (build_up_reference): Adjust calls to '_sfinae'-suffixed
+ functions.
+ (ocp_convert): Likewise.
+ * decl.cc (build_explicit_specifier): Likewise.
+ * except.cc (build_noexcept_spec): Likewise.
+ * init.cc (build_new_1): Likewise.
+ * pt.cc (expand_integer_pack): Likewise.
+ (instantiate_non_dependent_expr_internal): Adjust function
+ comment.
+ (instantiate_non_dependent_expr): Rename to ...
+ (instantiate_non_dependent_expr_sfinae): ... this. Document its
+ default argument.
+ (tsubst_init): Adjust calls to '_sfinae'-suffixed functions.
+ (fold_targs_r): Likewise.
+ * semantics.cc (finish_compound_literal): Likewise.
+ (finish_decltype_type): Likewise.
+ (cp_build_bit_cast): Likewise.
+ * tree.cc (build_cplus_new): Likewise.
+ (get_target_expr): Rename to ...
+ (get_target_expr_sfinae): ... this. Document its default
+ argument.
+ * typeck.cc (require_complete_type): Rename to ...
+ (require_complete_type_sfinae): ... this. Document its default
+ argument.
+ (cp_build_array_ref): Adjust calls to '_sfinae'-suffixed
+ functions.
+ (convert_arguments): Likewise.
+ (cp_build_binary_op): Likewise.
+ (build_static_cast_1): Likewise.
+ (cp_build_modify_expr): Likewise.
+ (convert_for_initialization): Likewise.
+ * typeck2.cc (abstract_virtuals_error): Rename to ...
+ (abstract_virtuals_error_sfinae): ... this. Document its default
+ argument.
+ (build_functional_cast_1): Adjust calls to '_sfinae'-suffixed
+ functions.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/101906
+ * pt.cc (tsubst_template_args): Set cp_evaluated here.
+ (tsubst_aggr_type): Not here.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106893
+ PR c++/90451
+ * decl.cc (cp_finish_decl): Call mark_single_function.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/93259
+ * pt.cc (type_dependent_expression_p): Treat a compound
+ literal of array-of-unknown-bound type like a variable.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106567
+ * lambda.cc (type_deducible_expression_p): Check
+ array_of_unknown_bound_p.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ PR c++/86491
+ * decl2.cc (constrain_class_visibility): Adjust wording of
+ -Wsubobject-linkage for cases where anonymous
+ namespaces aren't used.
+ * tree.cc (decl_anon_ns_mem_p): Now only true for actual anonymous
+ namespace members, rename old semantics to...
+ (decl_internal_context_p): ...this.
+ * cp-tree.h, name-lookup.cc, pt.cc: Adjust.
+
2022-09-08 Jonathan Wakely <jwakely@redhat.com>
PR c++/106838
diff --git a/gcc/cp/call.cc b/gcc/cp/call.cc
index d107a28..7e9289f 100644
--- a/gcc/cp/call.cc
+++ b/gcc/cp/call.cc
@@ -5976,7 +5976,7 @@ build_conditional_expr (const op_location_t &loc,
but now we sometimes wrap them in NOP_EXPRs so the test would
fail. */
if (CLASS_TYPE_P (TREE_TYPE (result)))
- result = get_target_expr_sfinae (result, complain);
+ result = get_target_expr (result, complain);
/* If this expression is an rvalue, but might be mistaken for an
lvalue, we must add a NON_LVALUE_EXPR. */
result = rvalue (result);
@@ -7672,7 +7672,7 @@ build_temp (tree expr, tree type, int flags,
if ((lvalue_kind (expr) & clk_packed)
&& CLASS_TYPE_P (TREE_TYPE (expr))
&& !type_has_nontrivial_copy_init (TREE_TYPE (expr)))
- return get_target_expr_sfinae (expr, complain);
+ return get_target_expr (expr, complain);
/* In decltype, we might have decided not to wrap this call in a TARGET_EXPR.
But it turns out to be a subexpression, so perform temporary
@@ -8008,10 +8008,10 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
&& !processing_template_decl)
{
bool direct = CONSTRUCTOR_IS_DIRECT_INIT (expr);
- if (abstract_virtuals_error_sfinae (NULL_TREE, totype, complain))
+ if (abstract_virtuals_error (NULL_TREE, totype, complain))
return error_mark_node;
expr = build_value_init (totype, complain);
- expr = get_target_expr_sfinae (expr, complain);
+ expr = get_target_expr (expr, complain);
if (expr != error_mark_node)
{
TARGET_EXPR_LIST_INIT_P (expr) = true;
@@ -8137,7 +8137,7 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
field = next_aggregate_field (DECL_CHAIN (field));
CONSTRUCTOR_APPEND_ELT (vec, field, size_int (len));
tree new_ctor = build_constructor (totype, vec);
- return get_target_expr_sfinae (new_ctor, complain);
+ return get_target_expr (new_ctor, complain);
}
case ck_aggr:
@@ -8153,7 +8153,7 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
return expr;
}
expr = reshape_init (totype, expr, complain);
- expr = get_target_expr_sfinae (digest_init (totype, expr, complain),
+ expr = get_target_expr (digest_init (totype, expr, complain),
complain);
if (expr != error_mark_node)
TARGET_EXPR_LIST_INIT_P (expr) = true;
@@ -8580,12 +8580,12 @@ convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
standard conversions are performed. */
arg = decay_conversion (arg, complain);
- arg = require_complete_type_sfinae (arg, complain);
+ arg = require_complete_type (arg, complain);
arg_type = TREE_TYPE (arg);
if (arg != error_mark_node
/* In a template (or ill-formed code), we can have an incomplete type
- even after require_complete_type_sfinae, in which case we don't know
+ even after require_complete_type, in which case we don't know
whether it has trivial copy or not. */
&& COMPLETE_TYPE_P (arg_type)
&& !cp_unevaluated_operand)
@@ -10000,7 +10000,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
obj_arg = TREE_OPERAND (addr, 0);
}
}
- call = cxx_constant_value_sfinae (call, obj_arg, complain);
+ call = cxx_constant_value (call, obj_arg, complain);
if (obj_arg && !error_operand_p (call))
call = build2 (INIT_EXPR, void_type_node, obj_arg, call);
call = convert_from_reference (call);
@@ -10505,7 +10505,7 @@ build_cxx_call (tree fn, int nargs, tree *argarray,
prvalue. The type of the prvalue may be incomplete. */
if (!(complain & tf_decltype))
{
- fn = require_complete_type_sfinae (fn, complain);
+ fn = require_complete_type (fn, complain);
if (fn == error_mark_node)
return error_mark_node;
@@ -11084,7 +11084,7 @@ build_new_method_call (tree instance, tree fns, vec<tree, va_gc> **args,
if (init)
{
if (is_dummy_object (instance))
- return get_target_expr_sfinae (init, complain);
+ return get_target_expr (init, complain);
init = build2 (INIT_EXPR, TREE_TYPE (instance), instance, init);
TREE_SIDE_EFFECTS (init) = true;
return init;
diff --git a/gcc/cp/constexpr.cc b/gcc/cp/constexpr.cc
index c047fe4..1063987 100644
--- a/gcc/cp/constexpr.cc
+++ b/gcc/cp/constexpr.cc
@@ -4088,6 +4088,8 @@ cxx_eval_component_reference (const constexpr_ctx *ctx, tree t,
tree whole = cxx_eval_constant_expression (ctx, orig_whole,
lval,
non_constant_p, overflow_p);
+ if (*non_constant_p)
+ return t;
if (INDIRECT_REF_P (whole)
&& integer_zerop (TREE_OPERAND (whole, 0)))
{
@@ -4108,20 +4110,21 @@ cxx_eval_component_reference (const constexpr_ctx *ctx, tree t,
whole, part, NULL_TREE);
/* Don't VERIFY_CONSTANT here; we only want to check that we got a
CONSTRUCTOR. */
- if (!*non_constant_p && TREE_CODE (whole) != CONSTRUCTOR)
+ if (TREE_CODE (whole) != CONSTRUCTOR)
{
if (!ctx->quiet)
error ("%qE is not a constant expression", orig_whole);
*non_constant_p = true;
+ return t;
}
- if (DECL_MUTABLE_P (part))
+ if ((cxx_dialect < cxx14 || CONSTRUCTOR_MUTABLE_POISON (whole))
+ && DECL_MUTABLE_P (part))
{
if (!ctx->quiet)
error ("mutable %qD is not usable in a constant expression", part);
*non_constant_p = true;
+ return t;
}
- if (*non_constant_p)
- return t;
bool pmf = TYPE_PTRMEMFUNC_P (TREE_TYPE (whole));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (whole), i, field, value)
{
@@ -8068,7 +8071,7 @@ cxx_eval_outermost_constant_expr (tree t, bool allow_non_constant,
r = get_target_expr (r);
else
{
- r = get_target_expr_sfinae (r, tf_warning_or_error | tf_no_cleanup);
+ r = get_target_expr (r, tf_warning_or_error | tf_no_cleanup);
TREE_CONSTANT (r) = true;
}
}
@@ -8081,19 +8084,11 @@ cxx_eval_outermost_constant_expr (tree t, bool allow_non_constant,
}
/* If T represents a constant expression returns its reduced value.
- Otherwise return error_mark_node. If T is dependent, then
- return NULL. */
-
-tree
-cxx_constant_value (tree t, tree decl)
-{
- return cxx_eval_outermost_constant_expr (t, false, true, true, false, decl);
-}
-
-/* As above, but respect SFINAE. */
+ Otherwise return error_mark_node. */
tree
-cxx_constant_value_sfinae (tree t, tree decl, tsubst_flags_t complain)
+cxx_constant_value (tree t, tree decl /* = NULL_TREE */,
+ tsubst_flags_t complain /* = tf_error */)
{
bool sfinae = !(complain & tf_error);
tree r = cxx_eval_outermost_constant_expr (t, sfinae, true, true, false, decl);
@@ -8316,8 +8311,8 @@ fold_non_dependent_expr_template (tree t, tsubst_flags_t complain,
/* Like maybe_constant_value but first fully instantiate the argument.
- Note: this is equivalent to instantiate_non_dependent_expr_sfinae
- (t, complain) followed by maybe_constant_value but is more efficient,
+ Note: this is equivalent to instantiate_non_dependent_expr (t, complain)
+ followed by maybe_constant_value but is more efficient,
because it calls instantiation_dependent_expression_p and
potential_constant_expression at most once.
The manifestly_const_eval argument is passed to maybe_constant_value.
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 7b28405..f19ecaf 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -6945,8 +6945,8 @@ extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
-extern bool mark_used (tree);
-extern bool mark_used (tree, tsubst_flags_t);
+extern bool mark_used (tree,
+ tsubst_flags_t = tf_warning_or_error);
extern bool mark_single_function (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree, tree);
@@ -7391,8 +7391,7 @@ extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
-extern tree instantiate_non_dependent_expr (tree);
-extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
+extern tree instantiate_non_dependent_expr (tree, tsubst_flags_t = tf_error);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
@@ -7824,8 +7823,8 @@ extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_local_temp (tree);
extern bool is_local_temp (tree);
extern tree build_aggr_init_expr (tree, tree);
-extern tree get_target_expr (tree);
-extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
+extern tree get_target_expr (tree,
+ tsubst_flags_t = tf_warning_or_error);
extern tree build_cplus_array_type (tree, tree, int is_dep = -1);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
@@ -7874,7 +7873,8 @@ extern tree replace_placeholders (tree, tree, bool * = NULL);
extern bool find_placeholders (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
-extern bool decl_anon_ns_mem_p (const_tree);
+extern bool decl_anon_ns_mem_p (tree);
+extern bool decl_internal_context_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
@@ -7937,8 +7937,8 @@ extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree, tsubst_flags_t);
extern tree contextual_conv_bool (tree, tsubst_flags_t);
extern tree condition_conversion (tree);
-extern tree require_complete_type (tree);
-extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
+extern tree require_complete_type (tree,
+ tsubst_flags_t = tf_warning_or_error);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
@@ -8152,10 +8152,10 @@ extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (location_t, tree,
enum lvalue_use);
extern void complete_type_check_abstract (tree);
-extern int abstract_virtuals_error (tree, tree);
-extern int abstract_virtuals_error (abstract_class_use, tree);
-extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
-extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
+extern int abstract_virtuals_error (tree, tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern int abstract_virtuals_error (abstract_class_use, tree,
+ tsubst_flags_t = tf_warning_or_error);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
@@ -8412,8 +8412,10 @@ extern bool require_potential_constant_expression (tree);
extern bool require_constant_expression (tree);
extern bool require_rvalue_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
-extern tree cxx_constant_value (tree, tree = NULL_TREE);
-extern tree cxx_constant_value_sfinae (tree, tree, tsubst_flags_t);
+extern tree cxx_constant_value (tree, tree = NULL_TREE,
+ tsubst_flags_t = tf_error);
+inline tree cxx_constant_value (tree t, tsubst_flags_t complain)
+{ return cxx_constant_value (t, NULL_TREE, complain); }
extern void cxx_constant_dtor (tree, tree);
extern tree cxx_constant_init (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE, bool = false);
diff --git a/gcc/cp/cvt.cc b/gcc/cp/cvt.cc
index 30a9806..434d306 100644
--- a/gcc/cp/cvt.cc
+++ b/gcc/cp/cvt.cc
@@ -339,7 +339,7 @@ build_up_reference (tree type, tree arg, int flags, tree decl,
LOOKUP_ONLYCONVERTING|DIRECT_BIND);
}
else if (!(flags & DIRECT_BIND) && ! obvalue_p (arg))
- return get_target_expr_sfinae (arg, complain);
+ return get_target_expr (arg, complain);
/* If we had a way to wrap this up, and say, if we ever needed its
address, transform all occurrences of the register, into a memory
@@ -939,7 +939,7 @@ ocp_convert (tree type, tree expr, int convtype, int flags,
ctor = e;
- if (abstract_virtuals_error_sfinae (NULL_TREE, type, complain))
+ if (abstract_virtuals_error (NULL_TREE, type, complain))
return error_mark_node;
if (BRACE_ENCLOSED_INITIALIZER_P (ctor))
diff --git a/gcc/cp/decl.cc b/gcc/cp/decl.cc
index 4665a29..070f673 100644
--- a/gcc/cp/decl.cc
+++ b/gcc/cp/decl.cc
@@ -4623,7 +4623,7 @@ cxx_init_decl_processing (void)
record_unknown_type (init_list_type_node, "init list");
/* Used when parsing to distinguish parameter-lists () and (void). */
- explicit_void_list_node = build_void_list_node ();
+ explicit_void_list_node = build_tree_list (NULL_TREE, void_type_node);
{
/* Make sure we get a unique function type, so we can give
@@ -8140,6 +8140,9 @@ cp_finish_decl (tree decl, tree init, bool init_const_expr_p,
d_init = build_x_compound_expr_from_list (d_init, ELK_INIT,
tf_warning_or_error);
d_init = resolve_nondeduced_context (d_init, tf_warning_or_error);
+ /* Force auto deduction now. Use tf_none to avoid redundant warnings
+ on deprecated-14.C. */
+ mark_single_function (d_init, tf_none);
}
enum auto_deduction_context adc = adc_variable_type;
if (DECL_DECOMPOSITION_P (decl))
@@ -18447,14 +18450,6 @@ cp_tree_node_structure (union lang_tree_node * t)
}
}
-/* Build the void_list_node (void_type_node having been created). */
-tree
-build_void_list_node (void)
-{
- tree t = build_tree_list (NULL_TREE, void_type_node);
- return t;
-}
-
bool
cp_missing_noreturn_ok_p (tree decl)
{
@@ -18553,8 +18548,8 @@ build_explicit_specifier (tree expr, tsubst_flags_t complain)
return expr;
expr = build_converted_constant_bool_expr (expr, complain);
- expr = instantiate_non_dependent_expr_sfinae (expr, complain);
- expr = cxx_constant_value (expr);
+ expr = instantiate_non_dependent_expr (expr, complain);
+ expr = cxx_constant_value (expr, complain);
return expr;
}
diff --git a/gcc/cp/decl2.cc b/gcc/cp/decl2.cc
index cd18881..9f18466 100644
--- a/gcc/cp/decl2.cc
+++ b/gcc/cp/decl2.cc
@@ -2851,7 +2851,7 @@ determine_visibility (tree decl)
if (class_type)
determine_visibility_from_class (decl, class_type);
- if (decl_anon_ns_mem_p (decl))
+ if (decl_internal_context_p (decl))
/* Names in an anonymous namespace get internal linkage. */
constrain_visibility (decl, VISIBILITY_ANON, false);
else if (TREE_CODE (decl) != TYPE_DECL)
@@ -2965,16 +2965,21 @@ constrain_class_visibility (tree type)
{
if (same_type_p (TREE_TYPE (t), nlt))
warning (OPT_Wsubobject_linkage, "\
-%qT has a field %qD whose type has no linkage",
+%qT has a field %q#D whose type has no linkage",
type, t);
else
warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type depends on the type %qT which has no linkage",
type, t, nlt);
}
- else
+ else if (cxx_dialect > cxx98
+ && !decl_anon_ns_mem_p (ftype))
warning (OPT_Wsubobject_linkage, "\
-%qT has a field %qD whose type uses the anonymous namespace",
+%qT has a field %q#D whose type has internal linkage",
+ type, t);
+ else // In C++98 this can only happen with unnamed namespaces.
+ warning (OPT_Wsubobject_linkage, "\
+%qT has a field %q#D whose type uses the anonymous namespace",
type, t);
}
}
@@ -2989,28 +2994,34 @@ constrain_class_visibility (tree type)
binfo = TYPE_BINFO (type);
for (i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
{
- int subvis = type_visibility (TREE_TYPE (t));
+ tree btype = BINFO_TYPE (t);
+ int subvis = type_visibility (btype);
if (subvis == VISIBILITY_ANON)
{
if (!in_main_input_context())
{
- tree nlt = no_linkage_check (TREE_TYPE (t), /*relaxed_p=*/false);
+ tree nlt = no_linkage_check (btype, /*relaxed_p=*/false);
if (nlt)
{
- if (same_type_p (TREE_TYPE (t), nlt))
+ if (same_type_p (btype, nlt))
warning (OPT_Wsubobject_linkage, "\
-%qT has a base %qT whose type has no linkage",
- type, TREE_TYPE (t));
+%qT has a base %qT which has no linkage",
+ type, btype);
else
warning (OPT_Wsubobject_linkage, "\
-%qT has a base %qT whose type depends on the type %qT which has no linkage",
- type, TREE_TYPE (t), nlt);
+%qT has a base %qT which depends on the type %qT which has no linkage",
+ type, btype, nlt);
}
- else
+ else if (cxx_dialect > cxx98
+ && !decl_anon_ns_mem_p (btype))
+ warning (OPT_Wsubobject_linkage, "\
+%qT has a base %qT which has internal linkage",
+ type, btype);
+ else // In C++98 this can only happen with unnamed namespaces.
warning (OPT_Wsubobject_linkage, "\
-%qT has a base %qT whose type uses the anonymous namespace",
- type, TREE_TYPE (t));
+%qT has a base %qT which uses the anonymous namespace",
+ type, btype);
}
}
else if (vis < VISIBILITY_HIDDEN
@@ -5564,7 +5575,7 @@ mark_single_function (tree expr, tsubst_flags_t complain)
wrong, true otherwise. */
bool
-mark_used (tree decl, tsubst_flags_t complain)
+mark_used (tree decl, tsubst_flags_t complain /* = tf_warning_or_error */)
{
/* If we're just testing conversions or resolving overloads, we
don't want any permanent effects like forcing functions to be
@@ -5806,12 +5817,6 @@ mark_used (tree decl, tsubst_flags_t complain)
return true;
}
-bool
-mark_used (tree decl)
-{
- return mark_used (decl, tf_warning_or_error);
-}
-
tree
vtv_start_verification_constructor_init_function (void)
{
diff --git a/gcc/cp/except.cc b/gcc/cp/except.cc
index da0a65c..048612d 100644
--- a/gcc/cp/except.cc
+++ b/gcc/cp/except.cc
@@ -1256,8 +1256,8 @@ build_noexcept_spec (tree expr, tsubst_flags_t complain)
&& !instantiation_dependent_expression_p (expr))
{
expr = build_converted_constant_bool_expr (expr, complain);
- expr = instantiate_non_dependent_expr_sfinae (expr, complain);
- expr = cxx_constant_value (expr);
+ expr = instantiate_non_dependent_expr (expr, complain);
+ expr = cxx_constant_value (expr, complain);
}
if (TREE_CODE (expr) == INTEGER_CST)
{
diff --git a/gcc/cp/init.cc b/gcc/cp/init.cc
index edca843..a85c303 100644
--- a/gcc/cp/init.cc
+++ b/gcc/cp/init.cc
@@ -3158,7 +3158,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
"%<new%> of %<initializer_list%> does not "
"extend the lifetime of the underlying array");
- if (abstract_virtuals_error_sfinae (ACU_NEW, elt_type, complain))
+ if (abstract_virtuals_error (ACU_NEW, elt_type, complain))
return error_mark_node;
is_initialized = (type_build_ctor_call (elt_type) || *init != NULL);
diff --git a/gcc/cp/lambda.cc b/gcc/cp/lambda.cc
index 3fb98a9..3ee1fe9 100644
--- a/gcc/cp/lambda.cc
+++ b/gcc/cp/lambda.cc
@@ -198,6 +198,7 @@ type_deducible_expression_p (tree expr)
tree t = non_reference (TREE_TYPE (expr));
return (t && TREE_CODE (t) != TYPE_PACK_EXPANSION
&& !WILDCARD_TYPE_P (t) && !LAMBDA_TYPE_P (t)
+ && !array_of_unknown_bound_p (t)
&& !type_uses_auto (t));
}
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index f27f4d0..1a1ff5b 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -4734,7 +4734,8 @@ friend_from_decl_list (tree frnd)
if (TYPE_P (frnd))
{
res = TYPE_NAME (frnd);
- if (CLASSTYPE_TEMPLATE_INFO (frnd))
+ if (CLASS_TYPE_P (frnd)
+ && CLASSTYPE_TEMPLATE_INFO (frnd))
tmpl = CLASSTYPE_TI_TEMPLATE (frnd);
}
else if (DECL_TEMPLATE_INFO (frnd))
@@ -12121,7 +12122,7 @@ trees_in::read_class_def (tree defn, tree maybe_template)
{
tree f = TREE_VALUE (friend_classes);
- if (TYPE_P (f))
+ if (CLASS_TYPE_P (f))
{
CLASSTYPE_BEFRIENDING_CLASSES (f)
= tree_cons (NULL_TREE, type,
diff --git a/gcc/cp/name-lookup.cc b/gcc/cp/name-lookup.cc
index f89a1dc..69d555d 100644
--- a/gcc/cp/name-lookup.cc
+++ b/gcc/cp/name-lookup.cc
@@ -402,7 +402,7 @@ add_decl_to_level (cp_binding_level *b, tree decl)
&& ((VAR_P (decl) && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
|| (TREE_CODE (decl) == FUNCTION_DECL
&& (!TREE_PUBLIC (decl)
- || decl_anon_ns_mem_p (decl)
+ || decl_internal_context_p (decl)
|| DECL_DECLARED_INLINE_P (decl)))))
vec_safe_push (static_decls, decl);
}
diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc
index 841ba6e..3cbe0d6 100644
--- a/gcc/cp/parser.cc
+++ b/gcc/cp/parser.cc
@@ -36938,10 +36938,9 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
cp_id_kind idk = CP_ID_KIND_NONE;
cp_lexer_consume_token (parser->lexer);
decl = convert_from_reference (decl);
- decl
- = cp_parser_postfix_dot_deref_expression (parser, ttype,
- decl, false,
- &idk, loc);
+ decl = (cp_parser_postfix_dot_deref_expression
+ (parser, ttype, cp_expr (decl, token->location),
+ false, &idk, loc));
}
/* FALLTHROUGH. */
case OMP_CLAUSE_AFFINITY:
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index c5fc0f1..db4e808 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -3868,8 +3868,8 @@ expand_integer_pack (tree call, tree args, tsubst_flags_t complain,
}
else
{
- hi = instantiate_non_dependent_expr_sfinae (hi, complain);
- hi = cxx_constant_value (hi);
+ hi = instantiate_non_dependent_expr (hi, complain);
+ hi = cxx_constant_value (hi, complain);
int len = valid_constant_size_p (hi) ? tree_to_shwi (hi) : -1;
/* Calculate the largest value of len that won't make the size of the vec
@@ -6428,7 +6428,7 @@ redeclare_class_template (tree type, tree parms, tree cons)
return true;
}
-/* The actual substitution part of instantiate_non_dependent_expr_sfinae,
+/* The actual substitution part of instantiate_non_dependent_expr,
to be used when the caller has already checked
!instantiation_dependent_uneval_expression_p (expr)
and cleared processing_template_decl. */
@@ -6447,7 +6447,8 @@ instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain)
/* Instantiate the non-dependent expression EXPR. */
tree
-instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
+instantiate_non_dependent_expr (tree expr,
+ tsubst_flags_t complain /* = tf_error */)
{
if (expr == NULL_TREE)
return NULL_TREE;
@@ -6462,12 +6463,6 @@ instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
return expr;
}
-tree
-instantiate_non_dependent_expr (tree expr)
-{
- return instantiate_non_dependent_expr_sfinae (expr, tf_error);
-}
-
/* Like instantiate_non_dependent_expr, but return NULL_TREE if the
expression is dependent or non-constant. */
@@ -13616,6 +13611,9 @@ tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl)
if (t == error_mark_node)
return error_mark_node;
+ /* In "sizeof(X<I>)" we need to evaluate "I". */
+ cp_evaluated ev;
+
const int len = TREE_VEC_LENGTH (t);
tree *elts = XALLOCAVEC (tree, len);
int expanded_len_adjust = 0;
@@ -13888,9 +13886,6 @@ tsubst_aggr_type (tree t,
tree argvec;
tree r;
- /* In "sizeof(X<I>)" we need to evaluate "I". */
- cp_evaluated ev;
-
/* Figure out what arguments are appropriate for the
type we are trying to find. For example, given:
@@ -14317,6 +14312,8 @@ tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
/*function_p=*/false,
/*i_c_e_p=*/true);
spec = build_explicit_specifier (spec, complain);
+ if (spec == error_mark_node)
+ return error_mark_node;
if (instantiation_dependent_expression_p (spec))
store_explicit_specifier (r, spec);
else
@@ -16970,7 +16967,7 @@ tsubst_init (tree init, tree decl, tree args,
zero. */
init = build_value_init (type, complain);
if (TREE_CODE (init) == AGGR_INIT_EXPR)
- init = get_target_expr_sfinae (init, complain);
+ init = get_target_expr (init, complain);
if (TREE_CODE (init) == TARGET_EXPR)
TARGET_EXPR_DIRECT_INIT_P (init) = true;
}
@@ -20124,7 +20121,7 @@ fold_targs_r (tree targs, tsubst_flags_t complain)
&& !glvalue_p (elt)
&& !TREE_CONSTANT (elt))
{
- elt = cxx_constant_value_sfinae (elt, NULL_TREE, complain);
+ elt = cxx_constant_value (elt, complain);
if (elt == error_mark_node)
return false;
}
@@ -25025,7 +25022,7 @@ mark_decl_instantiated (tree result, int extern_p)
return;
/* For anonymous namespace we don't need to do anything. */
- if (decl_anon_ns_mem_p (result))
+ if (decl_internal_context_p (result))
{
gcc_assert (!TREE_PUBLIC (result));
return;
@@ -28082,11 +28079,11 @@ type_dependent_expression_p (tree expression)
If the array has no length and has an initializer, it must be that
we couldn't determine its length in cp_complete_array_type because
it is dependent. */
- if (VAR_P (expression)
+ if (((VAR_P (expression) && DECL_INITIAL (expression))
+ || COMPOUND_LITERAL_P (expression))
&& TREE_TYPE (expression) != NULL_TREE
&& TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE
- && !TYPE_DOMAIN (TREE_TYPE (expression))
- && DECL_INITIAL (expression))
+ && !TYPE_DOMAIN (TREE_TYPE (expression)))
return true;
/* Pull a FUNCTION_DECL out of a BASELINK if we can. */
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index 6bda30e..8656207 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -1042,7 +1042,7 @@ finish_if_stmt_cond (tree orig_cond, tree if_stmt)
{
maybe_warn_for_constant_evaluated (cond, /*constexpr_if=*/true);
cond = instantiate_non_dependent_expr (cond);
- cond = cxx_constant_value (cond, NULL_TREE);
+ cond = cxx_constant_value (cond);
}
else
{
@@ -3318,7 +3318,7 @@ finish_compound_literal (tree type, tree compound_literal,
/* The CONSTRUCTOR is now an initializer, not a compound literal. */
if (TREE_CODE (compound_literal) == CONSTRUCTOR)
TREE_HAS_CONSTRUCTOR (compound_literal) = false;
- compound_literal = get_target_expr_sfinae (compound_literal, complain);
+ compound_literal = get_target_expr (compound_literal, complain);
}
else
/* For e.g. int{42} just make sure it's a prvalue. */
@@ -6755,11 +6755,18 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
break;
}
+ tree *grp_start_p = NULL, grp_sentinel = NULL_TREE;
+
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool field_ok = false;
+ /* We've reached the end of a list of expanded nodes. Reset the group
+ start pointer. */
+ if (c == grp_sentinel)
+ grp_start_p = NULL;
+
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
@@ -7982,6 +7989,9 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
+ grp_start_p = pc;
+ grp_sentinel = OMP_CLAUSE_CHAIN (c);
+
if (handle_omp_array_sections (c, ort))
remove = true;
else
@@ -8109,6 +8119,10 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = TREE_OPERAND (t, 1);
STRIP_NOPS (t);
}
+ if (TREE_CODE (t) == COMPONENT_REF
+ && invalid_nonstatic_memfn_p (EXPR_LOCATION (t), t,
+ tf_warning_or_error))
+ remove = true;
indir_component_ref_p = false;
if (TREE_CODE (t) == COMPONENT_REF
&& (TREE_CODE (TREE_OPERAND (t, 0)) == INDIRECT_REF
@@ -8353,6 +8367,9 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_ATTACH_DETACH))
{
+ grp_start_p = pc;
+ grp_sentinel = OMP_CLAUSE_CHAIN (c);
+
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
if (TREE_CODE (t) == COMPONENT_REF)
@@ -8763,7 +8780,18 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
}
if (remove)
- *pc = OMP_CLAUSE_CHAIN (c);
+ {
+ if (grp_start_p)
+ {
+ /* If we found a clause to remove, we want to remove the whole
+ expanded group, otherwise gimplify can get confused. */
+ *grp_start_p = grp_sentinel;
+ pc = grp_start_p;
+ grp_start_p = NULL;
+ }
+ else
+ *pc = OMP_CLAUSE_CHAIN (c);
+ }
else
pc = &OMP_CLAUSE_CHAIN (c);
}
@@ -11329,7 +11357,7 @@ finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
}
else if (processing_template_decl)
{
- expr = instantiate_non_dependent_expr_sfinae (expr, complain|tf_decltype);
+ expr = instantiate_non_dependent_expr (expr, complain|tf_decltype);
if (expr == error_mark_node)
return error_mark_node;
/* Keep processing_template_decl cleared for the rest of the function
@@ -12500,7 +12528,7 @@ cp_build_bit_cast (location_t loc, tree type, tree arg,
SET_EXPR_LOCATION (ret, loc);
if (!processing_template_decl && CLASS_TYPE_P (type))
- ret = get_target_expr_sfinae (ret, complain);
+ ret = get_target_expr (ret, complain);
return ret;
}
diff --git a/gcc/cp/tree.cc b/gcc/cp/tree.cc
index c678e3b..d0bd41a 100644
--- a/gcc/cp/tree.cc
+++ b/gcc/cp/tree.cc
@@ -713,7 +713,7 @@ build_cplus_new (tree type, tree init, tsubst_flags_t complain)
/* Make sure that we're not trying to create an instance of an
abstract class. */
- if (abstract_virtuals_error_sfinae (NULL_TREE, type, complain))
+ if (abstract_virtuals_error (NULL_TREE, type, complain))
return error_mark_node;
if (TREE_CODE (rval) == AGGR_INIT_EXPR)
@@ -922,7 +922,7 @@ force_target_expr (tree type, tree init, tsubst_flags_t complain)
/* Like build_target_expr_with_type, but use the type of INIT. */
tree
-get_target_expr_sfinae (tree init, tsubst_flags_t complain)
+get_target_expr (tree init, tsubst_flags_t complain /* = tf_warning_or_error */)
{
if (TREE_CODE (init) == AGGR_INIT_EXPR)
return build_target_expr (AGGR_INIT_EXPR_SLOT (init), init, complain);
@@ -935,12 +935,6 @@ get_target_expr_sfinae (tree init, tsubst_flags_t complain)
}
}
-tree
-get_target_expr (tree init)
-{
- return get_target_expr_sfinae (init, tf_warning_or_error);
-}
-
/* If EXPR is a bitfield reference, convert it to the declared type of
the bitfield, and return the resulting expression. Otherwise,
return EXPR itself. */
@@ -2968,7 +2962,7 @@ verify_stmt_tree (tree t)
/* Check if the type T depends on a type with no linkage and if so,
return it. If RELAXED_P then do not consider a class type declared
within a vague-linkage function to have no linkage. Remember:
- no-linkage is not the same as internal-linkage*/
+ no-linkage is not the same as internal-linkage. */
tree
no_linkage_check (tree t, bool relaxed_p)
@@ -3817,7 +3811,15 @@ decl_namespace_context (tree decl)
nested, or false otherwise. */
bool
-decl_anon_ns_mem_p (const_tree decl)
+decl_anon_ns_mem_p (tree decl)
+{
+ return !TREE_PUBLIC (decl_namespace_context (decl));
+}
+
+/* Returns true if the enclosing scope of DECL has internal or no linkage. */
+
+bool
+decl_internal_context_p (const_tree decl)
{
while (TREE_CODE (decl) != NAMESPACE_DECL)
{
diff --git a/gcc/cp/typeck.cc b/gcc/cp/typeck.cc
index b99947c..22d834d 100644
--- a/gcc/cp/typeck.cc
+++ b/gcc/cp/typeck.cc
@@ -71,7 +71,8 @@ static bool is_std_forward_p (tree);
complete type when this function returns. */
tree
-require_complete_type_sfinae (tree value, tsubst_flags_t complain)
+require_complete_type (tree value,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
tree type;
@@ -96,12 +97,6 @@ require_complete_type_sfinae (tree value, tsubst_flags_t complain)
return error_mark_node;
}
-tree
-require_complete_type (tree value)
-{
- return require_complete_type_sfinae (value, tf_warning_or_error);
-}
-
/* Try to complete TYPE, if it is incomplete. For example, if TYPE is
a template instantiation, do the instantiation. Returns TYPE,
whether or not it could be completed, unless something goes
@@ -2201,7 +2196,8 @@ invalid_nonstatic_memfn_p (location_t loc, tree expr, tsubst_flags_t complain)
return false;
if (is_overloaded_fn (expr) && !really_overloaded_fn (expr))
expr = get_first_fn (expr);
- if (DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
+ if (TREE_TYPE (expr)
+ && DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
{
if (complain & tf_error)
{
@@ -3899,7 +3895,7 @@ cp_build_array_ref (location_t loc, tree array, tree idx,
|= (CP_TYPE_VOLATILE_P (type) | TREE_SIDE_EFFECTS (array));
TREE_THIS_VOLATILE (rval)
|= (CP_TYPE_VOLATILE_P (type) | TREE_THIS_VOLATILE (array));
- ret = require_complete_type_sfinae (rval, complain);
+ ret = require_complete_type (rval, complain);
protected_set_expr_location (ret, loc);
if (non_lvalue)
ret = non_lvalue_loc (loc, ret);
@@ -4464,7 +4460,7 @@ convert_arguments (tree typelist, vec<tree, va_gc> **values, tree fndecl,
/* Don't do ellipsis conversion for __built_in_constant_p
as this will result in spurious errors for non-trivial
types. */
- val = require_complete_type_sfinae (val, complain);
+ val = require_complete_type (val, complain);
else
val = convert_arg_to_ellipsis (val, complain);
@@ -6264,7 +6260,7 @@ cp_build_binary_op (const op_location_t &location,
instrument_expr, result);
if (resultcode == SPACESHIP_EXPR && !processing_template_decl)
- result = get_target_expr_sfinae (result, complain);
+ result = get_target_expr (result, complain);
if (!c_inhibit_evaluation_warnings)
{
@@ -8016,7 +8012,7 @@ build_static_cast_1 (location_t loc, tree type, tree expr, bool c_cast_p,
/* [class.abstract]
An abstract class shall not be used ... as the type of an explicit
conversion. */
- if (abstract_virtuals_error_sfinae (ACU_CAST, type, complain))
+ if (abstract_virtuals_error (ACU_CAST, type, complain))
return error_mark_node;
/* [expr.static.cast]
@@ -9144,7 +9140,7 @@ cp_build_modify_expr (location_t loc, tree lhs, enum tree_code modifycode,
}
else
{
- lhs = require_complete_type_sfinae (lhs, complain);
+ lhs = require_complete_type (lhs, complain);
if (lhs == error_mark_node)
return error_mark_node;
@@ -10123,7 +10119,7 @@ convert_for_initialization (tree exp, tree type, tree rhs, int flags,
}
if (exp != 0)
- exp = require_complete_type_sfinae (exp, complain);
+ exp = require_complete_type (exp, complain);
if (exp == error_mark_node)
return error_mark_node;
diff --git a/gcc/cp/typeck2.cc b/gcc/cp/typeck2.cc
index 1a96be3..688e9c1 100644
--- a/gcc/cp/typeck2.cc
+++ b/gcc/cp/typeck2.cc
@@ -130,8 +130,8 @@ cxx_readonly_error (location_t loc, tree arg, enum lvalue_use errstring)
all was well. */
static int
-abstract_virtuals_error_sfinae (tree decl, tree type, abstract_class_use use,
- tsubst_flags_t complain)
+abstract_virtuals_error (tree decl, tree type, abstract_class_use use,
+ tsubst_flags_t complain)
{
vec<tree, va_gc> *pure;
@@ -251,32 +251,19 @@ abstract_virtuals_error_sfinae (tree decl, tree type, abstract_class_use use,
}
int
-abstract_virtuals_error_sfinae (tree decl, tree type, tsubst_flags_t complain)
-{
- return abstract_virtuals_error_sfinae (decl, type, ACU_UNKNOWN, complain);
-}
-
-int
-abstract_virtuals_error_sfinae (abstract_class_use use, tree type,
- tsubst_flags_t complain)
+abstract_virtuals_error (tree decl, tree type,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
- return abstract_virtuals_error_sfinae (NULL_TREE, type, use, complain);
+ return abstract_virtuals_error (decl, type, ACU_UNKNOWN, complain);
}
-
-/* Wrapper for the above function in the common case of wanting errors. */
-
int
-abstract_virtuals_error (tree decl, tree type)
+abstract_virtuals_error (abstract_class_use use, tree type,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
- return abstract_virtuals_error_sfinae (decl, type, tf_warning_or_error);
+ return abstract_virtuals_error (NULL_TREE, type, use, complain);
}
-int
-abstract_virtuals_error (abstract_class_use use, tree type)
-{
- return abstract_virtuals_error_sfinae (use, type, tf_warning_or_error);
-}
/* Print an inform about the declaration of the incomplete type TYPE. */
@@ -2502,7 +2489,7 @@ build_functional_cast_1 (location_t loc, tree exp, tree parms,
if (!complete_type_or_maybe_complain (type, NULL_TREE, complain))
return error_mark_node;
- if (abstract_virtuals_error_sfinae (ACU_CAST, type, complain))
+ if (abstract_virtuals_error (ACU_CAST, type, complain))
return error_mark_node;
/* [expr.type.conv]
@@ -2523,7 +2510,7 @@ build_functional_cast_1 (location_t loc, tree exp, tree parms,
if (parms == NULL_TREE)
{
exp = build_value_init (type, complain);
- exp = get_target_expr_sfinae (exp, complain);
+ exp = get_target_expr (exp, complain);
return exp;
}
diff --git a/gcc/d/ChangeLog b/gcc/d/ChangeLog
index eae3ac1..9bb29d3 100644
--- a/gcc/d/ChangeLog
+++ b/gcc/d/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * d-builtins.cc (d_build_c_type_nodes): Do not initialize
+ void_list_node.
+
2022-08-27 Iain Buclaw <ibuclaw@gdcproject.org>
* dmd/MERGE: Merge upstream dmd 817610b16d.
diff --git a/gcc/d/d-builtins.cc b/gcc/d/d-builtins.cc
index c2ef0c8..5997e5d 100644
--- a/gcc/d/d-builtins.cc
+++ b/gcc/d/d-builtins.cc
@@ -889,7 +889,6 @@ static GTY(()) tree signed_size_type_node;
static void
d_build_c_type_nodes (void)
{
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
string_type_node = build_pointer_type (char_type_node);
const_string_type_node
= build_pointer_type (build_qualified_type (char_type_node,
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 8a34690..46eeb98 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -1104,7 +1104,7 @@ add, subtract, multiply, divide; unary arithmetic operators;
relational operators; equality operators; and conversions to and from
integer and other floating types. Use a suffix @samp{w} or @samp{W}
in a literal constant of type @code{__float80} or type
-@code{__ibm128}. Use a suffix @samp{q} or @samp{Q} for @code{_float128}.
+@code{__ibm128}. Use a suffix @samp{q} or @samp{Q} for @code{__float128}.
In order to use @code{_Float128}, @code{__float128}, and @code{__ibm128}
on PowerPC Linux systems, you must use the @option{-mfloat128} option. It is
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index a28d3a0..a134df7 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -29840,6 +29840,13 @@ Generate (do not generate) code that will run in privileged state.
@opindex no-block-ops-unaligned-vsx
Generate (do not generate) unaligned vsx loads and stores for
inline expansion of @code{memcpy} and @code{memmove}.
+
+@item --param rs6000-vect-unroll-limit=
+The vectorizer will check with target information to determine whether it
+would be beneficial to unroll the main vectorized loop and by how much. This
+parameter sets the upper bound of how much the vectorizer will unroll the main
+loop. The default value is four.
+
@end table
@node RX Options
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 676f89f..f5f8ac0 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,31 @@
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * libgfortran.h: Declare GFC_FPE_AWAY.
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/106857
+ * simplify.cc (gfc_simplify_pack): Check for NULL pointer dereferences
+ while walking through constructors (error recovery).
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+ Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/104314
+ * resolve.cc (deferred_op_assign): Do not try to generate temporary
+ for deferred character length assignment if types do not agree.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * f95-lang.cc (gfc_init_decl_processing): Do not initialize
+ void_list_node.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * trans-openmp.cc (gfc_trans_omp_clauses): Don't create
+ GOMP_MAP_TO_PSET mappings for class metadata, nor GOMP_MAP_POINTER
+ mappings for POINTER_TYPE_P decls.
+
2022-09-10 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
PR fortran/95644
diff --git a/gcc/fortran/f95-lang.cc b/gcc/fortran/f95-lang.cc
index ff4bf80..a6750be 100644
--- a/gcc/fortran/f95-lang.cc
+++ b/gcc/fortran/f95-lang.cc
@@ -530,8 +530,6 @@ gfc_init_decl_processing (void)
only use it for actual characters, not for INTEGER(1). */
build_common_tree_nodes (false);
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
-
/* Set up F95 type nodes. */
gfc_init_kinds ();
gfc_init_types ();
diff --git a/gcc/fortran/libgfortran.h b/gcc/fortran/libgfortran.h
index ef06194..79a8c2f 100644
--- a/gcc/fortran/libgfortran.h
+++ b/gcc/fortran/libgfortran.h
@@ -60,6 +60,7 @@ along with GCC; see the file COPYING3. If not see
#define GFC_FPE_TONEAREST 2
#define GFC_FPE_TOWARDZERO 3
#define GFC_FPE_UPWARD 4
+#define GFC_FPE_AWAY 5
/* Size of the buffer required to store FPU state for any target.
In particular, this has to be larger than fenv_t on all glibc targets.
diff --git a/gcc/fortran/resolve.cc b/gcc/fortran/resolve.cc
index ca11475..ae7ebb6 100644
--- a/gcc/fortran/resolve.cc
+++ b/gcc/fortran/resolve.cc
@@ -11803,6 +11803,7 @@ deferred_op_assign (gfc_code **code, gfc_namespace *ns)
if (!((*code)->expr1->ts.type == BT_CHARACTER
&& (*code)->expr1->ts.deferred && (*code)->expr1->rank
+ && (*code)->expr2->ts.type == BT_CHARACTER
&& (*code)->expr2->expr_type == EXPR_OP))
return false;
diff --git a/gcc/fortran/simplify.cc b/gcc/fortran/simplify.cc
index bc178d5..140c177 100644
--- a/gcc/fortran/simplify.cc
+++ b/gcc/fortran/simplify.cc
@@ -6431,7 +6431,7 @@ gfc_simplify_pack (gfc_expr *array, gfc_expr *mask, gfc_expr *vector)
/* Copy only those elements of ARRAY to RESULT whose
MASK equals .TRUE.. */
mask_ctor = gfc_constructor_first (mask->value.constructor);
- while (mask_ctor)
+ while (mask_ctor && array_ctor)
{
if (mask_ctor->expr->value.logical)
{
diff --git a/gcc/fortran/trans-openmp.cc b/gcc/fortran/trans-openmp.cc
index 1be7d23..8e9d534 100644
--- a/gcc/fortran/trans-openmp.cc
+++ b/gcc/fortran/trans-openmp.cc
@@ -3125,30 +3125,16 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
tree present = gfc_omp_check_optional_argument (decl, true);
if (openacc && n->sym->ts.type == BT_CLASS)
{
- tree type = TREE_TYPE (decl);
if (n->sym->attr.optional)
sorry ("optional class parameter");
- if (POINTER_TYPE_P (type))
- {
- node4 = build_omp_clause (input_location,
- OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (node4, GOMP_MAP_POINTER);
- OMP_CLAUSE_DECL (node4) = decl;
- OMP_CLAUSE_SIZE (node4) = size_int (0);
- decl = build_fold_indirect_ref (decl);
- }
tree ptr = gfc_class_data_get (decl);
ptr = build_fold_indirect_ref (ptr);
OMP_CLAUSE_DECL (node) = ptr;
OMP_CLAUSE_SIZE (node) = gfc_class_vtab_size_get (decl);
node2 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (node2, GOMP_MAP_TO_PSET);
- OMP_CLAUSE_DECL (node2) = decl;
- OMP_CLAUSE_SIZE (node2) = TYPE_SIZE_UNIT (type);
- node3 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_ATTACH_DETACH);
- OMP_CLAUSE_DECL (node3) = gfc_class_data_get (decl);
- OMP_CLAUSE_SIZE (node3) = size_int (0);
+ OMP_CLAUSE_SET_MAP_KIND (node2, GOMP_MAP_ATTACH_DETACH);
+ OMP_CLAUSE_DECL (node2) = gfc_class_data_get (decl);
+ OMP_CLAUSE_SIZE (node2) = size_int (0);
goto finalize_map_clause;
}
else if (POINTER_TYPE_P (TREE_TYPE (decl))
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index a170478..9055cd8 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -5512,6 +5512,7 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace)
{
location_t loc = gimple_location (stmt);
gimple *new_stmt = gimple_build_builtin_unreachable (loc);
+ gimple_call_set_ctrl_altering (new_stmt, false);
/* If the call had a SSA name as lhs morph that into
an uninitialized value. */
if (lhs && TREE_CODE (lhs) == SSA_NAME)
diff --git a/gcc/gimple-range-fold.cc b/gcc/gimple-range-fold.cc
index 85ed6f9..a45fc7a 100644
--- a/gcc/gimple-range-fold.cc
+++ b/gcc/gimple-range-fold.cc
@@ -1030,7 +1030,7 @@ fold_using_range::range_of_builtin_int_call (irange &r, gcall *call,
if (src.get_operand (tmp, arg))
{
bool signbit;
- if (tmp.known_signbit (signbit))
+ if (tmp.signbit_p (signbit))
{
if (signbit)
r.set_nonzero (type);
diff --git a/gcc/gimplify.cc b/gcc/gimplify.cc
index dcdc852..4d032c6 100644
--- a/gcc/gimplify.cc
+++ b/gcc/gimplify.cc
@@ -125,12 +125,8 @@ enum gimplify_omp_var_data
/* Flag for GOVD_REDUCTION: inscan seen in {in,ex}clusive clause. */
GOVD_REDUCTION_INSCAN = 0x2000000,
- /* Flag for GOVD_MAP: (struct) vars that have pointer attachments for
- fields. */
- GOVD_MAP_HAS_ATTACHMENTS = 0x4000000,
-
/* Flag for GOVD_FIRSTPRIVATE: OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT. */
- GOVD_FIRSTPRIVATE_IMPLICIT = 0x8000000,
+ GOVD_FIRSTPRIVATE_IMPLICIT = 0x4000000,
GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
| GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR
@@ -8795,73 +8791,66 @@ gimplify_omp_depend (tree *list_p, gimple_seq *pre_p)
return 1;
}
-/* Insert a GOMP_MAP_ALLOC or GOMP_MAP_RELEASE node following a
- GOMP_MAP_STRUCT mapping. C is an always_pointer mapping. STRUCT_NODE is
- the struct node to insert the new mapping after (when the struct node is
- initially created). PREV_NODE is the first of two or three mappings for a
- pointer, and is either:
- - the node before C, when a pair of mappings is used, e.g. for a C/C++
- array section.
- - not the node before C. This is true when we have a reference-to-pointer
- type (with a mapping for the reference and for the pointer), or for
- Fortran derived-type mappings with a GOMP_MAP_TO_PSET.
- If SCP is non-null, the new node is inserted before *SCP.
- if SCP is null, the new node is inserted before PREV_NODE.
- The return type is:
- - PREV_NODE, if SCP is non-null.
- - The newly-created ALLOC or RELEASE node, if SCP is null.
- - The second newly-created ALLOC or RELEASE node, if we are mapping a
- reference to a pointer. */
+/* For a set of mappings describing an array section pointed to by a struct
+ (or derived type, etc.) component, create an "alloc" or "release" node to
+ insert into a list following a GOMP_MAP_STRUCT node. For some types of
+ mapping (e.g. Fortran arrays with descriptors), an additional mapping may
+ be created that is inserted into the list of mapping nodes attached to the
+ directive being processed -- not part of the sorted list of nodes after
+ GOMP_MAP_STRUCT.
+
+ CODE is the code of the directive being processed. GRP_START and GRP_END
+ are the first and last of two or three nodes representing this array section
+ mapping (e.g. a data movement node like GOMP_MAP_{TO,FROM}, optionally a
+ GOMP_MAP_TO_PSET, and finally a GOMP_MAP_ALWAYS_POINTER). EXTRA_NODE is
+ filled with the additional node described above, if needed.
+
+ This function does not add the new nodes to any lists itself. It is the
+ responsibility of the caller to do that. */
static tree
-insert_struct_comp_map (enum tree_code code, tree c, tree struct_node,
- tree prev_node, tree *scp)
+build_omp_struct_comp_nodes (enum tree_code code, tree grp_start, tree grp_end,
+ tree *extra_node)
{
enum gomp_map_kind mkind
= (code == OMP_TARGET_EXIT_DATA || code == OACC_EXIT_DATA)
? GOMP_MAP_RELEASE : GOMP_MAP_ALLOC;
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
- tree cl = scp ? prev_node : c2;
+ gcc_assert (grp_start != grp_end);
+
+ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end), OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
- OMP_CLAUSE_DECL (c2) = unshare_expr (OMP_CLAUSE_DECL (c));
- OMP_CLAUSE_CHAIN (c2) = scp ? *scp : prev_node;
- if (OMP_CLAUSE_CHAIN (prev_node) != c
- && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
- && (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
- == GOMP_MAP_TO_PSET))
- OMP_CLAUSE_SIZE (c2) = OMP_CLAUSE_SIZE (OMP_CLAUSE_CHAIN (prev_node));
+ OMP_CLAUSE_DECL (c2) = unshare_expr (OMP_CLAUSE_DECL (grp_end));
+ OMP_CLAUSE_CHAIN (c2) = NULL_TREE;
+ tree grp_mid = NULL_TREE;
+ if (OMP_CLAUSE_CHAIN (grp_start) != grp_end)
+ grp_mid = OMP_CLAUSE_CHAIN (grp_start);
+
+ if (grp_mid
+ && OMP_CLAUSE_CODE (grp_mid) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (grp_mid) == GOMP_MAP_TO_PSET)
+ OMP_CLAUSE_SIZE (c2) = OMP_CLAUSE_SIZE (grp_mid);
else
OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (ptr_type_node);
- if (struct_node)
- OMP_CLAUSE_CHAIN (struct_node) = c2;
-
- /* We might need to create an additional mapping if we have a reference to a
- pointer (in C++). Don't do this if we have something other than a
- GOMP_MAP_ALWAYS_POINTER though, i.e. a GOMP_MAP_TO_PSET. */
- if (OMP_CLAUSE_CHAIN (prev_node) != c
- && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
- && ((OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
- == GOMP_MAP_ALWAYS_POINTER)
- || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
- == GOMP_MAP_ATTACH_DETACH)))
- {
- tree c4 = OMP_CLAUSE_CHAIN (prev_node);
- tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
+
+ if (grp_mid
+ && OMP_CLAUSE_CODE (grp_mid) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (grp_mid) == GOMP_MAP_ALWAYS_POINTER
+ || OMP_CLAUSE_MAP_KIND (grp_mid) == GOMP_MAP_ATTACH_DETACH))
+ {
+ tree c3
+ = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end), OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c3, mkind);
- OMP_CLAUSE_DECL (c3) = unshare_expr (OMP_CLAUSE_DECL (c4));
+ OMP_CLAUSE_DECL (c3) = unshare_expr (OMP_CLAUSE_DECL (grp_mid));
OMP_CLAUSE_SIZE (c3) = TYPE_SIZE_UNIT (ptr_type_node);
- OMP_CLAUSE_CHAIN (c3) = prev_node;
- if (!scp)
- OMP_CLAUSE_CHAIN (c2) = c3;
- else
- cl = c3;
- }
+ OMP_CLAUSE_CHAIN (c3) = NULL_TREE;
- if (scp)
- *scp = c2;
+ *extra_node = c3;
+ }
+ else
+ *extra_node = NULL_TREE;
- return cl;
+ return c2;
}
/* Strip ARRAY_REFS or an indirect ref off BASE, find the containing object,
@@ -8872,8 +8861,8 @@ insert_struct_comp_map (enum tree_code code, tree c, tree struct_node,
has array type, else return NULL. */
static tree
-extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
- poly_offset_int *poffsetp, tree *offsetp)
+extract_base_bit_offset (tree base, poly_int64 *bitposp,
+ poly_offset_int *poffsetp)
{
tree offset;
poly_int64 bitsize, bitpos;
@@ -8881,44 +8870,12 @@ extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
int unsignedp, reversep, volatilep = 0;
poly_offset_int poffset;
- if (base_ref)
- {
- *base_ref = NULL_TREE;
-
- while (TREE_CODE (base) == ARRAY_REF)
- base = TREE_OPERAND (base, 0);
-
- if (TREE_CODE (base) == INDIRECT_REF)
- base = TREE_OPERAND (base, 0);
- }
- else
- {
- if (TREE_CODE (base) == ARRAY_REF)
- {
- while (TREE_CODE (base) == ARRAY_REF)
- base = TREE_OPERAND (base, 0);
- if (TREE_CODE (base) != COMPONENT_REF
- || TREE_CODE (TREE_TYPE (base)) != ARRAY_TYPE)
- return NULL_TREE;
- }
- else if (TREE_CODE (base) == INDIRECT_REF
- && TREE_CODE (TREE_OPERAND (base, 0)) == COMPONENT_REF
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
- == REFERENCE_TYPE))
- base = TREE_OPERAND (base, 0);
- }
+ STRIP_NOPS (base);
base = get_inner_reference (base, &bitsize, &bitpos, &offset, &mode,
&unsignedp, &reversep, &volatilep);
- tree orig_base = base;
-
- if ((TREE_CODE (base) == INDIRECT_REF
- || (TREE_CODE (base) == MEM_REF
- && integer_zerop (TREE_OPERAND (base, 1))))
- && DECL_P (TREE_OPERAND (base, 0))
- && TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0))) == REFERENCE_TYPE)
- base = TREE_OPERAND (base, 0);
+ STRIP_NOPS (base);
if (offset && poly_int_tree_p (offset))
{
@@ -8933,216 +8890,897 @@ extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
*bitposp = bitpos;
*poffsetp = poffset;
- *offsetp = offset;
-
- /* Set *BASE_REF if BASE was a dereferenced reference variable. */
- if (base_ref && orig_base != base)
- *base_ref = orig_base;
return base;
}
-/* Returns true if EXPR is or contains (as a sub-component) BASE_PTR. */
+/* Used for topological sorting of mapping groups. UNVISITED means we haven't
+ started processing the group yet. The TEMPORARY mark is used when we first
+ encounter a group on a depth-first traversal, and the PERMANENT mark is used
+ when we have processed all the group's children (i.e. all the base pointers
+ referred to by the group's mapping nodes, recursively). */
-static bool
-is_or_contains_p (tree expr, tree base_ptr)
+enum omp_tsort_mark {
+ UNVISITED,
+ TEMPORARY,
+ PERMANENT
+};
+
+/* A group of OMP_CLAUSE_MAP nodes that correspond to a single "map"
+ clause. */
+
+struct omp_mapping_group {
+ tree *grp_start;
+ tree grp_end;
+ omp_tsort_mark mark;
+ /* If we've removed the group but need to reindex, mark the group as
+ deleted. */
+ bool deleted;
+ struct omp_mapping_group *sibling;
+ struct omp_mapping_group *next;
+};
+
+DEBUG_FUNCTION void
+debug_mapping_group (omp_mapping_group *grp)
{
- if ((TREE_CODE (expr) == INDIRECT_REF && TREE_CODE (base_ptr) == MEM_REF)
- || (TREE_CODE (expr) == MEM_REF && TREE_CODE (base_ptr) == INDIRECT_REF))
- return operand_equal_p (TREE_OPERAND (expr, 0),
- TREE_OPERAND (base_ptr, 0));
- while (!operand_equal_p (expr, base_ptr))
- {
- if (TREE_CODE (base_ptr) == COMPOUND_EXPR)
- base_ptr = TREE_OPERAND (base_ptr, 1);
- if (TREE_CODE (base_ptr) == COMPONENT_REF
- || TREE_CODE (base_ptr) == POINTER_PLUS_EXPR
- || TREE_CODE (base_ptr) == SAVE_EXPR)
- base_ptr = TREE_OPERAND (base_ptr, 0);
- else
- break;
+ tree tmp = OMP_CLAUSE_CHAIN (grp->grp_end);
+ OMP_CLAUSE_CHAIN (grp->grp_end) = NULL;
+ debug_generic_expr (*grp->grp_start);
+ OMP_CLAUSE_CHAIN (grp->grp_end) = tmp;
+}
+
+/* Return the OpenMP "base pointer" of an expression EXPR, or NULL if there
+ isn't one. */
+
+static tree
+omp_get_base_pointer (tree expr)
+{
+ while (TREE_CODE (expr) == ARRAY_REF
+ || TREE_CODE (expr) == COMPONENT_REF)
+ expr = TREE_OPERAND (expr, 0);
+
+ if (TREE_CODE (expr) == INDIRECT_REF
+ || (TREE_CODE (expr) == MEM_REF
+ && integer_zerop (TREE_OPERAND (expr, 1))))
+ {
+ expr = TREE_OPERAND (expr, 0);
+ while (TREE_CODE (expr) == COMPOUND_EXPR)
+ expr = TREE_OPERAND (expr, 1);
+ if (TREE_CODE (expr) == POINTER_PLUS_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+ if (TREE_CODE (expr) == SAVE_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+ STRIP_NOPS (expr);
+ return expr;
}
- return operand_equal_p (expr, base_ptr);
+
+ return NULL_TREE;
}
-/* Implement OpenMP 5.x map ordering rules for target directives. There are
- several rules, and with some level of ambiguity, hopefully we can at least
- collect the complexity here in one place. */
+/* Remove COMPONENT_REFS and indirections from EXPR. */
-static void
-omp_target_reorder_clauses (tree *list_p)
+static tree
+omp_strip_components_and_deref (tree expr)
{
- /* Collect refs to alloc/release/delete maps. */
- auto_vec<tree, 32> ard;
- tree *cp = list_p;
- while (*cp != NULL_TREE)
- if (OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP
- && (OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ALLOC
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_RELEASE
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_DELETE))
- {
- /* Unlink cp and push to ard. */
- tree c = *cp;
- tree nc = OMP_CLAUSE_CHAIN (c);
- *cp = nc;
- ard.safe_push (c);
-
- /* Any associated pointer type maps should also move along. */
- while (*cp != NULL_TREE
- && OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP
- && (OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_FIRSTPRIVATE_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ATTACH_DETACH
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ALWAYS_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_TO_PSET))
+ while (TREE_CODE (expr) == COMPONENT_REF
+ || TREE_CODE (expr) == INDIRECT_REF
+ || (TREE_CODE (expr) == MEM_REF
+ && integer_zerop (TREE_OPERAND (expr, 1)))
+ || TREE_CODE (expr) == POINTER_PLUS_EXPR
+ || TREE_CODE (expr) == COMPOUND_EXPR)
+ if (TREE_CODE (expr) == COMPOUND_EXPR)
+ expr = TREE_OPERAND (expr, 1);
+ else
+ expr = TREE_OPERAND (expr, 0);
+
+ STRIP_NOPS (expr);
+
+ return expr;
+}
+
+static tree
+omp_strip_indirections (tree expr)
+{
+ while (TREE_CODE (expr) == INDIRECT_REF
+ || (TREE_CODE (expr) == MEM_REF
+ && integer_zerop (TREE_OPERAND (expr, 1))))
+ expr = TREE_OPERAND (expr, 0);
+
+ return expr;
+}
+
+/* An attach or detach operation depends directly on the address being
+ attached/detached. Return that address, or none if there are no
+ attachments/detachments. */
+
+static tree
+omp_get_attachment (omp_mapping_group *grp)
+{
+ tree node = *grp->grp_start;
+
+ switch (OMP_CLAUSE_MAP_KIND (node))
+ {
+ case GOMP_MAP_TO:
+ case GOMP_MAP_FROM:
+ case GOMP_MAP_TOFROM:
+ case GOMP_MAP_ALWAYS_FROM:
+ case GOMP_MAP_ALWAYS_TO:
+ case GOMP_MAP_ALWAYS_TOFROM:
+ case GOMP_MAP_FORCE_FROM:
+ case GOMP_MAP_FORCE_TO:
+ case GOMP_MAP_FORCE_TOFROM:
+ case GOMP_MAP_FORCE_PRESENT:
+ case GOMP_MAP_ALLOC:
+ case GOMP_MAP_RELEASE:
+ case GOMP_MAP_DELETE:
+ case GOMP_MAP_FORCE_ALLOC:
+ if (node == grp->grp_end)
+ return NULL_TREE;
+
+ node = OMP_CLAUSE_CHAIN (node);
+ if (node && OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_TO_PSET)
+ {
+ gcc_assert (node != grp->grp_end);
+ node = OMP_CLAUSE_CHAIN (node);
+ }
+ if (node)
+ switch (OMP_CLAUSE_MAP_KIND (node))
{
- c = *cp;
- nc = OMP_CLAUSE_CHAIN (c);
- *cp = nc;
- ard.safe_push (c);
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_ALWAYS_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ return NULL_TREE;
+
+ case GOMP_MAP_ATTACH_DETACH:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return OMP_CLAUSE_DECL (node);
+
+ default:
+ internal_error ("unexpected mapping node");
}
- }
- else
- cp = &OMP_CLAUSE_CHAIN (*cp);
+ return error_mark_node;
+
+ case GOMP_MAP_TO_PSET:
+ gcc_assert (node != grp->grp_end);
+ node = OMP_CLAUSE_CHAIN (node);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_DETACH)
+ return OMP_CLAUSE_DECL (node);
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_ATTACH:
+ case GOMP_MAP_DETACH:
+ node = OMP_CLAUSE_CHAIN (node);
+ if (!node || *grp->grp_start == grp->grp_end)
+ return OMP_CLAUSE_DECL (*grp->grp_start);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
+ return OMP_CLAUSE_DECL (*grp->grp_start);
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_STRUCT:
+ case GOMP_MAP_FORCE_DEVICEPTR:
+ case GOMP_MAP_DEVICE_RESIDENT:
+ case GOMP_MAP_LINK:
+ case GOMP_MAP_IF_PRESENT:
+ case GOMP_MAP_FIRSTPRIVATE:
+ case GOMP_MAP_FIRSTPRIVATE_INT:
+ case GOMP_MAP_USE_DEVICE_PTR:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return NULL_TREE;
- /* Link alloc/release/delete maps to the end of list. */
- for (unsigned int i = 0; i < ard.length (); i++)
- {
- *cp = ard[i];
- cp = &OMP_CLAUSE_CHAIN (ard[i]);
+ default:
+ internal_error ("unexpected mapping node");
}
- *cp = NULL_TREE;
- /* OpenMP 5.0 requires that pointer variables are mapped before
- its use as a base-pointer. */
- auto_vec<tree *, 32> atf;
- for (tree *cp = list_p; *cp; cp = &OMP_CLAUSE_CHAIN (*cp))
- if (OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP)
+ return error_mark_node;
+}
+
+/* Given a pointer START_P to the start of a group of related (e.g. pointer)
+ mappings, return the chain pointer to the end of that group in the list. */
+
+static tree *
+omp_group_last (tree *start_p)
+{
+ tree c = *start_p, nc, *grp_last_p = start_p;
+
+ gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP);
+
+ nc = OMP_CLAUSE_CHAIN (c);
+
+ if (!nc || OMP_CLAUSE_CODE (nc) != OMP_CLAUSE_MAP)
+ return grp_last_p;
+
+ switch (OMP_CLAUSE_MAP_KIND (c))
+ {
+ default:
+ while (nc
+ && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ATTACH_DETACH
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
+ || (OMP_CLAUSE_MAP_KIND (nc)
+ == GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION)
+ || (OMP_CLAUSE_MAP_KIND (nc)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION)
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ALWAYS_POINTER
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_TO_PSET))
+ {
+ grp_last_p = &OMP_CLAUSE_CHAIN (c);
+ c = nc;
+ tree nc2 = OMP_CLAUSE_CHAIN (nc);
+ if (nc2
+ && OMP_CLAUSE_CODE (nc2) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc)
+ == GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION)
+ && OMP_CLAUSE_MAP_KIND (nc2) == GOMP_MAP_ATTACH)
+ {
+ grp_last_p = &OMP_CLAUSE_CHAIN (nc);
+ c = nc2;
+ nc2 = OMP_CLAUSE_CHAIN (nc2);
+ }
+ nc = nc2;
+ }
+ break;
+
+ case GOMP_MAP_ATTACH:
+ case GOMP_MAP_DETACH:
+ /* This is a weird artifact of how directives are parsed: bare attach or
+ detach clauses get a subsequent (meaningless) FIRSTPRIVATE_POINTER or
+ FIRSTPRIVATE_REFERENCE node. FIXME. */
+ if (nc
+ && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_POINTER))
+ grp_last_p = &OMP_CLAUSE_CHAIN (c);
+ break;
+
+ case GOMP_MAP_TO_PSET:
+ if (OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_DETACH))
+ grp_last_p = &OMP_CLAUSE_CHAIN (c);
+ break;
+
+ case GOMP_MAP_STRUCT:
{
- /* Collect alloc, to, from, to/from clause tree pointers. */
- gomp_map_kind k = OMP_CLAUSE_MAP_KIND (*cp);
- if (k == GOMP_MAP_ALLOC
- || k == GOMP_MAP_TO
- || k == GOMP_MAP_FROM
- || k == GOMP_MAP_TOFROM
- || k == GOMP_MAP_ALWAYS_TO
- || k == GOMP_MAP_ALWAYS_FROM
- || k == GOMP_MAP_ALWAYS_TOFROM)
- atf.safe_push (cp);
+ unsigned HOST_WIDE_INT num_mappings
+ = tree_to_uhwi (OMP_CLAUSE_SIZE (c));
+ if (OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ || OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_ATTACH_DETACH)
+ grp_last_p = &OMP_CLAUSE_CHAIN (*grp_last_p);
+ for (unsigned i = 0; i < num_mappings; i++)
+ grp_last_p = &OMP_CLAUSE_CHAIN (*grp_last_p);
}
+ break;
+ }
- for (unsigned int i = 0; i < atf.length (); i++)
- if (atf[i])
- {
- tree *cp = atf[i];
- tree decl = OMP_CLAUSE_DECL (*cp);
- if (TREE_CODE (decl) == INDIRECT_REF || TREE_CODE (decl) == MEM_REF)
- {
- tree base_ptr = TREE_OPERAND (decl, 0);
- STRIP_TYPE_NOPS (base_ptr);
- for (unsigned int j = i + 1; j < atf.length (); j++)
- if (atf[j])
- {
- tree *cp2 = atf[j];
- tree decl2 = OMP_CLAUSE_DECL (*cp2);
+ return grp_last_p;
+}
- decl2 = OMP_CLAUSE_DECL (*cp2);
- if (is_or_contains_p (decl2, base_ptr))
- {
- /* Move *cp2 to before *cp. */
- tree c = *cp2;
- *cp2 = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *cp;
- *cp = c;
-
- if (*cp2 != NULL_TREE
- && OMP_CLAUSE_CODE (*cp2) == OMP_CLAUSE_MAP
- && OMP_CLAUSE_MAP_KIND (*cp2) == GOMP_MAP_ALWAYS_POINTER)
- {
- tree c2 = *cp2;
- *cp2 = OMP_CLAUSE_CHAIN (c2);
- OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = c2;
- }
+/* Walk through LIST_P, and return a list of groups of mappings found (e.g.
+ OMP_CLAUSE_MAP with GOMP_MAP_{TO/FROM/TOFROM} followed by one or two
+ associated GOMP_MAP_POINTER mappings). Return a vector of omp_mapping_group
+ if we have more than one such group, else return NULL. */
- atf[j] = NULL;
- }
- }
+static void
+omp_gather_mapping_groups_1 (tree *list_p, vec<omp_mapping_group> *groups,
+ tree gather_sentinel)
+{
+ for (tree *cp = list_p;
+ *cp && *cp != gather_sentinel;
+ cp = &OMP_CLAUSE_CHAIN (*cp))
+ {
+ if (OMP_CLAUSE_CODE (*cp) != OMP_CLAUSE_MAP)
+ continue;
+
+ tree *grp_last_p = omp_group_last (cp);
+ omp_mapping_group grp;
+
+ grp.grp_start = cp;
+ grp.grp_end = *grp_last_p;
+ grp.mark = UNVISITED;
+ grp.sibling = NULL;
+ grp.deleted = false;
+ grp.next = NULL;
+ groups->safe_push (grp);
+
+ cp = grp_last_p;
+ }
+}
+
+static vec<omp_mapping_group> *
+omp_gather_mapping_groups (tree *list_p)
+{
+ vec<omp_mapping_group> *groups = new vec<omp_mapping_group> ();
+
+ omp_gather_mapping_groups_1 (list_p, groups, NULL_TREE);
+
+ if (groups->length () > 0)
+ return groups;
+ else
+ {
+ delete groups;
+ return NULL;
+ }
+}
+
+/* A pointer mapping group GRP may define a block of memory starting at some
+ base address, and maybe also define a firstprivate pointer or firstprivate
+ reference that points to that block. The return value is a node containing
+ the former, and the *FIRSTPRIVATE pointer is set if we have the latter.
+ If we define several base pointers, i.e. for a GOMP_MAP_STRUCT mapping,
+ return the number of consecutive chained nodes in CHAINED. */
+
+static tree
+omp_group_base (omp_mapping_group *grp, unsigned int *chained,
+ tree *firstprivate)
+{
+ tree node = *grp->grp_start;
+
+ *firstprivate = NULL_TREE;
+ *chained = 1;
+
+ switch (OMP_CLAUSE_MAP_KIND (node))
+ {
+ case GOMP_MAP_TO:
+ case GOMP_MAP_FROM:
+ case GOMP_MAP_TOFROM:
+ case GOMP_MAP_ALWAYS_FROM:
+ case GOMP_MAP_ALWAYS_TO:
+ case GOMP_MAP_ALWAYS_TOFROM:
+ case GOMP_MAP_FORCE_FROM:
+ case GOMP_MAP_FORCE_TO:
+ case GOMP_MAP_FORCE_TOFROM:
+ case GOMP_MAP_FORCE_PRESENT:
+ case GOMP_MAP_ALLOC:
+ case GOMP_MAP_RELEASE:
+ case GOMP_MAP_DELETE:
+ case GOMP_MAP_FORCE_ALLOC:
+ if (node == grp->grp_end)
+ return node;
+
+ node = OMP_CLAUSE_CHAIN (node);
+ if (node && OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_TO_PSET)
+ {
+ if (node == grp->grp_end)
+ return *grp->grp_start;
+ node = OMP_CLAUSE_CHAIN (node);
+ }
+ if (node)
+ switch (OMP_CLAUSE_MAP_KIND (node))
+ {
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ *firstprivate = OMP_CLAUSE_DECL (node);
+ return *grp->grp_start;
+
+ case GOMP_MAP_ALWAYS_POINTER:
+ case GOMP_MAP_ATTACH_DETACH:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return *grp->grp_start;
+
+ default:
+ internal_error ("unexpected mapping node");
}
- }
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_TO_PSET:
+ gcc_assert (node != grp->grp_end);
+ node = OMP_CLAUSE_CHAIN (node);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_DETACH)
+ return NULL_TREE;
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
+
+ case GOMP_MAP_ATTACH:
+ case GOMP_MAP_DETACH:
+ node = OMP_CLAUSE_CHAIN (node);
+ if (!node || *grp->grp_start == grp->grp_end)
+ return NULL_TREE;
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
+ {
+ /* We're mapping the base pointer itself in a bare attach or detach
+ node. This is a side effect of how parsing works, and the mapping
+ will be removed anyway (at least for enter/exit data directives).
+ We should ignore the mapping here. FIXME. */
+ return NULL_TREE;
+ }
+ else
+ internal_error ("unexpected mapping node");
+ return error_mark_node;
- /* For attach_detach map clauses, if there is another map that maps the
- attached/detached pointer, make sure that map is ordered before the
- attach_detach. */
- atf.truncate (0);
- for (tree *cp = list_p; *cp; cp = &OMP_CLAUSE_CHAIN (*cp))
- if (OMP_CLAUSE_CODE (*cp) == OMP_CLAUSE_MAP)
+ case GOMP_MAP_STRUCT:
{
- /* Collect alloc, to, from, to/from clauses, and
- always_pointer/attach_detach clauses. */
- gomp_map_kind k = OMP_CLAUSE_MAP_KIND (*cp);
- if (k == GOMP_MAP_ALLOC
- || k == GOMP_MAP_TO
- || k == GOMP_MAP_FROM
- || k == GOMP_MAP_TOFROM
- || k == GOMP_MAP_ALWAYS_TO
- || k == GOMP_MAP_ALWAYS_FROM
- || k == GOMP_MAP_ALWAYS_TOFROM
- || k == GOMP_MAP_ATTACH_DETACH
- || k == GOMP_MAP_ALWAYS_POINTER)
- atf.safe_push (cp);
+ unsigned HOST_WIDE_INT num_mappings
+ = tree_to_uhwi (OMP_CLAUSE_SIZE (node));
+ node = OMP_CLAUSE_CHAIN (node);
+ if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
+ {
+ *firstprivate = OMP_CLAUSE_DECL (node);
+ node = OMP_CLAUSE_CHAIN (node);
+ }
+ *chained = num_mappings;
+ return node;
}
- for (unsigned int i = 0; i < atf.length (); i++)
- if (atf[i])
- {
- tree *cp = atf[i];
- tree ptr = OMP_CLAUSE_DECL (*cp);
- STRIP_TYPE_NOPS (ptr);
- if (OMP_CLAUSE_MAP_KIND (*cp) == GOMP_MAP_ATTACH_DETACH)
- for (unsigned int j = i + 1; j < atf.length (); j++)
+ case GOMP_MAP_FORCE_DEVICEPTR:
+ case GOMP_MAP_DEVICE_RESIDENT:
+ case GOMP_MAP_LINK:
+ case GOMP_MAP_IF_PRESENT:
+ case GOMP_MAP_FIRSTPRIVATE:
+ case GOMP_MAP_FIRSTPRIVATE_INT:
+ case GOMP_MAP_USE_DEVICE_PTR:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
+ return NULL_TREE;
+
+ case GOMP_MAP_FIRSTPRIVATE_POINTER:
+ case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_ALWAYS_POINTER:
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ /* These shouldn't appear by themselves. */
+ if (!seen_error ())
+ internal_error ("unexpected pointer mapping node");
+ return error_mark_node;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return error_mark_node;
+}
+
+/* Given a vector of omp_mapping_groups, build a hash table so we can look up
+ nodes by tree_operand_hash. */
+
+static void
+omp_index_mapping_groups_1 (hash_map<tree_operand_hash,
+ omp_mapping_group *> *grpmap,
+ vec<omp_mapping_group> *groups,
+ tree reindex_sentinel)
+{
+ omp_mapping_group *grp;
+ unsigned int i;
+ bool reindexing = reindex_sentinel != NULL_TREE, above_hwm = false;
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ if (reindexing && *grp->grp_start == reindex_sentinel)
+ above_hwm = true;
+
+ if (reindexing && !above_hwm)
+ continue;
+
+ tree fpp;
+ unsigned int chained;
+ tree node = omp_group_base (grp, &chained, &fpp);
+
+ if (node == error_mark_node || (!node && !fpp))
+ continue;
+
+ for (unsigned j = 0;
+ node && j < chained;
+ node = OMP_CLAUSE_CHAIN (node), j++)
+ {
+ tree decl = OMP_CLAUSE_DECL (node);
+
+ /* Sometimes we see zero-offset MEM_REF instead of INDIRECT_REF,
+ meaning node-hash lookups don't work. This is a workaround for
+ that, but ideally we should just create the INDIRECT_REF at
+ source instead. FIXME. */
+ if (TREE_CODE (decl) == MEM_REF
+ && integer_zerop (TREE_OPERAND (decl, 1)))
+ decl = build_fold_indirect_ref (TREE_OPERAND (decl, 0));
+
+ omp_mapping_group **prev = grpmap->get (decl);
+
+ if (prev && *prev == grp)
+ /* Empty. */;
+ else if (prev)
{
- tree *cp2 = atf[j];
- tree decl2 = OMP_CLAUSE_DECL (*cp2);
- if (OMP_CLAUSE_MAP_KIND (*cp2) != GOMP_MAP_ATTACH_DETACH
- && OMP_CLAUSE_MAP_KIND (*cp2) != GOMP_MAP_ALWAYS_POINTER
- && is_or_contains_p (decl2, ptr))
- {
- /* Move *cp2 to before *cp. */
- tree c = *cp2;
- *cp2 = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *cp;
- *cp = c;
- atf[j] = NULL;
-
- /* If decl2 is of the form '*decl2_opnd0', and followed by an
- ALWAYS_POINTER or ATTACH_DETACH of 'decl2_opnd0', move the
- pointer operation along with *cp2. This can happen for C++
- reference sequences. */
- if (j + 1 < atf.length ()
- && (TREE_CODE (decl2) == INDIRECT_REF
- || TREE_CODE (decl2) == MEM_REF))
- {
- tree *cp3 = atf[j + 1];
- tree decl3 = OMP_CLAUSE_DECL (*cp3);
- tree decl2_opnd0 = TREE_OPERAND (decl2, 0);
- if ((OMP_CLAUSE_MAP_KIND (*cp3) == GOMP_MAP_ALWAYS_POINTER
- || OMP_CLAUSE_MAP_KIND (*cp3) == GOMP_MAP_ATTACH_DETACH)
- && operand_equal_p (decl3, decl2_opnd0))
- {
- /* Also move *cp3 to before *cp. */
- c = *cp3;
- *cp2 = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *cp;
- *cp = c;
- atf[j + 1] = NULL;
- j += 1;
- }
- }
- }
+ /* Mapping the same thing twice is normally diagnosed as an error,
+ but can happen under some circumstances, e.g. in pr99928-16.c,
+ the directive:
+
+ #pragma omp target simd reduction(+:a[:3]) \
+ map(always, tofrom: a[:6])
+ ...
+
+ will result in two "a[0]" mappings (of different sizes). */
+
+ grp->sibling = (*prev)->sibling;
+ (*prev)->sibling = grp;
}
- }
+ else
+ grpmap->put (decl, grp);
+ }
+
+ if (!fpp)
+ continue;
+
+ omp_mapping_group **prev = grpmap->get (fpp);
+ if (prev && *prev != grp)
+ {
+ grp->sibling = (*prev)->sibling;
+ (*prev)->sibling = grp;
+ }
+ else
+ grpmap->put (fpp, grp);
+ }
+}
+
+static hash_map<tree_operand_hash, omp_mapping_group *> *
+omp_index_mapping_groups (vec<omp_mapping_group> *groups)
+{
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap
+ = new hash_map<tree_operand_hash, omp_mapping_group *>;
+
+ omp_index_mapping_groups_1 (grpmap, groups, NULL_TREE);
+
+ return grpmap;
+}
+
+/* Rebuild group map from partially-processed clause list (during
+ omp_build_struct_sibling_lists). We have already processed nodes up until
+ a high-water mark (HWM). This is a bit tricky because the list is being
+ reordered as it is scanned, but we know:
+
+ 1. The list after HWM has not been touched yet, so we can reindex it safely.
+
+ 2. The list before and including HWM has been altered, but remains
+ well-formed throughout the sibling-list building operation.
+
+ so, we can do the reindex operation in two parts, on the processed and
+ then the unprocessed halves of the list. */
+
+static hash_map<tree_operand_hash, omp_mapping_group *> *
+omp_reindex_mapping_groups (tree *list_p,
+ vec<omp_mapping_group> *groups,
+ vec<omp_mapping_group> *processed_groups,
+ tree sentinel)
+{
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap
+ = new hash_map<tree_operand_hash, omp_mapping_group *>;
+
+ processed_groups->truncate (0);
+
+ omp_gather_mapping_groups_1 (list_p, processed_groups, sentinel);
+ omp_index_mapping_groups_1 (grpmap, processed_groups, NULL_TREE);
+ if (sentinel)
+ omp_index_mapping_groups_1 (grpmap, groups, sentinel);
+
+ return grpmap;
+}
+
+/* Find the immediately-containing struct for a component ref (etc.)
+ expression EXPR. */
+
+static tree
+omp_containing_struct (tree expr)
+{
+ tree expr0 = expr;
+
+ STRIP_NOPS (expr);
+
+ /* Note: don't strip NOPs unless we're also stripping off array refs or a
+ component ref. */
+ if (TREE_CODE (expr) != ARRAY_REF && TREE_CODE (expr) != COMPONENT_REF)
+ return expr0;
+
+ while (TREE_CODE (expr) == ARRAY_REF)
+ expr = TREE_OPERAND (expr, 0);
+
+ if (TREE_CODE (expr) == COMPONENT_REF)
+ expr = TREE_OPERAND (expr, 0);
+
+ return expr;
+}
+
+/* Return TRUE if DECL describes a component that is part of a whole structure
+ that is mapped elsewhere in GRPMAP. *MAPPED_BY_GROUP is set to the group
+ that maps that structure, if present. */
+
+static bool
+omp_mapped_by_containing_struct (hash_map<tree_operand_hash,
+ omp_mapping_group *> *grpmap,
+ tree decl,
+ omp_mapping_group **mapped_by_group)
+{
+ tree wsdecl = NULL_TREE;
+
+ *mapped_by_group = NULL;
+
+ while (true)
+ {
+ wsdecl = omp_containing_struct (decl);
+ if (wsdecl == decl)
+ break;
+ omp_mapping_group **wholestruct = grpmap->get (wsdecl);
+ if (!wholestruct
+ && TREE_CODE (wsdecl) == MEM_REF
+ && integer_zerop (TREE_OPERAND (wsdecl, 1)))
+ {
+ tree deref = TREE_OPERAND (wsdecl, 0);
+ deref = build_fold_indirect_ref (deref);
+ wholestruct = grpmap->get (deref);
+ }
+ if (wholestruct)
+ {
+ *mapped_by_group = *wholestruct;
+ return true;
+ }
+ decl = wsdecl;
+ }
+
+ return false;
+}
+
+/* Helper function for omp_tsort_mapping_groups. Returns TRUE on success, or
+ FALSE on error. */
+
+static bool
+omp_tsort_mapping_groups_1 (omp_mapping_group ***outlist,
+ vec<omp_mapping_group> *groups,
+ hash_map<tree_operand_hash, omp_mapping_group *>
+ *grpmap,
+ omp_mapping_group *grp)
+{
+ if (grp->mark == PERMANENT)
+ return true;
+ if (grp->mark == TEMPORARY)
+ {
+ fprintf (stderr, "when processing group:\n");
+ debug_mapping_group (grp);
+ internal_error ("base pointer cycle detected");
+ return false;
+ }
+ grp->mark = TEMPORARY;
+
+ tree attaches_to = omp_get_attachment (grp);
+
+ if (attaches_to)
+ {
+ omp_mapping_group **basep = grpmap->get (attaches_to);
+
+ if (basep && *basep != grp)
+ {
+ for (omp_mapping_group *w = *basep; w; w = w->sibling)
+ if (!omp_tsort_mapping_groups_1 (outlist, groups, grpmap, w))
+ return false;
+ }
+ }
+
+ tree decl = OMP_CLAUSE_DECL (*grp->grp_start);
+
+ while (decl)
+ {
+ tree base = omp_get_base_pointer (decl);
+
+ if (!base)
+ break;
+
+ omp_mapping_group **innerp = grpmap->get (base);
+ omp_mapping_group *wholestruct;
+
+ /* We should treat whole-structure mappings as if all (pointer, in this
+ case) members are mapped as individual list items. Check if we have
+ such a whole-structure mapping, if we don't have an explicit reference
+ to the pointer member itself. */
+ if (!innerp
+ && TREE_CODE (base) == COMPONENT_REF
+ && omp_mapped_by_containing_struct (grpmap, base, &wholestruct))
+ innerp = &wholestruct;
+
+ if (innerp && *innerp != grp)
+ {
+ for (omp_mapping_group *w = *innerp; w; w = w->sibling)
+ if (!omp_tsort_mapping_groups_1 (outlist, groups, grpmap, w))
+ return false;
+ break;
+ }
+
+ decl = base;
+ }
+
+ grp->mark = PERMANENT;
+
+ /* Emit grp to output list. */
+
+ **outlist = grp;
+ *outlist = &grp->next;
+
+ return true;
+}
+
+/* Topologically sort GROUPS, so that OMP 5.0-defined base pointers come
+ before mappings that use those pointers. This is an implementation of the
+ depth-first search algorithm, described e.g. at:
+
+ https://en.wikipedia.org/wiki/Topological_sorting
+*/
+
+static omp_mapping_group *
+omp_tsort_mapping_groups (vec<omp_mapping_group> *groups,
+ hash_map<tree_operand_hash, omp_mapping_group *>
+ *grpmap)
+{
+ omp_mapping_group *grp, *outlist = NULL, **cursor;
+ unsigned int i;
+
+ cursor = &outlist;
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ if (grp->mark != PERMANENT)
+ if (!omp_tsort_mapping_groups_1 (&cursor, groups, grpmap, grp))
+ return NULL;
+ }
+
+ return outlist;
+}
+
+/* Split INLIST into two parts, moving groups corresponding to
+ ALLOC/RELEASE/DELETE mappings to one list, and other mappings to another.
+ The former list is then appended to the latter. Each sub-list retains the
+ order of the original list.
+ Note that ATTACH nodes are later moved to the end of the list in
+ gimplify_adjust_omp_clauses, for target regions. */
+
+static omp_mapping_group *
+omp_segregate_mapping_groups (omp_mapping_group *inlist)
+{
+ omp_mapping_group *ard_groups = NULL, *tf_groups = NULL;
+ omp_mapping_group **ard_tail = &ard_groups, **tf_tail = &tf_groups;
+
+ for (omp_mapping_group *w = inlist; w;)
+ {
+ tree c = *w->grp_start;
+ omp_mapping_group *next = w->next;
+
+ gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP);
+
+ switch (OMP_CLAUSE_MAP_KIND (c))
+ {
+ case GOMP_MAP_ALLOC:
+ case GOMP_MAP_RELEASE:
+ case GOMP_MAP_DELETE:
+ *ard_tail = w;
+ w->next = NULL;
+ ard_tail = &w->next;
+ break;
+
+ default:
+ *tf_tail = w;
+ w->next = NULL;
+ tf_tail = &w->next;
+ }
+
+ w = next;
+ }
+
+ /* Now splice the lists together... */
+ *tf_tail = ard_groups;
+
+ return tf_groups;
+}
+
+/* Given a list LIST_P containing groups of mappings given by GROUPS, reorder
+ those groups based on the output list of omp_tsort_mapping_groups --
+ singly-linked, threaded through each element's NEXT pointer starting at
+ HEAD. Each list element appears exactly once in that linked list.
+
+ Each element of GROUPS may correspond to one or several mapping nodes.
+ Node groups are kept together, and in the reordered list, the positions of
+ the original groups are reused for the positions of the reordered list.
+ Hence if we have e.g.
+
+ {to ptr ptr} firstprivate {tofrom ptr} ...
+ ^ ^ ^
+ first group non-"map" second group
+
+ and say the second group contains a base pointer for the first so must be
+ moved before it, the resulting list will contain:
+
+ {tofrom ptr} firstprivate {to ptr ptr} ...
+ ^ prev. second group ^ prev. first group
+*/
+
+static tree *
+omp_reorder_mapping_groups (vec<omp_mapping_group> *groups,
+ omp_mapping_group *head,
+ tree *list_p)
+{
+ omp_mapping_group *grp;
+ unsigned int i;
+ unsigned numgroups = groups->length ();
+ auto_vec<tree> old_heads (numgroups);
+ auto_vec<tree *> old_headps (numgroups);
+ auto_vec<tree> new_heads (numgroups);
+ auto_vec<tree> old_succs (numgroups);
+ bool map_at_start = (list_p == (*groups)[0].grp_start);
+
+ tree *new_grp_tail = NULL;
+
+ /* Stash the start & end nodes of each mapping group before we start
+ modifying the list. */
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ old_headps.quick_push (grp->grp_start);
+ old_heads.quick_push (*grp->grp_start);
+ old_succs.quick_push (OMP_CLAUSE_CHAIN (grp->grp_end));
+ }
+
+ /* And similarly, the heads of the groups in the order we want to rearrange
+ the list to. */
+ for (omp_mapping_group *w = head; w; w = w->next)
+ new_heads.quick_push (*w->grp_start);
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ gcc_assert (head);
+
+ if (new_grp_tail && old_succs[i - 1] == old_heads[i])
+ {
+ /* a {b c d} {e f g} h i j (original)
+ -->
+ a {k l m} {e f g} h i j (inserted new group on last iter)
+ -->
+ a {k l m} {n o p} h i j (this time, chain last group to new one)
+ ^new_grp_tail
+ */
+ *new_grp_tail = new_heads[i];
+ }
+ else if (new_grp_tail)
+ {
+ /* a {b c d} e {f g h} i j k (original)
+ -->
+ a {l m n} e {f g h} i j k (gap after last iter's group)
+ -->
+ a {l m n} e {o p q} h i j (chain last group to old successor)
+ ^new_grp_tail
+ */
+ *new_grp_tail = old_succs[i - 1];
+ *old_headps[i] = new_heads[i];
+ }
+ else
+ {
+ /* The first inserted group -- point to new group, and leave end
+ open.
+ a {b c d} e f
+ -->
+ a {g h i...
+ */
+ *grp->grp_start = new_heads[i];
+ }
+
+ new_grp_tail = &OMP_CLAUSE_CHAIN (head->grp_end);
+
+ head = head->next;
+ }
+
+ if (new_grp_tail)
+ *new_grp_tail = old_succs[numgroups - 1];
+
+ gcc_assert (!head);
+
+ return map_at_start ? (*groups)[0].grp_start : list_p;
}
/* DECL is supposed to have lastprivate semantics in the outer contexts
@@ -9223,6 +9861,688 @@ omp_lastprivate_for_combined_outer_constructs (struct gimplify_omp_ctx *octx,
omp_notice_variable (octx, decl, true);
}
+/* Link node NEWNODE so it is pointed to by chain INSERT_AT. NEWNODE's chain
+ is linked to the previous node pointed to by INSERT_AT. Return the address
+ of NEWNODE's chain field, i.e. the position at which a caller should link
+ any further node to continue the list after NEWNODE. */
+
+static tree *
+omp_siblist_insert_node_after (tree newnode, tree *insert_at)
+{
+ OMP_CLAUSE_CHAIN (newnode) = *insert_at;
+ *insert_at = newnode;
+ return &OMP_CLAUSE_CHAIN (newnode);
+}
+
+/* Move NODE (which is currently pointed to by the chain OLD_POS) so it is
+ pointed to by chain MOVE_AFTER instead. Only the links around NODE are
+ rewritten; no clause nodes are created or destroyed. */
+
+static void
+omp_siblist_move_node_after (tree node, tree *old_pos, tree *move_after)
+{
+ /* OLD_POS must actually point at NODE, else we would corrupt the list. */
+ gcc_assert (node == *old_pos);
+ /* Unlink NODE from its current position... */
+ *old_pos = OMP_CLAUSE_CHAIN (node);
+ /* ...and splice it in directly after MOVE_AFTER. */
+ OMP_CLAUSE_CHAIN (node) = *move_after;
+ *move_after = node;
+}
+
+/* Move nodes from FIRST_PTR (pointed to by previous node's chain) to
+ LAST_NODE to after MOVE_AFTER chain. Similar to below function, but no
+ new nodes are prepended to the list before splicing into the new position.
+ Return the position we should continue scanning the list at, or NULL to
+ stay where we were. */
+
+static tree *
+omp_siblist_move_nodes_after (tree *first_ptr, tree last_node,
+ tree *move_after)
+{
+ /* If the destination chain position is the same as the source, the nodes
+ are already where they belong: tell the caller to stay put. */
+ if (first_ptr == move_after)
+ return NULL;
+
+ /* Detach the sublist [*FIRST_PTR, LAST_NODE] from its current place... */
+ tree tmp = *first_ptr;
+ *first_ptr = OMP_CLAUSE_CHAIN (last_node);
+ /* ...and reattach it directly after MOVE_AFTER. */
+ OMP_CLAUSE_CHAIN (last_node) = *move_after;
+ *move_after = tmp;
+
+ return first_ptr;
+}
+
+/* Concatenate two lists described by [FIRST_NEW, LAST_NEW_TAIL] and
+ [FIRST_PTR, LAST_NODE], and insert them in the OMP clause list after chain
+ pointer MOVE_AFTER.
+
+ The latter list was previously part of the OMP clause list, and the former
+ (prepended) part is comprised of new nodes.
+
+ We start with a list of nodes starting with a struct mapping node. We
+ rearrange the list so that new nodes starting from FIRST_NEW and whose last
+ node's chain is LAST_NEW_TAIL comes directly after MOVE_AFTER, followed by
+ the group of mapping nodes we are currently processing (from the chain
+ FIRST_PTR to LAST_NODE). The return value is the pointer to the next chain
+ we should continue processing from, or NULL to stay where we were.
+
+ The transformation (in the case where MOVE_AFTER and FIRST_PTR are
+ different) is worked through below. Here we are processing LAST_NODE, and
+ FIRST_PTR points at the preceding mapping clause:
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->D (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->F (first_ptr)]
+ F. map_to_4 [->G (continue_at)]
+ G. attach_4 (last_node) [->H]
+ H. ...
+
+ *last_new_tail = *first_ptr;
+
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ *first_ptr = OMP_CLAUSE_CHAIN (last_node)
+
+ #. mapping node chain
+ ----------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->D (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (first_ptr)]
+ F. map_to_4 [->G (continue_at)]
+ G. attach_4 (last_node) [->H]
+ H. ...
+
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ OMP_CLAUSE_CHAIN (last_node) = *move_after;
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->D (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (continue_at)]
+ F. map_to_4 [->G]
+ G. attach_4 (last_node) [->D]
+ H. ...
+
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ *move_after = first_new;
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->I (move_after)]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (continue_at)]
+ F. map_to_4 [->G]
+ G. attach_4 (last_node) [->D]
+ H. ...
+ I. new_node (first_new) [->F (last_new_tail)]
+
+ or, in order:
+
+ #. mapping node chain
+ ---------------------------------------------------
+ A. struct_node [->B]
+ B. comp_1 [->C]
+ C. comp_2 [->I (move_after)]
+ I. new_node (first_new) [->F (last_new_tail)]
+ F. map_to_4 [->G]
+ G. attach_4 (last_node) [->D]
+ D. map_to_3 [->E]
+ E. attach_3 [->H (continue_at)]
+ H. ...
+*/
+
+static tree *
+omp_siblist_move_concat_nodes_after (tree first_new, tree *last_new_tail,
+ tree *first_ptr, tree last_node,
+ tree *move_after)
+{
+ tree *continue_at = NULL;
+ /* Append the existing sublist (starting at *FIRST_PTR) to the tail of the
+ new nodes, forming one combined list headed by FIRST_NEW. */
+ *last_new_tail = *first_ptr;
+ if (first_ptr == move_after)
+ /* Source and destination coincide: link the combined list in place and
+ have the caller continue scanning from where it was. */
+ *move_after = first_new;
+ else
+ {
+ /* Unlink the old sublist ending at LAST_NODE from its current
+ position, then splice the combined list in after MOVE_AFTER. */
+ *first_ptr = OMP_CLAUSE_CHAIN (last_node);
+ continue_at = first_ptr;
+ OMP_CLAUSE_CHAIN (last_node) = *move_after;
+ *move_after = first_new;
+ }
+ return continue_at;
+}
+
+/* Mapping struct members causes an additional set of nodes to be created,
+ starting with GOMP_MAP_STRUCT followed by a number of mappings equal to the
+ number of members being mapped, in order of ascending position (address or
+ bitwise).
+
+ We scan through the list of mapping clauses, calling this function for each
+ struct member mapping we find, and build up the list of mappings after the
+ initial GOMP_MAP_STRUCT node. For pointer members, these will be
+ newly-created ALLOC nodes. For non-pointer members, the existing mapping is
+ moved into place in the sorted list.
+
+ struct {
+ int *a;
+ int *b;
+ int c;
+ int *d;
+ };
+
+ #pragma (acc|omp directive) copy(struct.a[0:n], struct.b[0:n], struct.c,
+ struct.d[0:n])
+
+ GOMP_MAP_STRUCT (4)
+ [GOMP_MAP_FIRSTPRIVATE_REFERENCE -- for refs to structs]
+ GOMP_MAP_ALLOC (struct.a)
+ GOMP_MAP_ALLOC (struct.b)
+ GOMP_MAP_TO (struct.c)
+ GOMP_MAP_ALLOC (struct.d)
+ ...
+
+ In the case where we are mapping references to pointers, or in Fortran if
+ we are mapping an array with a descriptor, additional nodes may be created
+ after the struct node list also.
+
+ The return code is either a pointer to the next node to process (if the
+ list has been rearranged), else NULL to continue with the next node in the
+ original list. */
+
+static tree *
+omp_accumulate_sibling_list (enum omp_region_type region_type,
+ enum tree_code code,
+ hash_map<tree_operand_hash, tree>
+ *&struct_map_to_clause, tree *grp_start_p,
+ tree grp_end, tree *inner)
+{
+ poly_offset_int coffset;
+ poly_int64 cbitpos;
+ tree ocd = OMP_CLAUSE_DECL (grp_end);
+ bool openmp = !(region_type & ORT_ACC);
+ tree *continue_at = NULL;
+
+ /* Strip array refs and one level of indirection to reach the component
+ access whose base struct this group's sibling list belongs to. */
+ while (TREE_CODE (ocd) == ARRAY_REF)
+ ocd = TREE_OPERAND (ocd, 0);
+
+ if (TREE_CODE (ocd) == INDIRECT_REF)
+ ocd = TREE_OPERAND (ocd, 0);
+
+ tree base = extract_base_bit_offset (ocd, &cbitpos, &coffset);
+
+ /* Classify the group's final mapping node: COFFSET/CBITPOS record the
+ member's position so it can be placed in sorted order below. */
+ bool ptr = (OMP_CLAUSE_MAP_KIND (grp_end) == GOMP_MAP_ALWAYS_POINTER);
+ bool attach_detach = ((OMP_CLAUSE_MAP_KIND (grp_end)
+ == GOMP_MAP_ATTACH_DETACH)
+ || (OMP_CLAUSE_MAP_KIND (grp_end)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION));
+ bool attach = (OMP_CLAUSE_MAP_KIND (grp_end) == GOMP_MAP_ATTACH
+ || OMP_CLAUSE_MAP_KIND (grp_end) == GOMP_MAP_DETACH);
+
+ /* FIXME: If we're not mapping the base pointer in some other clause on this
+ directive, I think we want to create ALLOC/RELEASE here -- i.e. not
+ early-exit. */
+ if (openmp && attach_detach)
+ return NULL;
+
+ if (!struct_map_to_clause || struct_map_to_clause->get (base) == NULL)
+ {
+ /* First member mapping seen for BASE: synthesize the node that heads
+ the sibling list (GOMP_MAP_STRUCT, or FORCE_PRESENT for a plain
+ attach/detach) and record it in STRUCT_MAP_TO_CLAUSE. */
+ tree l = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end), OMP_CLAUSE_MAP);
+ gomp_map_kind k = attach ? GOMP_MAP_FORCE_PRESENT : GOMP_MAP_STRUCT;
+
+ OMP_CLAUSE_SET_MAP_KIND (l, k);
+
+ OMP_CLAUSE_DECL (l) = unshare_expr (base);
+
+ /* For GOMP_MAP_STRUCT the size counts mapped members (starting at 1);
+ for attach/detach it is the byte size of the structure itself. */
+ OMP_CLAUSE_SIZE (l)
+ = (!attach ? size_int (1)
+ : (DECL_P (OMP_CLAUSE_DECL (l))
+ ? DECL_SIZE_UNIT (OMP_CLAUSE_DECL (l))
+ : TYPE_SIZE_UNIT (TREE_TYPE (OMP_CLAUSE_DECL (l)))));
+ if (struct_map_to_clause == NULL)
+ struct_map_to_clause = new hash_map<tree_operand_hash, tree>;
+ struct_map_to_clause->put (base, l);
+
+ if (ptr || attach_detach)
+ {
+ /* Pointer member: build the alloc node (and possibly an extra
+ indirect-pointer/pointer-set node) and prepend the new struct
+ node ahead of them at the group's start. */
+ tree extra_node;
+ tree alloc_node
+ = build_omp_struct_comp_nodes (code, *grp_start_p, grp_end,
+ &extra_node);
+ OMP_CLAUSE_CHAIN (l) = alloc_node;
+
+ tree *insert_node_pos = grp_start_p;
+
+ if (extra_node)
+ {
+ OMP_CLAUSE_CHAIN (extra_node) = *insert_node_pos;
+ OMP_CLAUSE_CHAIN (alloc_node) = extra_node;
+ }
+ else
+ OMP_CLAUSE_CHAIN (alloc_node) = *insert_node_pos;
+
+ *insert_node_pos = l;
+ }
+ else
+ {
+ /* Non-pointer member: the group is a single node; just insert the
+ new struct node directly before it. */
+ gcc_assert (*grp_start_p == grp_end);
+ grp_start_p = omp_siblist_insert_node_after (l, grp_start_p);
+ }
+
+ tree noind = omp_strip_indirections (base);
+
+ if (!openmp
+ && (region_type & ORT_TARGET)
+ && TREE_CODE (noind) == COMPONENT_REF)
+ {
+ /* The base for this component access is a struct component access
+ itself. Insert a node to be processed on the next iteration of
+ our caller's loop, which will subsequently be turned into a new,
+ inner GOMP_MAP_STRUCT mapping.
+
+ We need to do this else the non-DECL_P base won't be
+ rewritten correctly in the offloaded region. */
+ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end),
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FORCE_PRESENT);
+ OMP_CLAUSE_DECL (c2) = unshare_expr (noind);
+ OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (TREE_TYPE (noind));
+ *inner = c2;
+ return NULL;
+ }
+
+ tree sdecl = omp_strip_components_and_deref (base);
+
+ if (POINTER_TYPE_P (TREE_TYPE (sdecl)) && (region_type & ORT_TARGET))
+ {
+ /* The struct is accessed through a pointer (or reference): add a
+ FIRSTPRIVATE_POINTER/REFERENCE node whose size encodes the offset
+ of the struct base from the containing decl. */
+ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (grp_end),
+ OMP_CLAUSE_MAP);
+ bool base_ref
+ = (TREE_CODE (base) == INDIRECT_REF
+ && ((TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
+ == REFERENCE_TYPE)
+ || ((TREE_CODE (TREE_OPERAND (base, 0))
+ == INDIRECT_REF)
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND
+ (TREE_OPERAND (base, 0), 0)))
+ == REFERENCE_TYPE))));
+ enum gomp_map_kind mkind = base_ref ? GOMP_MAP_FIRSTPRIVATE_REFERENCE
+ : GOMP_MAP_FIRSTPRIVATE_POINTER;
+ OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
+ OMP_CLAUSE_DECL (c2) = sdecl;
+ tree baddr = build_fold_addr_expr (base);
+ baddr = fold_convert_loc (OMP_CLAUSE_LOCATION (grp_end),
+ ptrdiff_type_node, baddr);
+ /* This isn't going to be good enough when we add support for more
+ complicated lvalue expressions. FIXME. */
+ if (TREE_CODE (TREE_TYPE (sdecl)) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (sdecl))) == POINTER_TYPE)
+ sdecl = build_simple_mem_ref (sdecl);
+ tree decladdr = fold_convert_loc (OMP_CLAUSE_LOCATION (grp_end),
+ ptrdiff_type_node, sdecl);
+ OMP_CLAUSE_SIZE (c2)
+ = fold_build2_loc (OMP_CLAUSE_LOCATION (grp_end), MINUS_EXPR,
+ ptrdiff_type_node, baddr, decladdr);
+ /* Insert after struct node. */
+ OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l);
+ OMP_CLAUSE_CHAIN (l) = c2;
+ }
+
+ return NULL;
+ }
+ else if (struct_map_to_clause)
+ {
+ /* We already have a GOMP_MAP_STRUCT node for BASE: find the sorted
+ position for this member in the existing sibling list. */
+ tree *osc = struct_map_to_clause->get (base);
+ tree *sc = NULL, *scp = NULL;
+ sc = &OMP_CLAUSE_CHAIN (*osc);
+ /* The struct mapping might be immediately followed by a
+ FIRSTPRIVATE_POINTER and/or FIRSTPRIVATE_REFERENCE -- if it's an
+ indirect access or a reference, or both. (This added node is removed
+ in omp-low.c after it has been processed there.) */
+ if (*sc != grp_end
+ && (OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_FIRSTPRIVATE_POINTER
+ || OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
+ sc = &OMP_CLAUSE_CHAIN (*sc);
+ /* Walk the sibling list until we fall off its end, find a member that
+ sorts after the current one, or detect a duplicate. */
+ for (; *sc != grp_end; sc = &OMP_CLAUSE_CHAIN (*sc))
+ if ((ptr || attach_detach) && sc == grp_start_p)
+ break;
+ else if (TREE_CODE (OMP_CLAUSE_DECL (*sc)) != COMPONENT_REF
+ && TREE_CODE (OMP_CLAUSE_DECL (*sc)) != INDIRECT_REF
+ && TREE_CODE (OMP_CLAUSE_DECL (*sc)) != ARRAY_REF)
+ break;
+ else
+ {
+ tree sc_decl = OMP_CLAUSE_DECL (*sc);
+ poly_offset_int offset;
+ poly_int64 bitpos;
+
+ if (TREE_CODE (sc_decl) == ARRAY_REF)
+ {
+ while (TREE_CODE (sc_decl) == ARRAY_REF)
+ sc_decl = TREE_OPERAND (sc_decl, 0);
+ if (TREE_CODE (sc_decl) != COMPONENT_REF
+ || TREE_CODE (TREE_TYPE (sc_decl)) != ARRAY_TYPE)
+ break;
+ }
+ else if (TREE_CODE (sc_decl) == INDIRECT_REF
+ && TREE_CODE (TREE_OPERAND (sc_decl, 0)) == COMPONENT_REF
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (sc_decl, 0)))
+ == REFERENCE_TYPE))
+ sc_decl = TREE_OPERAND (sc_decl, 0);
+
+ tree base2 = extract_base_bit_offset (sc_decl, &bitpos, &offset);
+ if (!base2 || !operand_equal_p (base2, base, 0))
+ break;
+ if (scp)
+ continue;
+ if ((region_type & ORT_ACC) != 0)
+ {
+ /* This duplicate checking code is currently only enabled for
+ OpenACC. */
+ tree d1 = OMP_CLAUSE_DECL (*sc);
+ tree d2 = OMP_CLAUSE_DECL (grp_end);
+ while (TREE_CODE (d1) == ARRAY_REF)
+ d1 = TREE_OPERAND (d1, 0);
+ while (TREE_CODE (d2) == ARRAY_REF)
+ d2 = TREE_OPERAND (d2, 0);
+ if (TREE_CODE (d1) == INDIRECT_REF)
+ d1 = TREE_OPERAND (d1, 0);
+ if (TREE_CODE (d2) == INDIRECT_REF)
+ d2 = TREE_OPERAND (d2, 0);
+ while (TREE_CODE (d1) == COMPONENT_REF)
+ if (TREE_CODE (d2) == COMPONENT_REF
+ && TREE_OPERAND (d1, 1) == TREE_OPERAND (d2, 1))
+ {
+ d1 = TREE_OPERAND (d1, 0);
+ d2 = TREE_OPERAND (d2, 0);
+ }
+ else
+ break;
+ if (d1 == d2)
+ {
+ error_at (OMP_CLAUSE_LOCATION (grp_end),
+ "%qE appears more than once in map clauses",
+ OMP_CLAUSE_DECL (grp_end));
+ return NULL;
+ }
+ }
+ if (maybe_lt (coffset, offset)
+ || (known_eq (coffset, offset)
+ && maybe_lt (cbitpos, bitpos)))
+ {
+ if (ptr || attach_detach)
+ scp = sc;
+ else
+ break;
+ }
+ }
+
+ /* Count this member in the struct node's size (attach/detach nodes are
+ not part of the sorted member list). */
+ if (!attach)
+ OMP_CLAUSE_SIZE (*osc)
+ = size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc), size_one_node);
+ if (ptr || attach_detach)
+ {
+ tree cl = NULL_TREE, extra_node;
+ tree alloc_node = build_omp_struct_comp_nodes (code, *grp_start_p,
+ grp_end, &extra_node);
+ tree *tail_chain = NULL;
+
+ /* Here, we have:
+
+ grp_end : the last (or only) node in this group.
+ grp_start_p : pointer to the first node in a pointer mapping group
+ up to and including GRP_END.
+ sc : pointer to the chain for the end of the struct component
+ list.
+ scp : pointer to the chain for the sorted position at which we
+ should insert in the middle of the struct component list
+ (else NULL to insert at end).
+ alloc_node : the "alloc" node for the structure (pointer-type)
+ component. We insert at SCP (if present), else SC
+ (the end of the struct component list).
+ extra_node : a newly-synthesized node for an additional indirect
+ pointer mapping or a Fortran pointer set, if needed.
+ cl : first node to prepend before grp_start_p.
+ tail_chain : pointer to chain of last prepended node.
+
+ The general idea is we move the nodes for this struct mapping
+ together: the alloc node goes into the sorted list directly after
+ the struct mapping, and any extra nodes (together with the nodes
+ mapping arrays pointed to by struct components) get moved after
+ that list. When SCP is NULL, we insert the nodes at SC, i.e. at
+ the end of the struct component mapping list. It's important that
+ the alloc_node comes first in that case because it's part of the
+ sorted component mapping list (but subsequent nodes are not!). */
+
+ if (scp)
+ omp_siblist_insert_node_after (alloc_node, scp);
+
+ /* Make [cl,tail_chain] a list of the alloc node (if we haven't
+ already inserted it) and the extra_node (if it is present). The
+ list can be empty if we added alloc_node above and there is no
+ extra node. */
+ if (scp && extra_node)
+ {
+ cl = extra_node;
+ tail_chain = &OMP_CLAUSE_CHAIN (extra_node);
+ }
+ else if (extra_node)
+ {
+ OMP_CLAUSE_CHAIN (alloc_node) = extra_node;
+ cl = alloc_node;
+ tail_chain = &OMP_CLAUSE_CHAIN (extra_node);
+ }
+ else if (!scp)
+ {
+ cl = alloc_node;
+ tail_chain = &OMP_CLAUSE_CHAIN (alloc_node);
+ }
+
+ continue_at
+ = cl ? omp_siblist_move_concat_nodes_after (cl, tail_chain,
+ grp_start_p, grp_end,
+ sc)
+ : omp_siblist_move_nodes_after (grp_start_p, grp_end, sc);
+ }
+ else if (*sc != grp_end)
+ {
+ gcc_assert (*grp_start_p == grp_end);
+
+ /* We are moving the current node back to a previous struct node:
+ the node that used to point to the current node will now point to
+ the next node. */
+ continue_at = grp_start_p;
+ /* In the non-pointer case, the mapping clause itself is moved into
+ the correct position in the struct component list, which in this
+ case is just SC. */
+ omp_siblist_move_node_after (*grp_start_p, grp_start_p, sc);
+ }
+ }
+ return continue_at;
+}
+
+/* Scan through GROUPS, and create sorted structure sibling lists without
+ gimplifying. CODE is the directive's tree code, REGION_TYPE its region
+ kind, GROUPS the mapping groups gathered from the clause list at LIST_P,
+ and GRPMAP the group index (which may be rebuilt here if GROUPS grows).
+ Return true on success, false if an error was diagnosed. */
+
+static bool
+omp_build_struct_sibling_lists (enum tree_code code,
+ enum omp_region_type region_type,
+ vec<omp_mapping_group> *groups,
+ hash_map<tree_operand_hash, omp_mapping_group *>
+ **grpmap,
+ tree *list_p)
+{
+ unsigned i;
+ omp_mapping_group *grp;
+ hash_map<tree_operand_hash, tree> *struct_map_to_clause = NULL;
+ bool success = true;
+ tree *new_next = NULL;
+ tree *tail = &OMP_CLAUSE_CHAIN ((*groups)[groups->length () - 1].grp_end);
+ auto_vec<omp_mapping_group> pre_hwm_groups;
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ {
+ tree c = grp->grp_end;
+ tree decl = OMP_CLAUSE_DECL (c);
+ tree grp_end = grp->grp_end;
+ /* SENTINEL marks the end of this group in the underlying list, used
+ when reindexing the group map below. */
+ tree sentinel = OMP_CLAUSE_CHAIN (grp_end);
+
+ /* If the previous iteration moved nodes around, the recorded start of
+ this group is stale: use the continuation point instead. */
+ if (new_next)
+ grp->grp_start = new_next;
+
+ new_next = NULL;
+
+ tree *grp_start_p = grp->grp_start;
+
+ if (DECL_P (decl))
+ continue;
+
+ if (OMP_CLAUSE_CHAIN (*grp_start_p)
+ && OMP_CLAUSE_CHAIN (*grp_start_p) != grp_end)
+ {
+ /* Don't process an array descriptor that isn't inside a derived type
+ as a struct (the GOMP_MAP_POINTER following will have the form
+ "var.data", but such mappings are handled specially). */
+ tree grpmid = OMP_CLAUSE_CHAIN (*grp_start_p);
+ if (OMP_CLAUSE_CODE (grpmid) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (grpmid) == GOMP_MAP_TO_PSET
+ && DECL_P (OMP_CLAUSE_DECL (grpmid)))
+ continue;
+ }
+
+ /* Peel array refs (and, for references, one indirection) to expose a
+ COMPONENT_REF if there is one; otherwise this group doesn't concern
+ a struct member and is skipped. */
+ tree d = decl;
+ if (TREE_CODE (d) == ARRAY_REF)
+ {
+ while (TREE_CODE (d) == ARRAY_REF)
+ d = TREE_OPERAND (d, 0);
+ if (TREE_CODE (d) == COMPONENT_REF
+ && TREE_CODE (TREE_TYPE (d)) == ARRAY_TYPE)
+ decl = d;
+ }
+ if (d == decl
+ && TREE_CODE (decl) == INDIRECT_REF
+ && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
+ == REFERENCE_TYPE)
+ && (OMP_CLAUSE_MAP_KIND (c)
+ != GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION))
+ decl = TREE_OPERAND (decl, 0);
+
+ STRIP_NOPS (decl);
+
+ if (TREE_CODE (decl) != COMPONENT_REF)
+ continue;
+
+ /* If we're mapping the whole struct in another node, skip creation of
+ sibling lists. */
+ omp_mapping_group *wholestruct;
+ if (!(region_type & ORT_ACC)
+ && omp_mapped_by_containing_struct (*grpmap, OMP_CLAUSE_DECL (c),
+ &wholestruct))
+ {
+ if (*grp_start_p == grp_end)
+ /* Remove the whole of this mapping -- redundant. */
+ grp->deleted = true;
+
+ continue;
+ }
+
+ if (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET
+ && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH
+ && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH
+ && code != OACC_UPDATE
+ && code != OMP_TARGET_UPDATE)
+ {
+ if (error_operand_p (decl))
+ {
+ success = false;
+ goto error_out;
+ }
+
+ /* Structs whose size isn't a compile-time constant can't be
+ mapped member-by-member. */
+ tree stype = TREE_TYPE (decl);
+ if (TREE_CODE (stype) == REFERENCE_TYPE)
+ stype = TREE_TYPE (stype);
+ if (TYPE_SIZE_UNIT (stype) == NULL
+ || TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "mapping field %qE of variable length "
+ "structure", OMP_CLAUSE_DECL (c));
+ success = false;
+ goto error_out;
+ }
+
+ tree inner = NULL_TREE;
+
+ new_next
+ = omp_accumulate_sibling_list (region_type, code,
+ struct_map_to_clause, grp_start_p,
+ grp_end, &inner);
+
+ if (inner)
+ {
+ /* omp_accumulate_sibling_list synthesized a clause for an
+ inner struct access: append it as a new group so it is
+ processed on a later iteration. */
+ if (new_next && *new_next == NULL_TREE)
+ *new_next = inner;
+ else
+ *tail = inner;
+
+ OMP_CLAUSE_CHAIN (inner) = NULL_TREE;
+ omp_mapping_group newgrp;
+ newgrp.grp_start = new_next ? new_next : tail;
+ newgrp.grp_end = inner;
+ newgrp.mark = UNVISITED;
+ newgrp.sibling = NULL;
+ newgrp.deleted = false;
+ newgrp.next = NULL;
+ groups->safe_push (newgrp);
+
+ /* !!! Growing GROUPS might invalidate the pointers in the group
+ map. Rebuild it here. This is a bit inefficient, but
+ shouldn't happen very often. */
+ delete (*grpmap);
+ *grpmap
+ = omp_reindex_mapping_groups (list_p, groups, &pre_hwm_groups,
+ sentinel);
+
+ tail = &OMP_CLAUSE_CHAIN (inner);
+ }
+ }
+ }
+
+ /* Delete groups marked for deletion above. At this point the order of the
+ groups may no longer correspond to the order of the underlying list,
+ which complicates this a little. First clear out OMP_CLAUSE_DECL for
+ deleted nodes... */
+
+ FOR_EACH_VEC_ELT (*groups, i, grp)
+ if (grp->deleted)
+ for (tree d = *grp->grp_start;
+ d != OMP_CLAUSE_CHAIN (grp->grp_end);
+ d = OMP_CLAUSE_CHAIN (d))
+ OMP_CLAUSE_DECL (d) = NULL_TREE;
+
+ /* ...then sweep through the list removing the now-empty nodes. */
+
+ tail = list_p;
+ while (*tail)
+ {
+ if (OMP_CLAUSE_CODE (*tail) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_DECL (*tail) == NULL_TREE)
+ *tail = OMP_CLAUSE_CHAIN (*tail);
+ else
+ tail = &OMP_CLAUSE_CHAIN (*tail);
+ }
+
+error_out:
+ if (struct_map_to_clause)
+ delete struct_map_to_clause;
+
+ return success;
+}
+
/* Scan the OMP clauses in *LIST_P, installing mappings into a new
and previous omp contexts. */
@@ -9233,9 +10553,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
tree c;
- hash_map<tree_operand_hash, tree> *struct_map_to_clause = NULL;
- hash_map<tree_operand_hash, tree *> *struct_seen_clause = NULL;
- hash_set<tree> *struct_deref_set = NULL;
tree *prev_list_p = NULL, *orig_list_p = list_p;
int handled_depend_iterators = -1;
int nowait = -1;
@@ -9271,7 +10588,57 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
|| code == OMP_TARGET_DATA
|| code == OMP_TARGET_ENTER_DATA
|| code == OMP_TARGET_EXIT_DATA)
- omp_target_reorder_clauses (list_p);
+ {
+ vec<omp_mapping_group> *groups;
+ groups = omp_gather_mapping_groups (list_p);
+ if (groups)
+ {
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap;
+ grpmap = omp_index_mapping_groups (groups);
+
+ omp_build_struct_sibling_lists (code, region_type, groups, &grpmap,
+ list_p);
+
+ omp_mapping_group *outlist = NULL;
+
+ /* Topological sorting may fail if we have duplicate nodes, which
+ we should have detected and shown an error for already. Skip
+ sorting in that case. */
+ if (seen_error ())
+ goto failure;
+
+ delete grpmap;
+ delete groups;
+
+ /* Rebuild now we have struct sibling lists. */
+ groups = omp_gather_mapping_groups (list_p);
+ grpmap = omp_index_mapping_groups (groups);
+
+ outlist = omp_tsort_mapping_groups (groups, grpmap);
+ outlist = omp_segregate_mapping_groups (outlist);
+ list_p = omp_reorder_mapping_groups (groups, outlist, list_p);
+
+ failure:
+ delete grpmap;
+ delete groups;
+ }
+ }
+ else if (region_type & ORT_ACC)
+ {
+ vec<omp_mapping_group> *groups;
+ groups = omp_gather_mapping_groups (list_p);
+ if (groups)
+ {
+ hash_map<tree_operand_hash, omp_mapping_group *> *grpmap;
+ grpmap = omp_index_mapping_groups (groups);
+
+ omp_build_struct_sibling_lists (code, region_type, groups, &grpmap,
+ list_p);
+
+ delete groups;
+ delete grpmap;
+ }
+ }
while ((c = *list_p) != NULL)
{
@@ -9678,6 +11045,28 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
GOVD_FIRSTPRIVATE | GOVD_SEEN);
}
+ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT)
+ {
+ tree base = omp_strip_components_and_deref (decl);
+ if (DECL_P (base))
+ {
+ decl = base;
+ splay_tree_node n
+ = splay_tree_lookup (ctx->variables,
+ (splay_tree_key) decl);
+ if (seen_error ()
+ && n
+ && (n->value & (GOVD_MAP | GOVD_FIRSTPRIVATE)) != 0)
+ {
+ remove = true;
+ break;
+ }
+ flags = GOVD_MAP | GOVD_EXPLICIT;
+
+ goto do_add_decl;
+ }
+ }
+
if (TREE_CODE (decl) == TARGET_EXPR)
{
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
@@ -9708,113 +11097,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
pd = &TREE_OPERAND (decl, 0);
decl = TREE_OPERAND (decl, 0);
}
- bool indir_p = false;
- bool component_ref_p = false;
- tree indir_base = NULL_TREE;
- tree orig_decl = decl;
- tree decl_ref = NULL_TREE;
- if ((region_type & (ORT_ACC | ORT_TARGET | ORT_TARGET_DATA)) != 0
- && TREE_CODE (*pd) == COMPONENT_REF
- && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH
- && code != OACC_UPDATE)
- {
- while (TREE_CODE (decl) == COMPONENT_REF)
- {
- decl = TREE_OPERAND (decl, 0);
- component_ref_p = true;
- if (((TREE_CODE (decl) == MEM_REF
- && integer_zerop (TREE_OPERAND (decl, 1)))
- || INDIRECT_REF_P (decl))
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == POINTER_TYPE))
- {
- indir_p = true;
- indir_base = decl;
- decl = TREE_OPERAND (decl, 0);
- STRIP_NOPS (decl);
- }
- if (TREE_CODE (decl) == INDIRECT_REF
- && DECL_P (TREE_OPERAND (decl, 0))
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == REFERENCE_TYPE))
- {
- decl_ref = decl;
- decl = TREE_OPERAND (decl, 0);
- }
- }
- }
- else if (TREE_CODE (decl) == COMPONENT_REF
- && (OMP_CLAUSE_MAP_KIND (c)
- != GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION))
- {
- component_ref_p = true;
- while (TREE_CODE (decl) == COMPONENT_REF)
- decl = TREE_OPERAND (decl, 0);
- if (TREE_CODE (decl) == INDIRECT_REF
- && DECL_P (TREE_OPERAND (decl, 0))
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == REFERENCE_TYPE))
- decl = TREE_OPERAND (decl, 0);
- }
- if (decl != orig_decl && DECL_P (decl) && indir_p
- && (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
- || (decl_ref
- && TREE_CODE (TREE_TYPE (decl_ref)) == POINTER_TYPE)))
- {
- gomp_map_kind k
- = ((code == OACC_EXIT_DATA || code == OMP_TARGET_EXIT_DATA)
- ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
- /* We have a dereference of a struct member. Make this an
- attach/detach operation, and ensure the base pointer is
- mapped as a FIRSTPRIVATE_POINTER. */
- OMP_CLAUSE_SET_MAP_KIND (c, k);
- flags = GOVD_MAP | GOVD_SEEN | GOVD_EXPLICIT;
- tree next_clause = OMP_CLAUSE_CHAIN (c);
- if (k == GOMP_MAP_ATTACH
- && code != OACC_ENTER_DATA
- && code != OMP_TARGET_ENTER_DATA
- && (!next_clause
- || (OMP_CLAUSE_CODE (next_clause) != OMP_CLAUSE_MAP)
- || (OMP_CLAUSE_MAP_KIND (next_clause)
- != GOMP_MAP_POINTER)
- || OMP_CLAUSE_DECL (next_clause) != decl)
- && (!struct_deref_set
- || !struct_deref_set->contains (decl))
- && (!struct_map_to_clause
- || !struct_map_to_clause->get (indir_base)))
- {
- if (!struct_deref_set)
- struct_deref_set = new hash_set<tree> ();
- /* As well as the attach, we also need a
- FIRSTPRIVATE_POINTER clause to properly map the
- pointer to the struct base. */
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALLOC);
- OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c2)
- = 1;
- tree charptr_zero
- = build_int_cst (build_pointer_type (char_type_node),
- 0);
- OMP_CLAUSE_DECL (c2)
- = build2 (MEM_REF, char_type_node,
- decl_ref ? decl_ref : decl, charptr_zero);
- OMP_CLAUSE_SIZE (c2) = size_zero_node;
- tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- OMP_CLAUSE_SET_MAP_KIND (c3,
- GOMP_MAP_FIRSTPRIVATE_POINTER);
- OMP_CLAUSE_DECL (c3) = decl;
- OMP_CLAUSE_SIZE (c3) = size_zero_node;
- tree mapgrp = *prev_list_p;
- *prev_list_p = c2;
- OMP_CLAUSE_CHAIN (c3) = mapgrp;
- OMP_CLAUSE_CHAIN (c2) = c3;
-
- struct_deref_set->add (decl);
- }
- goto do_add_decl;
- }
/* An "attach/detach" operation on an update directive should
behave as a GOMP_MAP_ALWAYS_POINTER. Beware that
unlike attach or detach map kinds, GOMP_MAP_ALWAYS_POINTER
@@ -9822,373 +11104,49 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
if (code == OACC_UPDATE
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_ALWAYS_POINTER);
- if ((DECL_P (decl)
- || (component_ref_p
- && (INDIRECT_REF_P (decl)
- || TREE_CODE (decl) == MEM_REF
- || TREE_CODE (decl) == ARRAY_REF)))
- && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET
- && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH
- && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH
- && code != OACC_UPDATE
- && code != OMP_TARGET_UPDATE)
- {
- if (error_operand_p (decl))
- {
- remove = true;
- break;
- }
-
- tree stype = TREE_TYPE (decl);
- if (TREE_CODE (stype) == REFERENCE_TYPE)
- stype = TREE_TYPE (stype);
- if (TYPE_SIZE_UNIT (stype) == NULL
- || TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST)
- {
- error_at (OMP_CLAUSE_LOCATION (c),
- "mapping field %qE of variable length "
- "structure", OMP_CLAUSE_DECL (c));
- remove = true;
- break;
- }
-
- if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER
- || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
- {
- /* Error recovery. */
- if (prev_list_p == NULL)
- {
- remove = true;
- break;
- }
-
- /* The below prev_list_p based error recovery code is
- currently no longer valid for OpenMP. */
- if (code != OMP_TARGET
- && code != OMP_TARGET_DATA
- && code != OMP_TARGET_UPDATE
- && code != OMP_TARGET_ENTER_DATA
- && code != OMP_TARGET_EXIT_DATA
- && OMP_CLAUSE_CHAIN (*prev_list_p) != c)
- {
- tree ch = OMP_CLAUSE_CHAIN (*prev_list_p);
- if (ch == NULL_TREE || OMP_CLAUSE_CHAIN (ch) != c)
- {
- remove = true;
- break;
- }
- }
- }
-
- poly_offset_int offset1;
- poly_int64 bitpos1;
- tree tree_offset1;
- tree base_ref;
-
- tree base
- = extract_base_bit_offset (OMP_CLAUSE_DECL (c), &base_ref,
- &bitpos1, &offset1,
- &tree_offset1);
-
- bool do_map_struct = (base == decl && !tree_offset1);
-
- splay_tree_node n
- = (DECL_P (decl)
- ? splay_tree_lookup (ctx->variables,
- (splay_tree_key) decl)
- : NULL);
- bool ptr = (OMP_CLAUSE_MAP_KIND (c)
- == GOMP_MAP_ALWAYS_POINTER);
- bool attach_detach = (OMP_CLAUSE_MAP_KIND (c)
- == GOMP_MAP_ATTACH_DETACH);
- bool attach = OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
- || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH;
- bool has_attachments = false;
- /* For OpenACC, pointers in structs should trigger an
- attach action. */
- if (attach_detach
- && ((region_type & (ORT_ACC | ORT_TARGET | ORT_TARGET_DATA))
- || code == OMP_TARGET_ENTER_DATA
- || code == OMP_TARGET_EXIT_DATA))
+ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
+ {
+ if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
+ == ARRAY_TYPE)
+ remove = true;
+ else
{
- /* Turn a GOMP_MAP_ATTACH_DETACH clause into a
- GOMP_MAP_ATTACH or GOMP_MAP_DETACH clause after we
- have detected a case that needs a GOMP_MAP_STRUCT
- mapping added. */
- gomp_map_kind k
- = ((code == OACC_EXIT_DATA || code == OMP_TARGET_EXIT_DATA)
- ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
+ gomp_map_kind k = ((code == OACC_EXIT_DATA
+ || code == OMP_TARGET_EXIT_DATA)
+ ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
OMP_CLAUSE_SET_MAP_KIND (c, k);
- has_attachments = true;
}
+ }
- /* We currently don't handle non-constant offset accesses wrt to
- GOMP_MAP_STRUCT elements. */
- if (!do_map_struct)
- goto skip_map_struct;
-
- /* Nor for attach_detach for OpenMP. */
- if ((code == OMP_TARGET
- || code == OMP_TARGET_DATA
- || code == OMP_TARGET_UPDATE
- || code == OMP_TARGET_ENTER_DATA
- || code == OMP_TARGET_EXIT_DATA)
- && attach_detach)
- {
- if (DECL_P (decl))
- {
- if (struct_seen_clause == NULL)
- struct_seen_clause
- = new hash_map<tree_operand_hash, tree *>;
- if (!struct_seen_clause->get (decl))
- struct_seen_clause->put (decl, list_p);
- }
+ tree cref = decl;
- goto skip_map_struct;
- }
+ while (TREE_CODE (cref) == ARRAY_REF)
+ cref = TREE_OPERAND (cref, 0);
- if ((DECL_P (decl)
- && (n == NULL || (n->value & GOVD_MAP) == 0))
- || (!DECL_P (decl)
- && (!struct_map_to_clause
- || struct_map_to_clause->get (decl) == NULL)))
- {
- tree l = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- gomp_map_kind k = attach ? GOMP_MAP_FORCE_PRESENT
- : GOMP_MAP_STRUCT;
-
- OMP_CLAUSE_SET_MAP_KIND (l, k);
- if (base_ref)
- OMP_CLAUSE_DECL (l) = unshare_expr (base_ref);
- else
- {
- OMP_CLAUSE_DECL (l) = unshare_expr (decl);
- if (!DECL_P (OMP_CLAUSE_DECL (l))
- && (gimplify_expr (&OMP_CLAUSE_DECL (l),
- pre_p, NULL, is_gimple_lvalue,
- fb_lvalue)
- == GS_ERROR))
- {
- remove = true;
- break;
- }
- }
- OMP_CLAUSE_SIZE (l)
- = (!attach
- ? size_int (1)
- : DECL_P (OMP_CLAUSE_DECL (l))
- ? DECL_SIZE_UNIT (OMP_CLAUSE_DECL (l))
- : TYPE_SIZE_UNIT (TREE_TYPE (OMP_CLAUSE_DECL (l))));
- if (struct_map_to_clause == NULL)
- struct_map_to_clause
- = new hash_map<tree_operand_hash, tree>;
- struct_map_to_clause->put (decl, l);
- if (ptr || attach_detach)
- {
- tree **sc = (struct_seen_clause
- ? struct_seen_clause->get (decl)
- : NULL);
- tree *insert_node_pos = sc ? *sc : prev_list_p;
-
- insert_struct_comp_map (code, c, l, *insert_node_pos,
- NULL);
- *insert_node_pos = l;
- prev_list_p = NULL;
- }
- else
- {
- OMP_CLAUSE_CHAIN (l) = c;
- *list_p = l;
- list_p = &OMP_CLAUSE_CHAIN (l);
- }
- if (base_ref && code == OMP_TARGET)
- {
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- enum gomp_map_kind mkind
- = GOMP_MAP_FIRSTPRIVATE_REFERENCE;
- OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
- OMP_CLAUSE_DECL (c2) = decl;
- OMP_CLAUSE_SIZE (c2) = size_zero_node;
- OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l);
- OMP_CLAUSE_CHAIN (l) = c2;
- }
- flags = GOVD_MAP | GOVD_EXPLICIT;
- if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
- || ptr
- || attach_detach)
- flags |= GOVD_SEEN;
- if (has_attachments)
- flags |= GOVD_MAP_HAS_ATTACHMENTS;
-
- /* If this is a *pointer-to-struct expression, make sure a
- firstprivate map of the base-pointer exists. */
- if (component_ref_p
- && ((TREE_CODE (decl) == MEM_REF
- && integer_zerop (TREE_OPERAND (decl, 1)))
- || INDIRECT_REF_P (decl))
- && DECL_P (TREE_OPERAND (decl, 0))
- && !splay_tree_lookup (ctx->variables,
- ((splay_tree_key)
- TREE_OPERAND (decl, 0))))
- {
- decl = TREE_OPERAND (decl, 0);
- tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
- OMP_CLAUSE_MAP);
- enum gomp_map_kind mkind
- = GOMP_MAP_FIRSTPRIVATE_POINTER;
- OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
- OMP_CLAUSE_DECL (c2) = decl;
- OMP_CLAUSE_SIZE (c2) = size_zero_node;
- OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = c2;
- }
+ if (TREE_CODE (cref) == INDIRECT_REF)
+ cref = TREE_OPERAND (cref, 0);
- if (DECL_P (decl))
- goto do_add_decl;
- }
- else if (struct_map_to_clause)
+ if (TREE_CODE (cref) == COMPONENT_REF)
+ {
+ tree base = cref;
+ while (base && !DECL_P (base))
{
- tree *osc = struct_map_to_clause->get (decl);
- tree *sc = NULL, *scp = NULL;
- if (n != NULL
- && (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
- || ptr
- || attach_detach))
- n->value |= GOVD_SEEN;
- sc = &OMP_CLAUSE_CHAIN (*osc);
- if (*sc != c
- && (OMP_CLAUSE_MAP_KIND (*sc)
- == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
- sc = &OMP_CLAUSE_CHAIN (*sc);
- /* Here "prev_list_p" is the end of the inserted
- alloc/release nodes after the struct node, OSC. */
- for (; *sc != c; sc = &OMP_CLAUSE_CHAIN (*sc))
- if ((ptr || attach_detach) && sc == prev_list_p)
- break;
- else if (TREE_CODE (OMP_CLAUSE_DECL (*sc))
- != COMPONENT_REF
- && (TREE_CODE (OMP_CLAUSE_DECL (*sc))
- != INDIRECT_REF)
- && (TREE_CODE (OMP_CLAUSE_DECL (*sc))
- != ARRAY_REF))
- break;
- else
- {
- tree sc_decl = OMP_CLAUSE_DECL (*sc);
- poly_offset_int offsetn;
- poly_int64 bitposn;
- tree tree_offsetn;
- tree base
- = extract_base_bit_offset (sc_decl, NULL,
- &bitposn, &offsetn,
- &tree_offsetn);
- if (base != decl)
- break;
- if (scp)
- continue;
- if ((region_type & ORT_ACC) != 0)
- {
- /* This duplicate checking code is currently only
- enabled for OpenACC. */
- tree d1 = OMP_CLAUSE_DECL (*sc);
- tree d2 = OMP_CLAUSE_DECL (c);
- while (TREE_CODE (d1) == ARRAY_REF)
- d1 = TREE_OPERAND (d1, 0);
- while (TREE_CODE (d2) == ARRAY_REF)
- d2 = TREE_OPERAND (d2, 0);
- if (TREE_CODE (d1) == INDIRECT_REF)
- d1 = TREE_OPERAND (d1, 0);
- if (TREE_CODE (d2) == INDIRECT_REF)
- d2 = TREE_OPERAND (d2, 0);
- while (TREE_CODE (d1) == COMPONENT_REF)
- if (TREE_CODE (d2) == COMPONENT_REF
- && TREE_OPERAND (d1, 1)
- == TREE_OPERAND (d2, 1))
- {
- d1 = TREE_OPERAND (d1, 0);
- d2 = TREE_OPERAND (d2, 0);
- }
- else
- break;
- if (d1 == d2)
- {
- error_at (OMP_CLAUSE_LOCATION (c),
- "%qE appears more than once in map "
- "clauses", OMP_CLAUSE_DECL (c));
- remove = true;
- break;
- }
- }
- if (maybe_lt (offset1, offsetn)
- || (known_eq (offset1, offsetn)
- && maybe_lt (bitpos1, bitposn)))
- {
- if (ptr || attach_detach)
- scp = sc;
- else
- break;
- }
- }
- if (remove)
+ tree innerbase = omp_get_base_pointer (base);
+ if (!innerbase)
break;
- if (!attach)
- OMP_CLAUSE_SIZE (*osc)
- = size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc),
- size_one_node);
- if (ptr || attach_detach)
- {
- tree cl = insert_struct_comp_map (code, c, NULL,
- *prev_list_p, scp);
- if (sc == prev_list_p)
- {
- *sc = cl;
- prev_list_p = NULL;
- }
- else
- {
- *prev_list_p = OMP_CLAUSE_CHAIN (c);
- list_p = prev_list_p;
- prev_list_p = NULL;
- OMP_CLAUSE_CHAIN (c) = *sc;
- *sc = cl;
- continue;
- }
- }
- else if (*sc != c)
- {
- if (gimplify_expr (pd, pre_p, NULL, is_gimple_lvalue,
- fb_lvalue)
- == GS_ERROR)
- {
- remove = true;
- break;
- }
- *list_p = OMP_CLAUSE_CHAIN (c);
- OMP_CLAUSE_CHAIN (c) = *sc;
- *sc = c;
- continue;
- }
+ base = innerbase;
+ }
+ if (base
+ && DECL_P (base)
+ && GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
+ && POINTER_TYPE_P (TREE_TYPE (base)))
+ {
+ splay_tree_node n
+ = splay_tree_lookup (ctx->variables,
+ (splay_tree_key) base);
+ n->value |= GOVD_SEEN;
}
- skip_map_struct:
- ;
- }
- else if ((code == OACC_ENTER_DATA
- || code == OACC_EXIT_DATA
- || code == OACC_DATA
- || code == OACC_PARALLEL
- || code == OACC_KERNELS
- || code == OACC_SERIAL
- || code == OMP_TARGET_ENTER_DATA
- || code == OMP_TARGET_EXIT_DATA)
- && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
- {
- gomp_map_kind k = ((code == OACC_EXIT_DATA
- || code == OMP_TARGET_EXIT_DATA)
- ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH);
- OMP_CLAUSE_SET_MAP_KIND (c, k);
}
if (code == OMP_TARGET && OMP_CLAUSE_MAP_IN_REDUCTION (c))
@@ -10306,24 +11264,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
break;
}
- /* If this was of the form map(*pointer_to_struct), then the
- 'pointer_to_struct' DECL should be considered deref'ed. */
- if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALLOC
- || GOMP_MAP_COPY_TO_P (OMP_CLAUSE_MAP_KIND (c))
- || GOMP_MAP_COPY_FROM_P (OMP_CLAUSE_MAP_KIND (c)))
- && INDIRECT_REF_P (orig_decl)
- && DECL_P (TREE_OPERAND (orig_decl, 0))
- && TREE_CODE (TREE_TYPE (orig_decl)) == RECORD_TYPE)
- {
- tree ptr = TREE_OPERAND (orig_decl, 0);
- if (!struct_deref_set || !struct_deref_set->contains (ptr))
- {
- if (!struct_deref_set)
- struct_deref_set = new hash_set<tree> ();
- struct_deref_set->add (ptr);
- }
- }
-
if (!remove
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH_DETACH
@@ -10340,28 +11280,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
break;
}
- else
- {
- /* DECL_P (decl) == true */
- tree *sc;
- if (struct_map_to_clause
- && (sc = struct_map_to_clause->get (decl)) != NULL
- && OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_STRUCT
- && decl == OMP_CLAUSE_DECL (*sc))
- {
- /* We have found a map of the whole structure after a
- leading GOMP_MAP_STRUCT has been created, so refill the
- leading clause into a map of the whole structure
- variable, and remove the current one.
- TODO: we should be able to remove some maps of the
- following structure element maps if they are of
- compatible TO/FROM/ALLOC type. */
- OMP_CLAUSE_SET_MAP_KIND (*sc, OMP_CLAUSE_MAP_KIND (c));
- OMP_CLAUSE_SIZE (*sc) = unshare_expr (OMP_CLAUSE_SIZE (c));
- remove = true;
- break;
- }
- }
flags = GOVD_MAP | GOVD_EXPLICIT;
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TO
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TOFROM)
@@ -11031,12 +11949,6 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
ctx->clauses = *orig_list_p;
gimplify_omp_ctxp = ctx;
- if (struct_seen_clause)
- delete struct_seen_clause;
- if (struct_map_to_clause)
- delete struct_map_to_clause;
- if (struct_deref_set)
- delete struct_deref_set;
}
/* Return true if DECL is a candidate for shared to firstprivate
@@ -11185,8 +12097,6 @@ gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
return 0;
if ((flags & GOVD_SEEN) == 0)
return 0;
- if ((flags & GOVD_MAP_HAS_ATTACHMENTS) != 0)
- return 0;
if (flags & GOVD_DEBUG_PRIVATE)
{
gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_SHARED);
@@ -11503,10 +12413,15 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
*list_p = c2;
}
}
+
+ tree attach_list = NULL_TREE;
+ tree *attach_tail = &attach_list;
+
while ((c = *list_p) != NULL)
{
splay_tree_node n;
bool remove = false;
+ bool move_attach = false;
switch (OMP_CLAUSE_CODE (c))
{
@@ -11668,6 +12583,19 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
remove = true;
break;
}
+ /* If we have a target region, we can push all the attaches to the
+ end of the list (we may have standalone "attach" operations
+ synthesized for GOMP_MAP_STRUCT nodes that must be processed after
+ the attachment point AND the pointed-to block have been mapped).
+ If we have something else, e.g. "enter data", we need to keep
+ "attach" nodes together with the previous node they attach to so
+ that separate "exit data" operations work properly (see
+ libgomp/target.c). */
+ if ((ctx->region_type & ORT_TARGET) != 0
+ && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
+ || (OMP_CLAUSE_MAP_KIND (c)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION)))
+ move_attach = true;
decl = OMP_CLAUSE_DECL (c);
/* Data clauses associated with reductions must be
compatible with present_or_copy. Warn and adjust the clause
@@ -11982,10 +12910,25 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
+ else if (move_attach)
+ {
+ /* Remove attach node from here, separate out into its own list. */
+ *attach_tail = c;
+ *list_p = OMP_CLAUSE_CHAIN (c);
+ OMP_CLAUSE_CHAIN (c) = NULL_TREE;
+ attach_tail = &OMP_CLAUSE_CHAIN (c);
+ }
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
+ /* Splice attach nodes at the end of the list. */
+ if (attach_list)
+ {
+ *list_p = attach_list;
+ list_p = attach_tail;
+ }
+
/* Add in any implicit data sharing. */
struct gimplify_adjust_omp_clauses_data data;
if ((gimplify_omp_ctxp->region_type & ORT_ACC) == 0)
diff --git a/gcc/ginclude/float.h b/gcc/ginclude/float.h
index 9d368c4..afe4a71 100644
--- a/gcc/ginclude/float.h
+++ b/gcc/ginclude/float.h
@@ -257,9 +257,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define DBL_IS_IEC_60559 __DBL_IS_IEC_60559__
#define LDBL_IS_IEC_60559 __LDBL_IS_IEC_60559__
-/* Infinity in type float, or overflow if infinity not supported. */
+/* Infinity in type float; not defined if infinity not supported. */
+#if __FLT_HAS_INFINITY__
#undef INFINITY
#define INFINITY (__builtin_inff ())
+#endif
/* Quiet NaN, if supported for float. */
#if __FLT_HAS_QUIET_NAN__
diff --git a/gcc/ginclude/stdatomic.h b/gcc/ginclude/stdatomic.h
index 9f2475b..a56ba5d 100644
--- a/gcc/ginclude/stdatomic.h
+++ b/gcc/ginclude/stdatomic.h
@@ -79,7 +79,9 @@ typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t;
typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t;
+#if !(defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L)
#define ATOMIC_VAR_INIT(VALUE) (VALUE)
+#endif
/* Initialize an atomic object pointed to by PTR with VAL. */
#define atomic_init(PTR, VAL) \
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index c3f3da1..43cc2e0 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * go-lang.cc (go_langhook_init): Do not initialize
+ void_list_node.
+
2022-09-02 Martin Liska <mliska@suse.cz>
* go-lang.cc (go_langhook_pushdecl): Remove -gstabs option support, DBX-related
diff --git a/gcc/go/go-lang.cc b/gcc/go/go-lang.cc
index d519a69..4743370 100644
--- a/gcc/go/go-lang.cc
+++ b/gcc/go/go-lang.cc
@@ -98,9 +98,6 @@ go_langhook_init (void)
{
build_common_tree_nodes (false);
- /* I don't know why this has to be done explicitly. */
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
-
/* We must create the gogo IR after calling build_common_tree_nodes
(because Gogo::define_builtin_function_trees refers indirectly
to, e.g., unsigned_char_type_node) but before calling
diff --git a/gcc/jit/ChangeLog b/gcc/jit/ChangeLog
index 314b831..602cda3 100644
--- a/gcc/jit/ChangeLog
+++ b/gcc/jit/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * dummy-frontend.cc (jit_langhook_init): Do not initialize
+ void_list_node.
+
2022-08-09 Vibhav Pant <vibhavp@gmail.com>
* libgccjit.h (LIBGCCJIT_HAVE_gcc_jit_context_new_bitcast): Move
diff --git a/gcc/jit/dummy-frontend.cc b/gcc/jit/dummy-frontend.cc
index 84ff359..0687567 100644
--- a/gcc/jit/dummy-frontend.cc
+++ b/gcc/jit/dummy-frontend.cc
@@ -594,9 +594,6 @@ jit_langhook_init (void)
build_common_tree_nodes (false);
- /* I don't know why this has to be done explicitly. */
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
-
build_common_builtin_nodes ();
/* The default precision for floating point numbers. This is used
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 09a86ef..84fc5a4 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,8 @@
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ * lto-lang.cc (lto_build_c_type_nodes): Do not initialize
+ void_list_node.
+
2022-08-22 Martin Liska <mliska@suse.cz>
PR lto/106700
diff --git a/gcc/lto/lto-lang.cc b/gcc/lto/lto-lang.cc
index 972a033..d36453b 100644
--- a/gcc/lto/lto-lang.cc
+++ b/gcc/lto/lto-lang.cc
@@ -1239,7 +1239,6 @@ lto_build_c_type_nodes (void)
{
gcc_assert (void_type_node);
- void_list_node = build_tree_list (NULL_TREE, void_type_node);
string_type_node = build_pointer_type (char_type_node);
const_string_type_node
= build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
diff --git a/gcc/match.pd b/gcc/match.pd
index 17318f52..345bcb7 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1763,6 +1763,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& (int_fits_type_p (@1, TREE_TYPE (@0))
|| tree_nop_conversion_p (TREE_TYPE (@0), type)))
|| types_match (@0, @1))
+ && !POINTER_TYPE_P (TREE_TYPE (@0))
+ && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE
/* ??? This transform conflicts with fold-const.cc doing
Convert (T)(x & c) into (T)x & (T)c, if c is an integer
constants (if x has signed type, the sign bit cannot be set
@@ -1799,7 +1801,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (GIMPLE
&& TREE_CODE (@1) != INTEGER_CST
&& tree_nop_conversion_p (type, TREE_TYPE (@2))
- && types_match (type, @0))
+ && types_match (type, @0)
+ && !POINTER_TYPE_P (TREE_TYPE (@0))
+ && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE)
(bitop @0 (convert @1)))))
(for bitop (bit_and bit_ior)
diff --git a/gcc/omp-low.cc b/gcc/omp-low.cc
index fd0ccd5..f0469d2 100644
--- a/gcc/omp-low.cc
+++ b/gcc/omp-low.cc
@@ -1599,8 +1599,11 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
{
/* If this is an offloaded region, an attach operation should
only exist when the pointer variable is mapped in a prior
- clause. */
- if (is_gimple_omp_offloaded (ctx->stmt))
+ clause.
+ If we had an error, we may not have attempted to sort clauses
+ properly, so avoid the test. */
+ if (is_gimple_omp_offloaded (ctx->stmt)
+ && !seen_error ())
gcc_assert
(maybe_lookup_decl (decl, ctx)
|| (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
@@ -1633,8 +1636,10 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
if (TREE_CODE (decl) == COMPONENT_REF
|| (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
- == REFERENCE_TYPE)))
+ && (((TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
+ == REFERENCE_TYPE)
+ || (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
+ == POINTER_TYPE)))))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
@@ -14012,6 +14017,7 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
is_ref = false;
bool ref_to_array = false;
+ bool ref_to_ptr = false;
if (is_ref)
{
type = TREE_TYPE (type);
@@ -14030,6 +14036,12 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
new_var = decl2;
type = TREE_TYPE (new_var);
}
+ else if (TREE_CODE (type) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == POINTER_TYPE)
+ {
+ type = TREE_TYPE (type);
+ ref_to_ptr = true;
+ }
x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
x = fold_convert_loc (clause_loc, type, x);
if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
@@ -14046,7 +14058,8 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (ref_to_array)
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
- if (is_ref && !ref_to_array)
+ if ((is_ref && !ref_to_array)
+ || ref_to_ptr)
{
tree t = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (t);
diff --git a/gcc/range-op-float.cc b/gcc/range-op-float.cc
index 0f928b6..1e39a07 100644
--- a/gcc/range-op-float.cc
+++ b/gcc/range-op-float.cc
@@ -150,24 +150,12 @@ range_operator_float::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED) cons
return VREL_VARYING;
}
-// Set R to [NAN, NAN].
-
-static inline void
-frange_set_nan (frange &r, tree type)
-{
- REAL_VALUE_TYPE rv;
- bool res = real_nan (&rv, "", 1, TYPE_MODE (type));
- if (flag_checking)
- gcc_assert (res);
- r.set (type, rv, rv);
-}
-
// Return TRUE if OP1 is known to be free of NANs.
static inline bool
finite_operand_p (const frange &op1)
{
- return flag_finite_math_only || !op1.maybe_nan ();
+ return flag_finite_math_only || !op1.maybe_isnan ();
}
// Return TRUE if OP1 and OP2 are known to be free of NANs.
@@ -175,7 +163,7 @@ finite_operand_p (const frange &op1)
static inline bool
finite_operands_p (const frange &op1, const frange &op2)
{
- return flag_finite_math_only || (!op1.maybe_nan () && !op2.maybe_nan ());
+ return flag_finite_math_only || (!op1.maybe_isnan () && !op2.maybe_isnan ());
}
// Floating version of relop_early_resolve that takes into account NAN
@@ -220,80 +208,105 @@ frange_drop_ninf (frange &r, tree type)
r.intersect (tmp);
}
-// (X <= VAL) produces the range of [-INF, VAL].
+// If zero is in R, make sure both -0.0 and +0.0 are in the range.
+
+static inline void
+frange_add_zeros (frange &r, tree type)
+{
+ if (r.undefined_p () || r.known_isnan ())
+ return;
+
+ if (HONOR_SIGNED_ZEROS (type)
+ && (real_iszero (&r.lower_bound ()) || real_iszero (&r.upper_bound ())))
+ {
+ frange zero;
+ zero.set_zero (type);
+ r.union_ (zero);
+ }
+}
+
+// Build a range that is <= VAL and store it in R.
static bool
-build_le (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_le (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
- r.set (type, dconstninf, val);
+ r.set (type, dconstninf, val.upper_bound ());
+
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
+
return true;
}
-// (X < VAL) produces the range of [-INF, VAL).
+// Build a range that is < VAL and store it in R.
static bool
-build_lt (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_lt (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
// < -INF is outside the range.
- if (real_isinf (&val, 1))
+ if (real_isinf (&val.upper_bound (), 1))
{
if (HONOR_NANS (type))
- frange_set_nan (r, type);
+ r.set_nan (type);
else
r.set_undefined ();
return false;
}
- // Hijack LE because we only support closed intervals.
- build_le (r, type, val);
+ // We only support closed intervals.
+ r.set (type, dconstninf, val.upper_bound ());
return true;
}
-// (X >= VAL) produces the range of [VAL, +INF].
+// Build a range that is >= VAL and store it in R.
static bool
-build_ge (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_ge (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
- r.set (type, val, dconstinf);
+ r.set (type, val.lower_bound (), dconstinf);
+
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
+
return true;
}
-// (X > VAL) produces the range of (VAL, +INF].
+// Build a range that is > VAL and store it in R.
static bool
-build_gt (frange &r, tree type, const REAL_VALUE_TYPE &val)
+build_gt (frange &r, tree type, const frange &val)
{
- if (real_isnan (&val))
+ if (val.known_isnan ())
{
r.set_undefined ();
return false;
}
// > +INF is outside the range.
- if (real_isinf (&val, 0))
+ if (real_isinf (&val.lower_bound (), 0))
{
if (HONOR_NANS (type))
- frange_set_nan (r, type);
+ r.set_nan (type);
else
r.set_undefined ();
return false;
}
- // Hijack GE because we only support closed intervals.
- build_ge (r, type, val);
+ // We only support closed intervals.
+ r.set (type, val.lower_bound (), dconstinf);
return true;
}
@@ -388,18 +401,17 @@ foperator_equal::op1_range (frange &r, tree type,
case BRS_TRUE:
// If it's true, the result is the same as OP2.
r = op2;
- // Make sure we don't copy the sign bit if we may have a zero.
- if (HONOR_SIGNED_ZEROS (type) && r.contains_p (build_zero_cst (type)))
- r.set_signbit (fp_prop::VARYING);
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
// The TRUE side of op1 == op2 implies op1 is !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
case BRS_FALSE:
r.set_varying (type);
// The FALSE side of op1 == op1 implies op1 is a NAN.
if (rel == VREL_EQ)
- frange_set_nan (r, type);
+ r.set_nan (type);
// If the result is false, the only time we know anything is
// if OP2 is a constant.
else if (op2.singleton_p ()
@@ -492,11 +504,10 @@ foperator_not_equal::op1_range (frange &r, tree type,
case BRS_FALSE:
// If it's false, the result is the same as OP2.
r = op2;
- // Make sure we don't copy the sign bit if we may have a zero.
- if (HONOR_SIGNED_ZEROS (type) && r.contains_p (build_zero_cst (type)))
- r.set_signbit (fp_prop::VARYING);
+ // Add both zeros if there's the possibility of zero equality.
+ frange_add_zeros (r, type);
// The FALSE side of op1 != op2 implies op1 is !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
default:
@@ -544,7 +555,7 @@ foperator_lt::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -561,16 +572,16 @@ foperator_lt::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_lt (r, type, op2.upper_bound ()))
+ if (build_lt (r, type, op2))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x < y implies x is not +INF.
frange_drop_inf (r, type);
}
break;
case BRS_FALSE:
- build_ge (r, type, op2.lower_bound ());
+ build_ge (r, type, op2);
break;
default:
@@ -589,16 +600,16 @@ foperator_lt::op2_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_gt (r, type, op1.lower_bound ()))
+ if (build_gt (r, type, op1))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x < y implies y is not -INF.
frange_drop_ninf (r, type);
}
break;
case BRS_FALSE:
- build_le (r, type, op1.upper_bound ());
+ build_le (r, type, op1);
break;
default:
@@ -646,7 +657,7 @@ foperator_le::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -663,12 +674,12 @@ foperator_le::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_le (r, type, op2.upper_bound ()))
- r.set_nan (fp_prop::NO);
+ if (build_le (r, type, op2))
+ r.clear_nan ();
break;
case BRS_FALSE:
- build_gt (r, type, op2.lower_bound ());
+ build_gt (r, type, op2);
break;
default:
@@ -687,12 +698,12 @@ foperator_le::op2_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_ge (r, type, op1.lower_bound ()))
- r.set_nan (fp_prop::NO);
+ if (build_ge (r, type, op1))
+ r.clear_nan ();
break;
case BRS_FALSE:
- build_lt (r, type, op1.upper_bound ());
+ build_lt (r, type, op1);
break;
default:
@@ -740,7 +751,7 @@ foperator_gt::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -757,16 +768,16 @@ foperator_gt::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_gt (r, type, op2.lower_bound ()))
+ if (build_gt (r, type, op2))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x > y implies x is not -INF.
frange_drop_ninf (r, type);
}
break;
case BRS_FALSE:
- build_le (r, type, op2.upper_bound ());
+ build_le (r, type, op2);
break;
default:
@@ -785,16 +796,16 @@ foperator_gt::op2_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- if (build_lt (r, type, op1.upper_bound ()))
+ if (build_lt (r, type, op1))
{
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
// x > y implies y is not +INF.
frange_drop_inf (r, type);
}
break;
case BRS_FALSE:
- build_ge (r, type, op1.lower_bound ());
+ build_ge (r, type, op1);
break;
default:
@@ -842,7 +853,7 @@ foperator_ge::fold_range (irange &r, tree type,
else
r = range_true_and_false (type);
}
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -859,12 +870,12 @@ foperator_ge::op1_range (frange &r,
switch (get_bool_state (r, lhs, type))
{
case BRS_TRUE:
- build_ge (r, type, op2.lower_bound ());
- r.set_nan (fp_prop::NO);
+ build_ge (r, type, op2);
+ r.clear_nan ();
break;
case BRS_FALSE:
- build_lt (r, type, op2.upper_bound ());
+ build_lt (r, type, op2);
break;
default:
@@ -882,12 +893,12 @@ foperator_ge::op2_range (frange &r, tree type,
switch (get_bool_state (r, lhs, type))
{
case BRS_FALSE:
- build_gt (r, type, op1.lower_bound ());
+ build_gt (r, type, op1);
break;
case BRS_TRUE:
- build_le (r, type, op1.upper_bound ());
- r.set_nan (fp_prop::NO);
+ build_le (r, type, op1);
+ r.clear_nan ();
break;
default:
@@ -925,10 +936,10 @@ foperator_unordered::fold_range (irange &r, tree type,
relation_kind) const
{
// UNORDERED is TRUE if either operand is a NAN.
- if (op1.known_nan () || op2.known_nan ())
+ if (op1.known_isnan () || op2.known_isnan ())
r = range_true (type);
// UNORDERED is FALSE if neither operand is a NAN.
- else if (!op1.maybe_nan () && !op2.maybe_nan ())
+ else if (!op1.maybe_isnan () && !op2.maybe_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -947,14 +958,14 @@ foperator_unordered::op1_range (frange &r, tree type,
r.set_varying (type);
// Since at least one operand must be NAN, if one of them is
// not, the other must be.
- if (!op2.maybe_nan ())
- frange_set_nan (r, type);
+ if (!op2.maybe_isnan ())
+ r.set_nan (type);
break;
case BRS_FALSE:
r.set_varying (type);
// A false UNORDERED means both operands are !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
default:
@@ -991,9 +1002,9 @@ foperator_ordered::fold_range (irange &r, tree type,
const frange &op1, const frange &op2,
relation_kind) const
{
- if (!op1.maybe_nan () && !op2.maybe_nan ())
+ if (!op1.maybe_isnan () && !op2.maybe_isnan ())
r = range_true (type);
- else if (op1.known_nan () || op2.known_nan ())
+ else if (op1.known_isnan () || op2.known_isnan ())
r = range_false (type);
else
r = range_true_and_false (type);
@@ -1011,14 +1022,14 @@ foperator_ordered::op1_range (frange &r, tree type,
case BRS_TRUE:
r.set_varying (type);
// The TRUE side of op1 ORDERED op2 implies op1 is !NAN.
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
case BRS_FALSE:
r.set_varying (type);
// The FALSE side of op1 ORDERED op1 implies op1 is !NAN.
if (rel == VREL_EQ)
- r.set_nan (fp_prop::NO);
+ r.clear_nan ();
break;
default:
diff --git a/gcc/reg-stack.cc b/gcc/reg-stack.cc
index fd03250..95e0e61 100644
--- a/gcc/reg-stack.cc
+++ b/gcc/reg-stack.cc
@@ -1073,7 +1073,8 @@ move_for_stack_reg (rtx_insn *insn, stack_ptr regstack, rtx pat)
break;
/* The destination must be dead, or life analysis is borked. */
- gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG);
+ gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG
+ || any_malformed_asm);
/* If the source is not live, this is yet another case of
uninitialized variables. Load up a NaN instead. */
diff --git a/gcc/targhooks.cc b/gcc/targhooks.cc
index b15ae19..d17d393 100644
--- a/gcc/targhooks.cc
+++ b/gcc/targhooks.cc
@@ -93,6 +93,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
+#include "options.h"
bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
@@ -1181,9 +1182,21 @@ default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
static bool issued_error;
if (!issued_error)
{
+ const char *name = NULL;
+ for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL;
+ ++i)
+ if (flag_zero_call_used_regs == zero_call_used_regs_opts[i].flag)
+ {
+ name = zero_call_used_regs_opts[i].name;
+ break;
+ }
+
+ if (!name)
+ name = "";
+
issued_error = true;
- sorry ("%qs not supported on this target",
- "-fzero-call-used-regs");
+ sorry ("argument %qs is not supported for %qs on this target",
+ name, "-fzero-call-used-regs");
}
}
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 21459ed..a6048da 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,249 @@
+2022-09-19 Marek Polacek <polacek@redhat.com>
+
+ PR c/106947
+ * c-c++-common/Waddress-7.c: New test.
+
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * gfortran.dg/ieee/modes_1.f90: New test.
+
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * gfortran.dg/ieee/rounding_2.f90: New test.
+
+2022-09-18 Julian Brown <julian@codesourcery.com>
+
+ * g++.dg/gomp/target-lambda-1.C: Adjust expected scan output.
+
+2022-09-18 Palmer Dabbelt <palmer@rivosinc.com>
+
+ * gcc.dg/tree-ssa/gen-vect-34.c: Skip RISC-V targets.
+
+2022-09-17 Patrick Palka <ppalka@redhat.com>
+
+ * g++.dg/modules/typename-friend_a.C: New test.
+ * g++.dg/modules/typename-friend_b.C: New test.
+
+2022-09-17 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106958
+ * gcc.c-torture/compile/pr106958.c: New test.
+
+2022-09-16 Eugene Rozenfeld <erozen@microsoft.com>
+
+ * gcc.dg/tree-prof/indir-call-prof-2.c: Fix dg-final-use-autofdo.
+
+2022-09-16 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/92505
+ * g++.dg/cpp0x/constexpr-mutable3.C: New test.
+ * g++.dg/cpp1y/constexpr-mutable1.C: New test.
+
+2022-09-16 Jason Merrill <jason@redhat.com>
+
+ PR c++/106858
+ * g++.dg/gomp/map-3.C: New test.
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/106857
+ * gfortran.dg/pr106857.f90: New test.
+
+2022-09-15 Harald Anlauf <anlauf@gmx.de>
+ Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/104314
+ * gfortran.dg/pr104314.f90: New test.
+
+2022-09-15 Joseph Myers <joseph@codesourcery.com>
+
+ * gcc.dg/c2x-float-2.c: Require inff effective-target.
+ * gcc.dg/c2x-float-11.c: New test.
+
+2022-09-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * g++.dg/tree-ssa/pr106922.C: New testcase.
+
+2022-09-15 Julian Brown <julian@codesourcery.com>
+
+ * c-c++-common/gomp/target-50.c: Modify scan pattern.
+
+2022-09-15 Julian Brown <julian@codesourcery.com>
+
+ * c-c++-common/goacc/mdc-2.c: Update expected errors.
+ * g++.dg/goacc/mdc.C: Likewise.
+
+2022-09-15 Jiufu Guo <guojiufu@linux.ibm.com>
+
+ PR target/106550
+ * gcc.target/powerpc/pr106550.c: New test.
+ * gcc.target/powerpc/pr106550_1.c: New test.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * g++.dg/goacc/member-array-acc.C: New test.
+ * g++.dg/gomp/member-array-omp.C: New test.
+ * g++.dg/gomp/target-3.C: Update expected output.
+ * g++.dg/gomp/target-lambda-1.C: Likewise.
+ * g++.dg/gomp/target-this-2.C: Likewise.
+ * c-c++-common/goacc/deep-copy-arrayofstruct.c: Move test from here.
+ * c-c++-common/gomp/target-50.c: New test.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106938
+ * gcc.dg/pr106938.c: New testcase.
+
+2022-09-14 Aldy Hernandez <aldyh@redhat.com>
+
+ PR tree-optimization/106936
+ * g++.dg/tree-ssa/pr106936.C: New test.
+
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * g++.dg/gomp/target-lambda-1.C: Adjust expected output.
+ * g++.dg/gomp/target-this-3.C: Likewise.
+ * g++.dg/gomp/target-this-4.C: Likewise.
+
+2022-09-14 Robin Dapp <rdapp@linux.ibm.com>
+
+ * gcc.target/s390/ifcvt-one-insn-bool.c: Add -mzarch.
+ * gcc.target/s390/ifcvt-one-insn-char.c: Dito.
+ * gcc.target/s390/ifcvt-two-insns-bool.c: Dito.
+ * gcc.target/s390/ifcvt-two-insns-int.c: Dito.
+ * gcc.target/s390/ifcvt-two-insns-long.c: Add -mzarch and change
+ long into long long.
+
+2022-09-14 Robin Dapp <rdapp@linux.ibm.com>
+
+ * gcc.target/s390/vector/vperm-rev-z14.c: Add -save-temps.
+ * gcc.target/s390/vector/vperm-rev-z15.c: Likewise.
+
+2022-09-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106878
+ * gcc.c-torture/compile/pr106878.c: New test.
+
+2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106934
+ * gfortran.dg/pr106934.f90: New testcase.
+
+2022-09-14 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/pr106905.c: New test.
+ * gcc.target/ia64/pr106905.c: New test.
+
+2022-09-14 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+ Yvan ROUX <yvan.roux@foss.st.com>
+
+ PR target/95720
+ * lib/g++.exp: Moved gluefile block to after flags have been
+ prefixed for the target_compile call.
+ * lib/gcc.exp: Likewise.
+ * lib/wrapper.exp: Reset adjusted state flag.
+
+2022-09-13 Roger Sayle <roger@nextmovesoftware.com>
+
+ PR target/106877
+ * g++.dg/ext/pr106877.C: New test case.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * g++.dg/cpp1z/noexcept-type26.C: New test.
+ * g++.dg/cpp2a/explicit19.C: New test.
+ * g++.dg/ext/integer-pack6.C: New test.
+
+2022-09-13 Kewen Lin <linkw@linux.ibm.com>
+
+ PR target/104482
+ * gcc.target/powerpc/pr104482.c: New test.
+
+2022-09-13 Kewen.Lin <linkw@gcc.gnu.org>
+
+ PR target/105485
+ * g++.target/powerpc/pr105485.C: New test.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/ldp_stp_20.c: New test.
+ * gcc.target/aarch64/ldp_stp_21.c: Likewise.
+ * gcc.target/aarch64/ldp_stp_22.c: Likewise.
+ * gcc.target/aarch64/ldp_stp_23.c: Likewise.
+ * gcc.target/aarch64/ldp_stp_24.c: Likewise.
+ * gcc.target/aarch64/movv16qi_1.c (gpr_to_gpr): New function.
+ * gcc.target/aarch64/movv8qi_1.c (gpr_to_gpr): Likewise.
+ * gcc.target/aarch64/movv16qi_2.c: New test.
+ * gcc.target/aarch64/movv16qi_3.c: Likewise.
+ * gcc.target/aarch64/movv2di_1.c: Likewise.
+ * gcc.target/aarch64/movv2x16qi_1.c: Likewise.
+ * gcc.target/aarch64/movv2x8qi_1.c: Likewise.
+ * gcc.target/aarch64/movv3x16qi_1.c: Likewise.
+ * gcc.target/aarch64/movv3x8qi_1.c: Likewise.
+ * gcc.target/aarch64/movv4x16qi_1.c: Likewise.
+ * gcc.target/aarch64/movv4x8qi_1.c: Likewise.
+ * gcc.target/aarch64/movv8qi_2.c: Likewise.
+ * gcc.target/aarch64/movv8qi_3.c: Likewise.
+ * gcc.target/aarch64/vect_unary_2.c: Likewise.
+
+2022-09-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/acle/ls64_asm_2.c: New test.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/101906
+ * g++.dg/template/evaluated1.C: New test.
+ * g++.dg/template/evaluated1a.C: New test.
+ * g++.dg/template/evaluated1b.C: New test.
+ * g++.dg/template/evaluated1c.C: New test.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106893
+ PR c++/90451
+ * g++.dg/cpp1y/auto-fn65.C: New test.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/93259
+ * g++.dg/cpp0x/initlist-array17.C: New test.
+
+2022-09-12 Jason Merrill <jason@redhat.com>
+
+ PR c++/106567
+ * g++.dg/cpp0x/lambda/lambda-array4.C: New test.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ PR c++/86491
+ * g++.dg/warn/anonymous-namespace-3.C: Use separate dg-warning
+ directives for C++98 and everything else.
+ * g++.dg/warn/Wsubobject-linkage-5.C: New test.
+
+2022-09-12 Joseph Myers <joseph@codesourcery.com>
+
+ * gcc.dg/atomic/c2x-stdatomic-var-init-1.c: New test.
+
+2022-09-12 Torbjörn SVENSSON <torbjorn.svensson@foss.st.com>
+
+ * g++.dg/gcov/gcov.exp: Respect triplet when looking for gcov.
+ * gcc.misc-tests/gcov.exp: Likewise.
+
+2022-09-12 Joffrey Huguet <huguet@adacore.com>
+
+ * gnat.dg/aspect2.adb: Removed.
+ * gnat.dg/aspect2.ads: Removed.
+ * gnat.dg/config_pragma1.adb: Removed.
+ * gnat.dg/config_pragma1_pkg.ads: Removed.
+ * gnat.dg/equal8.adb: Removed.
+ * gnat.dg/equal8.ads: Removed.
+ * gnat.dg/equal8_pkg.ads: Removed.
+ * gnat.dg/formal_containers.adb: Removed.
+ * gnat.dg/iter1.adb: Removed.
+ * gnat.dg/iter1.ads: Removed.
+
2022-09-11 Tim Lange <mail@tim-lange.me>
PR analyzer/106845
diff --git a/gcc/testsuite/c-c++-common/Waddress-7.c b/gcc/testsuite/c-c++-common/Waddress-7.c
new file mode 100644
index 0000000..1799485
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/Waddress-7.c
@@ -0,0 +1,22 @@
+/* PR c/106947 */
+/* { dg-do compile } */
+/* { dg-options "-Waddress" } */
+
+#ifndef __cplusplus
+# define bool _Bool
+#endif
+
+#pragma GCC diagnostic ignored "-Waddress"
+int s; /* { dg-bogus "declared" } */
+bool e = &s;
+int
+main ()
+{
+ int error = 0;
+ {
+ bool e1 = &s;
+ if (!e1)
+ error = 1;
+ }
+ return error;
+}
diff --git a/gcc/testsuite/c-c++-common/goacc/mdc-2.c b/gcc/testsuite/c-c++-common/goacc/mdc-2.c
index df3ce54..246625c 100644
--- a/gcc/testsuite/c-c++-common/goacc/mdc-2.c
+++ b/gcc/testsuite/c-c++-common/goacc/mdc-2.c
@@ -37,7 +37,9 @@ t1 ()
#pragma acc exit data detach(z[:]) /* { dg-error "expected single pointer in .detach. clause" } */
/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(z[3]) /* { dg-error "expected pointer in .attach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc exit data detach(z[3]) /* { dg-error "expected pointer in .detach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(s.e)
#pragma acc exit data detach(s.e) attach(z) /* { dg-error ".attach. is not valid for" } */
diff --git a/gcc/testsuite/c-c++-common/gomp/target-50.c b/gcc/testsuite/c-c++-common/gomp/target-50.c
new file mode 100644
index 0000000..41f1d37
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/target-50.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-fdump-tree-gimple" } */
+
+typedef struct
+{
+ int *arr;
+} L;
+
+int main()
+{
+ L *tmp;
+
+ /* There shouldn't be an order dependency here... */
+
+ #pragma omp target map(to: tmp->arr) map(tofrom: tmp->arr[0:10])
+ { }
+
+ #pragma omp target map(tofrom: tmp->arr[0:10]) map(to: tmp->arr)
+ { }
+/* { dg-final { scan-tree-dump-times {map\(struct:\*tmp \[len: 1\]\) map\(to:tmp[._0-9]*->arr \[len: [0-9]+\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(attach:tmp[._0-9]*->arr \[bias: 0\]\)} 2 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */
+
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C
new file mode 100644
index 0000000..51499fa
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-mutable3.C
@@ -0,0 +1,9 @@
+// PR c++/92505
+// { dg-do compile { target c++11 } }
+
+struct A { mutable int m; };
+
+constexpr int f(A a) { return a.m; }
+
+static_assert(f({42}) == 42, "");
+// { dg-error "non-constant|mutable" "" { target c++11_only } .-1 }
diff --git a/gcc/testsuite/g++.dg/cpp0x/initlist-array17.C b/gcc/testsuite/g++.dg/cpp0x/initlist-array17.C
new file mode 100644
index 0000000..c4284a7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/initlist-array17.C
@@ -0,0 +1,37 @@
+// PR c++/93259
+// { dg-do compile { target c++11 } }
+
+template <class T, class U> struct is_same;
+template <class T> struct is_same<T,T> { };
+
+using Array = int[];
+
+template <typename ...Ts>
+void bar1(Ts ...)
+{
+ auto && array = Array{ 1, 2, 3 };
+
+ is_same<int (&&)[3], decltype(array)>{}; // this fails, deduces array as int (&&) []
+}
+
+template <typename T>
+void bar2()
+{
+ auto && array = Array{ 1, 2, 3 };
+
+ is_same<int (&&)[3], decltype(array)>{}; // this fails, deduces array as int (&&) []
+}
+
+void bar3()
+{
+ auto && array = Array{ 1, 2, 3 };
+
+ is_same<int (&&)[3], decltype(array)>{}; // OK
+}
+
+int main()
+{
+ bar1<int>(1, 2, 3);
+ bar2<int>();
+ bar3();
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C
new file mode 100644
index 0000000..94ec7f8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-array4.C
@@ -0,0 +1,29 @@
+// PR c++/106567
+// { dg-do compile { target c++11 } }
+
+template <class V>
+void urgh()
+{
+ const V x[] = {V(0), V(1), V(2), V(0)};
+
+ [&]() {
+ for (auto& v : x) {}
+ }();
+}
+
+void no_urgh()
+{
+ using V = int;
+
+ const V x[] = {V(0), V(1), V(2), V(0)};
+
+ [&]() {
+ for (auto& v : x) {}
+ }();
+}
+
+int main()
+{
+ no_urgh();
+ urgh<int>();
+}
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn65.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn65.C
new file mode 100644
index 0000000..78bb004
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn65.C
@@ -0,0 +1,10 @@
+// PR c++/106893
+// { dg-do compile { target c++14 } }
+
+template <typename T>
+struct CoordTraits
+{
+ static auto GetX(T const &p) { return 1; }
+};
+typedef CoordTraits<int> Traits;
+static constexpr auto GetX = Traits::GetX;
diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C
new file mode 100644
index 0000000..6c47988
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-mutable1.C
@@ -0,0 +1,16 @@
+// PR c++/92505
+// { dg-do compile { target c++14 } }
+
+struct S { mutable int m; };
+
+static_assert(S{42}.m == 42, "");
+
+constexpr int f() {
+ S s = {40};
+ s.m++;
+ const auto& cs = s;
+ ++cs.m;
+ return cs.m;
+}
+
+static_assert(f() == 42, "");
diff --git a/gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C b/gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C
new file mode 100644
index 0000000..491df4d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/noexcept-type26.C
@@ -0,0 +1,12 @@
+// Verify a non-constant conditional noexcept-specifier in a function type
+// respects SFINAE.
+// { dg-do compile { target c++17 } }
+
+template<class T> void f(void() noexcept(T::value)) = delete;
+template<class T> void f(...);
+
+struct B { static bool value; };
+
+int main() {
+ f<B>(nullptr);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/explicit19.C b/gcc/testsuite/g++.dg/cpp2a/explicit19.C
new file mode 100644
index 0000000..4790381
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/explicit19.C
@@ -0,0 +1,12 @@
+// Verify a conditional explicit-specifier is a SFINAE context.
+// { dg-do compile { target c++20 } }
+
+struct A {
+ template<class T> explicit(T::value) A(T) = delete;
+ A(...);
+};
+
+struct B { static bool value; };
+
+A x(0);
+A y(B{});
diff --git a/gcc/testsuite/g++.dg/ext/integer-pack6.C b/gcc/testsuite/g++.dg/ext/integer-pack6.C
new file mode 100644
index 0000000..dc43116
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/integer-pack6.C
@@ -0,0 +1,13 @@
+// Verify a non-constant argument to __integer_pack respects SFINAE.
+// { dg-do compile { target c++11 } }
+
+template<int...> struct A { };
+
+template<class T> auto f(int) -> A<__integer_pack(T::value)...> = delete;
+template<class T> void f(...);
+
+struct B { static int value; };
+
+int main() {
+ f<B>(0);
+}
diff --git a/gcc/testsuite/g++.dg/ext/pr106877.C b/gcc/testsuite/g++.dg/ext/pr106877.C
new file mode 100644
index 0000000..6bffed9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/pr106877.C
@@ -0,0 +1,13 @@
+// PR target/106877
+// { dg-do compile { target i?86-*-* x86_64-*-* } }
+// { dg-options "-O1 -m16 -mtune=sandybridge -flive-range-shrinkage -fno-dce" }
+
+void
+foo (float b, double c)
+{
+ for (int e = 0; e < 2; e++)
+ {
+ asm volatile ("" : "+f" (c)); // { dg-error "must specify a single register" }
+ asm ("" : "+rm" (c = b));
+ }
+}
diff --git a/gcc/testsuite/g++.dg/gcov/gcov.exp b/gcc/testsuite/g++.dg/gcov/gcov.exp
index 88acd95..04e7a01 100644
--- a/gcc/testsuite/g++.dg/gcov/gcov.exp
+++ b/gcc/testsuite/g++.dg/gcov/gcov.exp
@@ -24,9 +24,9 @@ global GXX_UNDER_TEST
# Find gcov in the same directory as $GXX_UNDER_TEST.
if { ![is_remote host] && [string match "*/*" [lindex $GXX_UNDER_TEST 0]] } {
- set GCOV [file dirname [lindex $GXX_UNDER_TEST 0]]/gcov
+ set GCOV [file dirname [lindex $GXX_UNDER_TEST 0]]/[transform gcov]
} else {
- set GCOV gcov
+ set GCOV [transform gcov]
}
# Initialize harness.
diff --git a/gcc/testsuite/g++.dg/goacc/mdc.C b/gcc/testsuite/g++.dg/goacc/mdc.C
index e8ba1cc..9d460f2 100644
--- a/gcc/testsuite/g++.dg/goacc/mdc.C
+++ b/gcc/testsuite/g++.dg/goacc/mdc.C
@@ -43,7 +43,9 @@ t1 ()
#pragma acc exit data detach(rz[:]) /* { dg-error "expected single pointer in .detach. clause" } */
/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(rz[3]) /* { dg-error "expected pointer in .attach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc exit data detach(rz[3]) /* { dg-error "expected pointer in .detach. clause" } */
+/* { dg-error "has no data movement clause" "" { target *-*-* } .-1 } */
#pragma acc enter data attach(rs.e)
#pragma acc exit data detach(rs.e) attach(rz) /* { dg-error ".attach. is not valid for" } */
diff --git a/gcc/testsuite/g++.dg/goacc/member-array-acc.C b/gcc/testsuite/g++.dg/goacc/member-array-acc.C
new file mode 100644
index 0000000..9993768
--- /dev/null
+++ b/gcc/testsuite/g++.dg/goacc/member-array-acc.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-fdump-tree-gimple" } */
+
+struct Foo {
+ float *a;
+ void init(int N) {
+ a = new float[N];
+ #pragma acc enter data create(a[0:N])
+ }
+};
+int main() { Foo x; x.init(1024); }
+
+/* { dg-final { scan-tree-dump {struct:\*\(struct Foo \*\) this \[len: 1\]\) map\(alloc:this->a \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: _[0-9]+\]\) map\(attach:this->a \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/map-3.C b/gcc/testsuite/g++.dg/gomp/map-3.C
new file mode 100644
index 0000000..c45f850
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/map-3.C
@@ -0,0 +1,9 @@
+// PR c++/106858
+// { dg-additional-options "-fopenmp -fsanitize=undefined" }
+
+class A {
+ void f() {
+ #pragma omp target map(this->f) // { dg-error "member function" }
+ ;
+ }
+};
diff --git a/gcc/testsuite/g++.dg/gomp/member-array-omp.C b/gcc/testsuite/g++.dg/gomp/member-array-omp.C
new file mode 100644
index 0000000..a53aa44
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/member-array-omp.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-fdump-tree-gimple" } */
+
+struct Foo {
+ float *a;
+ void init(int N) {
+ a = new float[N];
+ #pragma omp target enter data map(alloc:a[0:N])
+ }
+};
+int main() { Foo x; x.init(1024); }
+
+/* { dg-final { scan-tree-dump {map\(alloc:\*_[0-9]+ \[len: _[0-9]+\]\) map\(attach:this->a \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-3.C b/gcc/testsuite/g++.dg/gomp/target-3.C
index f4d40ec..432f026 100644
--- a/gcc/testsuite/g++.dg/gomp/target-3.C
+++ b/gcc/testsuite/g++.dg/gomp/target-3.C
@@ -33,4 +33,6 @@ T<N>::bar (int x)
template struct T<0>;
-/* { dg-final { scan-tree-dump-times "map\\(struct:\\*this \\\[len: 2\\\]\\) map\\(alloc:this->a \\\[len: \[0-9\]+\\\]\\) map\\(alloc:this->b \\\[len: \[0-9\]+\\\]\\)" 4 "gimple" } } */
+/* { dg-final { scan-tree-dump-times "map\\(struct:\\*\\(struct S \\*\\) this \\\[len: 2\\\]\\) map\\(alloc:this->a \\\[len: \[0-9\]+\\\]\\) map\\(alloc:this->b \\\[len: \[0-9\]+\\\]\\)" 2 "gimple" } } */
+
+/* { dg-final { scan-tree-dump-times "map\\(struct:\\*\\(struct T \\*\\) this \\\[len: 2\\\]\\) map\\(alloc:this->a \\\[len: \[0-9\]+\\\]\\) map\\(alloc:this->b \\\[len: \[0-9\]+\\\]\\)" 2 "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-lambda-1.C b/gcc/testsuite/g++.dg/gomp/target-lambda-1.C
index 7f83f92..5ce8cea 100644
--- a/gcc/testsuite/g++.dg/gomp/target-lambda-1.C
+++ b/gcc/testsuite/g++.dg/gomp/target-lambda-1.C
@@ -87,8 +87,8 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(b\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:iptr \[pointer assign, bias: 0\]\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(attach_zero_length_array_section:__closure->__iptr \[bias: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(b\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:iptr \[pointer assign, bias: 0\]\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:__closure->__iptr \[bias: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(attach_zero_length_array_section:loop\.__data1 \[bias: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:loop\.__data1 \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(attach_zero_length_array_section:loop\.__data2 \[bias: 0\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(end\) firstprivate\(begin\) map\(to:loop \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:loop\.__data2 \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-this-2.C b/gcc/testsuite/g++.dg/gomp/target-this-2.C
index 8a76bb8..cc08e7e 100644
--- a/gcc/testsuite/g++.dg/gomp/target-this-2.C
+++ b/gcc/testsuite/g++.dg/gomp/target-this-2.C
@@ -46,4 +46,4 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(m\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:v \[len: [0-9]+\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {map\(alloc:MEM\[\(char \*\)_[0-9]+\] \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(m\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:v \[len: [0-9]+\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-this-3.C b/gcc/testsuite/g++.dg/gomp/target-this-3.C
index 91cfbd6..bc2cc0b 100644
--- a/gcc/testsuite/g++.dg/gomp/target-this-3.C
+++ b/gcc/testsuite/g++.dg/gomp/target-this-3.C
@@ -100,6 +100,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:this->refptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9+] \[len: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:this->refptr \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(attach_zero_length_array_section:this->ptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(tofrom:\*this \[len: [0-9]+\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:this->ptr \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/gomp/target-this-4.C b/gcc/testsuite/g++.dg/gomp/target-this-4.C
index e4b2a71..9ade3cc 100644
--- a/gcc/testsuite/g++.dg/gomp/target-this-4.C
+++ b/gcc/testsuite/g++.dg/gomp/target-this-4.C
@@ -102,6 +102,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: 1\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(attach_zero_length_array_section:_[0-9]+->ptr \[bias: 0\]\)} "gimple" } } */
-/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:_[0-9]+->refptr \[bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\)} "gimple" } } */
+/* { dg-final { scan-tree-dump {#pragma omp target num_teams.* firstprivate\(n\) map\(alloc:MEM.* \[len: 0\]\) map\(firstprivate:this \[pointer assign, bias: 0\]\) map\(to:\*__closure \[len: [0-9]+\]\) map\(firstprivate:__closure \[pointer assign, bias: 0\]\) map\(tofrom:\*_[0-9]+ \[len: [0-9]+\]\) map\(always_pointer:__closure->__this \[pointer assign, bias: 0\]\) map\(from:mapped \[len: [0-9]+\]\) map\(alloc:\*_[0-9]+ \[len: 0\]\) map\(alloc:\*_[0-9]+ \[pointer assign, zero-length array section, bias: 0\]\) map\(attach:_[0-9]+->refptr \[bias: 0\]\)} "gimple" } } */
diff --git a/gcc/testsuite/g++.dg/modules/typename-friend_a.C b/gcc/testsuite/g++.dg/modules/typename-friend_a.C
new file mode 100644
index 0000000..aa426fe
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/typename-friend_a.C
@@ -0,0 +1,11 @@
+// { dg-additional-options "-fmodules-ts" }
+export module foo;
+// { dg-module-cmi foo }
+
+template<class T>
+struct A {
+ friend typename T::type;
+ friend void f(A) { }
+private:
+ static constexpr int value = 42;
+};
diff --git a/gcc/testsuite/g++.dg/modules/typename-friend_b.C b/gcc/testsuite/g++.dg/modules/typename-friend_b.C
new file mode 100644
index 0000000..97da9d8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/typename-friend_b.C
@@ -0,0 +1,6 @@
+// { dg-additional-options "-fmodules-ts" }
+module foo;
+
+struct C;
+struct B { using type = C; };
+struct C { static_assert(A<B>::value == 42); };
diff --git a/gcc/testsuite/g++.dg/template/evaluated1.C b/gcc/testsuite/g++.dg/template/evaluated1.C
new file mode 100644
index 0000000..41845c6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1.C
@@ -0,0 +1,17 @@
+// PR c++/101906
+// Verify the template arguments of an alias template-id are evaluated even
+// in an unevaluated context.
+// { dg-do compile { target c++11 } }
+
+template<int, class T> using skip = T;
+
+template<class T>
+constexpr unsigned sizeof_() {
+ return sizeof(skip<(T(), 0), T>);
+}
+
+struct A {
+ int m = -1;
+};
+
+static_assert(sizeof_<A>() == sizeof(A), "");
diff --git a/gcc/testsuite/g++.dg/template/evaluated1a.C b/gcc/testsuite/g++.dg/template/evaluated1a.C
new file mode 100644
index 0000000..7828687
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1a.C
@@ -0,0 +1,16 @@
+// PR c++/101906
+// Like unevaluated1.C, but where the unevaluated context is a
+// constraint instead of sizeof.
+// { dg-do compile { target c++20 } }
+
+template<int> using voidify = void;
+
+template<class T>
+concept constant_value_initializable
+ = requires { typename voidify<(T(), 0)>; };
+
+struct A {
+ int m = -1;
+};
+
+static_assert(constant_value_initializable<A>);
diff --git a/gcc/testsuite/g++.dg/template/evaluated1b.C b/gcc/testsuite/g++.dg/template/evaluated1b.C
new file mode 100644
index 0000000..7994065
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1b.C
@@ -0,0 +1,17 @@
+// PR c++/101906
+// Like unevaluated1.C, but using a function template instead of an
+// alias template.
+// { dg-do compile { target c++14 } }
+
+template<int, class T> T skip();
+
+template<class T>
+constexpr unsigned sizeof_() {
+ return sizeof(skip<(T(), 0), T>());
+}
+
+struct A {
+ int m = -1;
+};
+
+static_assert(sizeof_<A>() == sizeof(A), "");
diff --git a/gcc/testsuite/g++.dg/template/evaluated1c.C b/gcc/testsuite/g++.dg/template/evaluated1c.C
new file mode 100644
index 0000000..15c5582
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/evaluated1c.C
@@ -0,0 +1,17 @@
+// PR c++/101906
+// Like evaluated1b.C, but using a variable template instead of a
+// function template.
+// { dg-do compile { target c++14 } }
+
+template<int, class T> T skip;
+
+template<class T>
+constexpr unsigned sizeof_() {
+ return sizeof(skip<(T(), 0), T>);
+}
+
+struct A {
+ int m = -1;
+};
+
+static_assert(sizeof_<A>() == sizeof(A), "");
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr106922.C b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
new file mode 100644
index 0000000..faf379b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
@@ -0,0 +1,91 @@
+// { dg-require-effective-target c++20 }
+// { dg-options "-O2 -fdump-tree-pre-details -fdump-tree-cddce3" }
+
+template <typename> struct __new_allocator {
+ void deallocate(int *, int) { operator delete(0); }
+};
+template <typename _Tp> using __allocator_base = __new_allocator<_Tp>;
+template <typename> struct allocator : __allocator_base<int> {
+ [[__gnu__::__always_inline__]] void deallocate(int *__p, int __n) {
+ __allocator_base<int>::deallocate(__p, __n);
+ }
+};
+template <typename> struct allocator_traits;
+template <typename _Tp> struct allocator_traits<allocator<_Tp>> {
+ using allocator_type = allocator<_Tp>;
+ using pointer = _Tp *;
+ using size_type = int;
+ template <typename _Up> using rebind_alloc = allocator<_Up>;
+ static void deallocate(allocator_type &__a, pointer __p, size_type __n) {
+ __a.deallocate(__p, __n);
+ }
+};
+template <typename _Alloc> struct __alloc_traits : allocator_traits<_Alloc> {
+ typedef allocator_traits<_Alloc> _Base_type;
+ template <typename _Tp> struct rebind {
+ typedef _Base_type::template rebind_alloc<_Tp> other;
+ };
+};
+long _M_deallocate___n;
+struct _Vector_base {
+ typedef __alloc_traits<allocator<int>>::rebind<int>::other _Tp_alloc_type;
+ typedef __alloc_traits<_Tp_alloc_type>::pointer pointer;
+ struct _Vector_impl_data {
+ pointer _M_start;
+ };
+ struct _Vector_impl : _Tp_alloc_type, _Vector_impl_data {};
+ ~_Vector_base() { _M_deallocate(_M_impl._M_start); }
+ _Vector_impl _M_impl;
+ void _M_deallocate(pointer __p) {
+ if (__p)
+ __alloc_traits<_Tp_alloc_type>::deallocate(_M_impl, __p,
+ _M_deallocate___n);
+ }
+};
+struct vector : _Vector_base {};
+struct aligned_storage {
+ int dummy_;
+ int *ptr_ref0;
+ vector &ref() {
+ vector *__trans_tmp_2;
+ void *__trans_tmp_1 = &dummy_;
+ union {
+ void *ap_pvoid;
+ vector *as_ptype;
+ } caster{__trans_tmp_1};
+ __trans_tmp_2 = caster.as_ptype;
+ return *__trans_tmp_2;
+ }
+};
+struct optional_base {
+ optional_base operator=(optional_base &) {
+ bool __trans_tmp_3 = m_initialized;
+ if (__trans_tmp_3)
+ m_initialized = false;
+ return *this;
+ }
+ ~optional_base() {
+ if (m_initialized)
+ m_storage.ref().~vector();
+ }
+ bool m_initialized;
+ aligned_storage m_storage;
+};
+struct optional : optional_base {
+ optional() : optional_base() {}
+};
+template <class> using Optional = optional;
+struct Trans_NS___cxx11_basic_stringstream {};
+void operator<<(Trans_NS___cxx11_basic_stringstream, int);
+int testfunctionfoo_myStructs[10];
+void testfunctionfoo() {
+ Optional<char> external, internal;
+ for (auto myStruct : testfunctionfoo_myStructs) {
+ Trans_NS___cxx11_basic_stringstream address_stream;
+ address_stream << myStruct;
+ external = internal;
+ }
+}
+
+// { dg-final { scan-tree-dump-times "Found fully redundant value" 4 "pre" { xfail { ! lp64 } } } }
+// { dg-final { scan-tree-dump-not "m_initialized" "cddce3" { xfail { ! lp64 } } } }
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr106936.C b/gcc/testsuite/g++.dg/tree-ssa/pr106936.C
new file mode 100644
index 0000000..c3096e0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr106936.C
@@ -0,0 +1,14 @@
+// { dg-do compile }
+// { dg-options "-O2 -fno-tree-ccp -fno-tree-forwprop -fno-tree-fre" }
+
+namespace testPointerToMemberMiscCasts2 {
+struct B {
+ int f;
+};
+struct L : public B { };
+struct R : public B { };
+struct D : public L, R { };
+ int B::* pb = &B::f;
+ int R::* pr = pb;
+ int D::* pdr = pr;
+}
diff --git a/gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C b/gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C
new file mode 100644
index 0000000..e2c2fd9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wsubobject-linkage-5.C
@@ -0,0 +1,7 @@
+// PR c++/86491
+// { dg-do compile { target c++11 } }
+
+template <int *> struct NT{};
+#line 6 "tM.C"
+static int d;
+struct D : NT<&d> {}; // { dg-warning "internal linkage" }
diff --git a/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C b/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C
index 8b72abd..ce5745b 100644
--- a/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C
+++ b/gcc/testsuite/g++.dg/warn/anonymous-namespace-3.C
@@ -7,7 +7,8 @@
struct B { std::auto_ptr<A> p; };
#line 10 "foo.C"
-struct C // { dg-warning "uses the anonymous namespace" }
+struct C // { dg-warning "has internal linkage" "" { target c++11 } }
+// { dg-warning "uses the anonymous namespace" "" { target c++98_only } .-1 }
{
std::auto_ptr<A> p;
};
diff --git a/gcc/testsuite/g++.target/powerpc/pr105485.C b/gcc/testsuite/g++.target/powerpc/pr105485.C
new file mode 100644
index 0000000..db1bd94
--- /dev/null
+++ b/gcc/testsuite/g++.target/powerpc/pr105485.C
@@ -0,0 +1,9 @@
+/* It's to verify no ICE here, ignore error/warning messages
+ since they are not test points here. */
+/* { dg-excess-errors "pr105485" } */
+
+template <class> void __builtin_vec_vslv();
+typedef __attribute__((altivec(vector__))) char T;
+T b (T c, T d) {
+ return __builtin_vec_vslv(c, d);
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr106878.c b/gcc/testsuite/gcc.c-torture/compile/pr106878.c
new file mode 100644
index 0000000..c845718
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr106878.c
@@ -0,0 +1,15 @@
+/* PR tree-optimization/106878 */
+
+typedef __INTPTR_TYPE__ intptr_t;
+typedef __UINTPTR_TYPE__ uintptr_t;
+int a;
+
+int
+foo (const int *c)
+{
+ uintptr_t d = ((intptr_t) c | (intptr_t) &a) & 65535 << 16;
+ intptr_t e = (intptr_t) c;
+ if (d != (e & 65535 << 16))
+ return 1;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr106958.c b/gcc/testsuite/gcc.c-torture/compile/pr106958.c
new file mode 100644
index 0000000..98e6554
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr106958.c
@@ -0,0 +1,13 @@
+/* PR tree-optimization/106958 */
+
+int a;
+void bar (int);
+
+void
+foo (char *x, char *y)
+{
+ int b = a != 0;
+ int c = x != 0;
+ int d = y != 0;
+ bar (b | c | d);
+}
diff --git a/gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c b/gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c
new file mode 100644
index 0000000..1978a410
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/atomic/c2x-stdatomic-var-init-1.c
@@ -0,0 +1,9 @@
+/* Test ATOMIC_VAR_INIT not in C2x. */
+/* { dg-do compile } */
+/* { dg-options "-std=c2x -pedantic-errors" } */
+
+#include <stdatomic.h>
+
+#ifdef ATOMIC_VAR_INIT
+#error "ATOMIC_VAR_INIT defined"
+#endif
diff --git a/gcc/testsuite/gcc.dg/c2x-float-11.c b/gcc/testsuite/gcc.dg/c2x-float-11.c
new file mode 100644
index 0000000..0e2f3c0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/c2x-float-11.c
@@ -0,0 +1,9 @@
+/* Test INFINITY macro. Test when infinities not supported. */
+/* { dg-do compile { target { ! inff } } } */
+/* { dg-options "-std=c2x" } */
+
+#include <float.h>
+
+#ifdef INFINITY
+#error "INFINITY defined"
+#endif
diff --git a/gcc/testsuite/gcc.dg/c2x-float-2.c b/gcc/testsuite/gcc.dg/c2x-float-2.c
index 4f669fd..61a77f6 100644
--- a/gcc/testsuite/gcc.dg/c2x-float-2.c
+++ b/gcc/testsuite/gcc.dg/c2x-float-2.c
@@ -1,8 +1,8 @@
-/* Test INFINITY macro. Generic test even if infinities not
- supported. */
+/* Test INFINITY macro. Generic test. */
/* { dg-do run } */
/* { dg-options "-std=c2x -w" } */
/* { dg-add-options ieee } */
+/* { dg-require-effective-target inff } */
#include <float.h>
diff --git a/gcc/testsuite/gcc.dg/pr106938.c b/gcc/testsuite/gcc.dg/pr106938.c
new file mode 100644
index 0000000..7365a8c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr106938.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fno-ipa-pure-const -fno-tree-ccp -Wuninitialized" } */
+
+int n;
+
+void
+undefined (void);
+
+__attribute__ ((returns_twice)) int
+zero (void)
+{
+ return 0;
+}
+
+void
+bar (int)
+{
+ int i;
+
+ for (i = 0; i < -1; ++i)
+ n = 0;
+}
+
+__attribute__ ((simd)) void
+foo (void)
+{
+ int uninitialized;
+
+ undefined ();
+
+ while (uninitialized < 1) /* { dg-warning "uninitialized" } */
+ {
+ bar (zero ());
+ ++uninitialized;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c b/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c
index 594c3f3..1d64d9f 100644
--- a/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c
+++ b/gcc/testsuite/gcc.dg/tree-prof/indir-call-prof-2.c
@@ -1,4 +1,4 @@
-/* { dg-options "-O2 -fno-early-inlining -fdump-ipa-profile-optimized -fdump-tree-einline-optimized" } */
+/* { dg-options "-O2 -fno-early-inlining -fdump-ipa-profile-optimized -fdump-ipa-afdo-optimized" } */
volatile int one;
static int
add1 (int val)
@@ -31,5 +31,5 @@ main (void)
}
/* { dg-final-use-not-autofdo { scan-ipa-dump "Indirect call -> direct call.* add1 .will resolve by ipa-profile" "profile"} } */
/* { dg-final-use-not-autofdo { scan-ipa-dump "Indirect call -> direct call.* sub1 .will resolve by ipa-profile" "profile"} } */
-/* { dg-final-use-autofdo { scan-tree-dump "Inlining add1/1 into main/4." "einline"} } */
-/* { dg-final-use-autofdo { scan-tree-dump "Inlining sub1/2 into main/4." "einline"} } */
+/* { dg-final-use-autofdo { scan-ipa-dump "Inlining add1/1 into main/4." "afdo"} } */
+/* { dg-final-use-autofdo { scan-ipa-dump "Inlining sub1/2 into main/4." "afdo"} } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c
index 8d2d364..41877e0 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-34.c
@@ -13,4 +13,4 @@ float summul(int n, float *arg1, float *arg2)
return res1;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { avr-*-* pru-*-* } } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { avr-*-* pru-*-* riscv*-*-* } } } } } */
diff --git a/gcc/testsuite/gcc.misc-tests/gcov.exp b/gcc/testsuite/gcc.misc-tests/gcov.exp
index 82376d9..b8e9661 100644
--- a/gcc/testsuite/gcc.misc-tests/gcov.exp
+++ b/gcc/testsuite/gcc.misc-tests/gcov.exp
@@ -24,9 +24,9 @@ global GCC_UNDER_TEST
# For now find gcov in the same directory as $GCC_UNDER_TEST.
if { ![is_remote host] && [string match "*/*" [lindex $GCC_UNDER_TEST 0]] } {
- set GCOV [file dirname [lindex $GCC_UNDER_TEST 0]]/gcov
+ set GCOV [file dirname [lindex $GCC_UNDER_TEST 0]]/[transform gcov]
} else {
- set GCOV gcov
+ set GCOV [transform gcov]
}
# Initialize harness.
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c b/gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c
new file mode 100644
index 0000000..1b42771
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/ls64_asm_2.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O" } */
+
+#pragma GCC target "+ls64+nofp"
+
+#include "ls64_asm.c"
+
+/* { dg-final { scan-assembler-times {\tldp\t} 12 } } */
+/* { dg-final { scan-assembler-times {\tstp\t} 4 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c
new file mode 100644
index 0000000..7e705e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_20.c
@@ -0,0 +1,7 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#include "ldp_stp_6.c"
+
+/* { dg-final { scan-assembler "stp\td\[0-9\]+, d\[0-9\]+, \\\[x\[0-9\]+\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c
new file mode 100644
index 0000000..462e3c9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_21.c
@@ -0,0 +1,7 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#include "ldp_stp_8.c"
+
+/* { dg-final { scan-assembler-times "ldp\td\[0-9\], d\[0-9\]+, \\\[x\[0-9\]+\\\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c
new file mode 100644
index 0000000..283c56d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_22.c
@@ -0,0 +1,13 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+void
+foo (__Float32x4_t *ptr)
+{
+ ptr[0] = ptr[2];
+ ptr[1] = ptr[3];
+}
+
+/* { dg-final { scan-assembler {\tldp\tq[0-9]+, q[0-9]+} } } */
+/* { dg-final { scan-assembler {\tstp\tq[0-9]+, q[0-9]+} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c
new file mode 100644
index 0000000..b14976c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_23.c
@@ -0,0 +1,16 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+void
+foo (char *char_ptr)
+{
+ __Float64x2_t *ptr = (__Float64x2_t *)(char_ptr + 1);
+ asm volatile ("" ::
+ "w" (ptr[1]),
+ "w" (ptr[2]),
+ "w" (ptr[3]),
+ "w" (ptr[4]));
+}
+
+/* { dg-final { scan-assembler-times {\tldp\tq[0-9]+, q[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c b/gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c
new file mode 100644
index 0000000..a99426e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ldp_stp_24.c
@@ -0,0 +1,16 @@
+/* { dg-options "-O2" } */
+
+#pragma GCC target "+nosimd+fp"
+
+void
+foo (char *char_ptr)
+{
+ __Float64x2_t *ptr = (__Float64x2_t *)(char_ptr + 1);
+ asm volatile ("" :
+ "=w" (ptr[1]),
+ "=w" (ptr[2]),
+ "=w" (ptr[3]),
+ "=w" (ptr[4]));
+}
+
+/* { dg-final { scan-assembler-times {\tstp\tq[0-9]+, q[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c
index 8a6afb1..cac4241 100644
--- a/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_1.c
@@ -80,3 +80,24 @@ fpr_to_gpr (v16qi q0)
x0 = q0;
asm volatile ("" :: "r" (x0));
}
+
+/*
+** gpr_to_gpr:
+** (
+** mov x0, x2
+** mov x1, x3
+** |
+** mov x1, x3
+** mov x0, x2
+** )
+** ret
+*/
+void
+gpr_to_gpr ()
+{
+ register v16qi x0 asm ("x0");
+ register v16qi x2 asm ("x2");
+ asm volatile ("" : "=r" (x2));
+ x0 = x2;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_2.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_2.c
new file mode 100644
index 0000000..08a0a19
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_2.c
@@ -0,0 +1,27 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_GENERAL(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE zero_##TYPE () { return (TYPE) {}; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_GENERAL (__Int8x16_t)
+TEST_GENERAL (__Int16x8_t)
+TEST_GENERAL (__Int32x4_t)
+TEST_GENERAL (__Int64x2_t)
+TEST_GENERAL (__Bfloat16x8_t)
+TEST_GENERAL (__Float16x8_t)
+TEST_GENERAL (__Float32x4_t)
+TEST_GENERAL (__Float64x2_t)
+
+__Int8x16_t const_s8x8 () { return (__Int8x16_t) { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; }
+__Int16x8_t const_s16x4 () { return (__Int16x8_t) { 1, 0, 1, 0, 1, 0, 1, 0 }; }
+__Int32x4_t const_s32x2 () { return (__Int32x4_t) { 1, 2, 3, 4 }; }
+__Int64x2_t const_s64x1 () { return (__Int64x2_t) { 100, 100 }; }
+__Float16x8_t const_f16x4 () { return (__Float16x8_t) { 2, 2, 2, 2, 2, 2, 2, 2 }; }
+__Float32x4_t const_f32x2 () { return (__Float32x4_t) { 1, 2, 1, 2 }; }
+__Float64x2_t const_f64x1 () { return (__Float64x2_t) { 32, 32 }; }
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_3.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_3.c
new file mode 100644
index 0000000..d43b994
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_3.c
@@ -0,0 +1,30 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE \
+ test_##TYPE (void) \
+ { \
+ typedef TYPE v __attribute__((aligned(1))); \
+ register v *ptr asm ("x0"); \
+ asm volatile ("" : "=r" (ptr)); \
+ return *ptr; \
+ }
+
+TEST_VECTOR (__Int8x16_t)
+TEST_VECTOR (__Int16x8_t)
+TEST_VECTOR (__Int32x4_t)
+TEST_VECTOR (__Int64x2_t)
+TEST_VECTOR (__Bfloat16x8_t)
+TEST_VECTOR (__Float16x8_t)
+TEST_VECTOR (__Float32x4_t)
+TEST_VECTOR (__Float64x2_t)
+
+/*
+** test___Int8x16_t:
+** ldr q0, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv2di_1.c b/gcc/testsuite/gcc.target/aarch64/movv2di_1.c
new file mode 100644
index 0000000..e3b55fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv2di_1.c
@@ -0,0 +1,103 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nothing+nosimd+fp"
+
+typedef long long v2di __attribute__((vector_size(16)));
+
+/*
+** fpr_to_fpr:
+** sub sp, sp, #16
+** str q1, \[sp\]
+** ldr q0, \[sp\]
+** add sp, sp, #?16
+** ret
+*/
+v2di
+fpr_to_fpr (v2di q0, v2di q1)
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+v2di
+gpr_to_fpr ()
+{
+ register v2di x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+v2di
+zero_to_fpr ()
+{
+ return (v2di) {};
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** fmov x0, d0
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** fmov x0, d0
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** fmov x1, d0
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** fmov x1, d0
+** )
+** ret
+*/
+void
+fpr_to_gpr (v2di q0)
+{
+ register v2di x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
+
+/*
+** gpr_to_gpr:
+** (
+** mov x0, x2
+** mov x1, x3
+** |
+** mov x1, x3
+** mov x0, x2
+** )
+** ret
+*/
+void
+gpr_to_gpr ()
+{
+ register v2di x0 asm ("x0");
+ register v2di x2 asm ("x2");
+ asm volatile ("" : "=r" (x2));
+ x0 = x2;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c
new file mode 100644
index 0000000..90e3b42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv2x16qi_1.c
@@ -0,0 +1,40 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x16x2_t)
+TEST_VECTOR (int16x8x2_t)
+TEST_VECTOR (int32x4x2_t)
+TEST_VECTOR (int64x2x2_t)
+TEST_VECTOR (float16x8x2_t)
+TEST_VECTOR (bfloat16x8x2_t)
+TEST_VECTOR (float32x4x2_t)
+TEST_VECTOR (float64x2x2_t)
+
+/*
+** mov_int8x16x2_t:
+** sub sp, sp, #32
+** stp q2, q3, \[sp\]
+** ldp q0, q1, \[sp\]
+** add sp, sp, #?32
+** ret
+*/
+/*
+** load_int8x16x2_t:
+** ldp q0, q1, \[x0\]
+** ret
+*/
+/*
+** store_int8x16x2_t: { xfail *-*-* }
+** stp q0, q1, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c
new file mode 100644
index 0000000..883a0ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv2x8qi_1.c
@@ -0,0 +1,38 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x8x2_t)
+TEST_VECTOR (int16x4x2_t)
+TEST_VECTOR (int32x2x2_t)
+TEST_VECTOR (int64x1x2_t)
+TEST_VECTOR (float16x4x2_t)
+TEST_VECTOR (bfloat16x4x2_t)
+TEST_VECTOR (float32x2x2_t)
+TEST_VECTOR (float64x1x2_t)
+
+/*
+** mov_int8x8x2_t:
+** fmov d0, d2
+** fmov d1, d3
+** ret
+*/
+/*
+** load_int8x8x2_t:
+** ldp d0, d1, \[x0\]
+** ret
+*/
+/*
+** store_int8x8x2_t:
+** stp d0, d1, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c
new file mode 100644
index 0000000..070a596
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv3x16qi_1.c
@@ -0,0 +1,44 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x16x3_t)
+TEST_VECTOR (int16x8x3_t)
+TEST_VECTOR (int32x4x3_t)
+TEST_VECTOR (int64x2x3_t)
+TEST_VECTOR (float16x8x3_t)
+TEST_VECTOR (bfloat16x8x3_t)
+TEST_VECTOR (float32x4x3_t)
+TEST_VECTOR (float64x2x3_t)
+
+/*
+** mov_int8x16x3_t:
+** sub sp, sp, #48
+** stp q3, q4, \[sp\]
+** str q5, \[sp, #?32\]
+** ldp q0, q1, \[sp\]
+** ldr q2, \[sp, #?32\]
+** add sp, sp, #?48
+** ret
+*/
+/*
+** load_int8x16x3_t:
+** ldp q0, q1, \[x0\]
+** ldr q2, \[x0, #?32\]
+** ret
+*/
+/*
+** store_int8x16x3_t: { xfail *-*-* }
+** stp q0, q1, \[x0\]
+** str q2, \[x0, #?32\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c
new file mode 100644
index 0000000..4b873d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv3x8qi_1.c
@@ -0,0 +1,41 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x8x3_t)
+TEST_VECTOR (int16x4x3_t)
+TEST_VECTOR (int32x2x3_t)
+TEST_VECTOR (int64x1x3_t)
+TEST_VECTOR (float16x4x3_t)
+TEST_VECTOR (bfloat16x4x3_t)
+TEST_VECTOR (float32x2x3_t)
+TEST_VECTOR (float64x1x3_t)
+
+/*
+** mov_int8x8x3_t:
+** fmov d0, d3
+** fmov d1, d4
+** fmov d2, d5
+** ret
+*/
+/*
+** load_int8x8x3_t:
+** ldp d0, d1, \[x0\]
+** ldr d2, \[x0, #?16\]
+** ret
+*/
+/*
+** store_int8x8x3_t:
+** stp d0, d1, \[x0\]
+** str d2, \[x0, #?16\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c
new file mode 100644
index 0000000..6a517b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv4x16qi_1.c
@@ -0,0 +1,44 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x16x4_t)
+TEST_VECTOR (int16x8x4_t)
+TEST_VECTOR (int32x4x4_t)
+TEST_VECTOR (int64x2x4_t)
+TEST_VECTOR (float16x8x4_t)
+TEST_VECTOR (bfloat16x8x4_t)
+TEST_VECTOR (float32x4x4_t)
+TEST_VECTOR (float64x2x4_t)
+
+/*
+** mov_int8x16x4_t:
+** sub sp, sp, #64
+** stp q4, q5, \[sp\]
+** stp q6, q7, \[sp, #?32\]
+** ldp q0, q1, \[sp\]
+** ldp q2, q3, \[sp, #?32\]
+** add sp, sp, #?64
+** ret
+*/
+/*
+** load_int8x16x4_t:
+** ldp q0, q1, \[x0\]
+** ldp q2, q3, \[x0, #?32\]
+** ret
+*/
+/*
+** store_int8x16x4_t: { xfail *-*-* }
+** stp q0, q1, \[x0\]
+** stp q2, q3, \[x0, #?32\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c
new file mode 100644
index 0000000..f096be4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv4x8qi_1.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC aarch64 "arm_neon.h"
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_VECTOR (int8x8x4_t)
+TEST_VECTOR (int16x4x4_t)
+TEST_VECTOR (int32x2x4_t)
+TEST_VECTOR (int64x1x4_t)
+TEST_VECTOR (float16x4x4_t)
+TEST_VECTOR (bfloat16x4x4_t)
+TEST_VECTOR (float32x2x4_t)
+TEST_VECTOR (float64x1x4_t)
+
+/*
+** mov_int8x8x4_t:
+** fmov d0, d4
+** fmov d1, d5
+** fmov d2, d6
+** fmov d3, d7
+** ret
+*/
+/*
+** load_int8x8x4_t:
+** ldp d0, d1, \[x0\]
+** ldp d2, d3, \[x0, #?16\]
+** ret
+*/
+/*
+** store_int8x8x4_t:
+** stp d0, d1, \[x0\]
+** stp d2, d3, \[x0, #?16\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
index 4c97e6f..d2b5d80 100644
--- a/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
@@ -53,3 +53,18 @@ fpr_to_gpr (v8qi q0)
x0 = q0;
asm volatile ("" :: "r" (x0));
}
+
+/*
+** gpr_to_gpr:
+** mov x0, x1
+** ret
+*/
+void
+gpr_to_gpr ()
+{
+ register v8qi x0 asm ("x0");
+ register v8qi x1 asm ("x1");
+ asm volatile ("" : "=r" (x1));
+ x0 = x1;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_2.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_2.c
new file mode 100644
index 0000000..0d8576f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_2.c
@@ -0,0 +1,27 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_GENERAL(TYPE) \
+ TYPE mov_##TYPE (TYPE a, TYPE b) { return b; } \
+ TYPE zero_##TYPE () { return (TYPE) {}; } \
+ TYPE load_##TYPE (TYPE *ptr) { return *ptr; } \
+ void store_##TYPE (TYPE *ptr, TYPE a) { *ptr = a; }
+
+TEST_GENERAL (__Int8x8_t)
+TEST_GENERAL (__Int16x4_t)
+TEST_GENERAL (__Int32x2_t)
+TEST_GENERAL (__Int64x1_t)
+TEST_GENERAL (__Bfloat16x4_t)
+TEST_GENERAL (__Float16x4_t)
+TEST_GENERAL (__Float32x2_t)
+TEST_GENERAL (__Float64x1_t)
+
+__Int8x8_t const_s8x8 () { return (__Int8x8_t) { 1, 1, 1, 1, 1, 1, 1, 1 }; }
+__Int16x4_t const_s16x4 () { return (__Int16x4_t) { 1, 0, 1, 0 }; }
+__Int32x2_t const_s32x2 () { return (__Int32x2_t) { 1, 2 }; }
+__Int64x1_t const_s64x1 () { return (__Int64x1_t) { 100 }; }
+__Float16x4_t const_f16x4 () { return (__Float16x4_t) { 2, 2, 2, 2 }; }
+__Float32x2_t const_f32x2 () { return (__Float32x2_t) { 1, 2 }; }
+__Float64x1_t const_f64x1 () { return (__Float64x1_t) { 32 }; }
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_3.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_3.c
new file mode 100644
index 0000000..1caa1a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_3.c
@@ -0,0 +1,30 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosimd+fp"
+
+#define TEST_VECTOR(TYPE) \
+ TYPE \
+ test_##TYPE (void) \
+ { \
+ typedef TYPE v __attribute__((aligned(1))); \
+ register v *ptr asm ("x0"); \
+ asm volatile ("" : "=r" (ptr)); \
+ return *ptr; \
+ }
+
+TEST_VECTOR (__Int8x8_t)
+TEST_VECTOR (__Int16x4_t)
+TEST_VECTOR (__Int32x2_t)
+TEST_VECTOR (__Int64x1_t)
+TEST_VECTOR (__Bfloat16x4_t)
+TEST_VECTOR (__Float16x4_t)
+TEST_VECTOR (__Float32x2_t)
+TEST_VECTOR (__Float64x1_t)
+
+/*
+** test___Int8x8_t:
+** ldr d0, \[x0\]
+** ret
+*/
diff --git a/gcc/testsuite/gcc.target/aarch64/vect_unary_2.c b/gcc/testsuite/gcc.target/aarch64/vect_unary_2.c
new file mode 100644
index 0000000..454ac27
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect_unary_2.c
@@ -0,0 +1,5 @@
+/* { dg-options "-O3 -fno-math-errno --save-temps" } */
+
+#pragma GCC target "+nosimd+fp"
+
+#include "vect_unary_1.c"
diff --git a/gcc/testsuite/gcc.target/i386/pr105735-1.c b/gcc/testsuite/gcc.target/i386/pr105735-1.c
new file mode 100644
index 0000000..69de6b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr105735-1.c
@@ -0,0 +1,88 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fdump-tree-sccp-details" } */
+/* { dg-final { scan-tree-dump-times {final value replacement} 8 "sccp" } } */
+
+unsigned int
+__attribute__((noipa))
+foo (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 64; bit++)
+ tmp &= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo1 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 63; bit >= 0; bit -=3)
+ tmp &= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo2 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 64; bit++)
+ tmp |= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo3 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 63; bit >= 0; bit -=3)
+ tmp |= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo4 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 64; bit++)
+ tmp ^= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+foo5 (unsigned int tmp, unsigned int bit2)
+{
+ for (int bit = 0; bit < 63; bit++)
+ tmp ^= bit2;
+ return tmp;
+}
+
+unsigned int
+__attribute__((noipa))
+f (unsigned int tmp, int bit, unsigned int bit2)
+{
+ unsigned int res = tmp;
+ for (int i = 0; i < bit; i++)
+ res &= bit2;
+ return res;
+}
+
+unsigned int
+__attribute__((noipa))
+f1 (unsigned int tmp, int bit, unsigned int bit2)
+{
+ unsigned int res = tmp;
+ for (int i = 0; i < bit; i++)
+ res |= bit2;
+ return res;
+}
+
+unsigned int
+__attribute__((noipa))
+f2 (unsigned int tmp, int bit, unsigned int bit2)
+{
+ unsigned int res = tmp;
+ for (int i = 0; i < bit; i++)
+ res ^= bit2;
+ return res;
+}
+
diff --git a/gcc/testsuite/gcc.target/i386/pr105735-2.c b/gcc/testsuite/gcc.target/i386/pr105735-2.c
new file mode 100644
index 0000000..66cc5fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr105735-2.c
@@ -0,0 +1,28 @@
+/* { dg-do run } */
+/* { dg-options "-O1" } */
+
+#include "pr105735-1.c"
+
+int main()
+{
+ unsigned int tmp = 0x1101;
+ unsigned int bit2 = 0x111101;
+ if (foo (tmp, bit2) != 0x1101)
+ __builtin_abort ();
+ if (foo1 (tmp, bit2) != 0x1101)
+ __builtin_abort ();
+ if (foo2 (tmp, bit2) != 0x111101)
+ __builtin_abort ();
+ if (foo3 (tmp, bit2) != 0x111101)
+ __builtin_abort ();
+ if (foo4 (tmp, bit2) != 0x1101)
+ __builtin_abort ();
+ if (foo5 (tmp, bit2) != 0x110000)
+ __builtin_abort ();
+ if (f (tmp, 64, bit2) != 0x1101)
+ __builtin_abort ();
+ if (f1 (tmp, 64, bit2) != 0x111101)
+ __builtin_abort ();
+ if (f2 (tmp, 64, bit2) != 0x1101)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr106905.c b/gcc/testsuite/gcc.target/i386/pr106905.c
new file mode 100644
index 0000000..a190a1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr106905.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=silvermont -O2 -fvect-cost-model=dynamic" } */
+
+void
+foo_mul_peel (int *a, int b)
+{
+ int i;
+
+ for (i = 0; i < 7; ++i)
+ {
+ b *= 2;
+ a[i] = b;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr106910-1.c b/gcc/testsuite/gcc.target/i386/pr106910-1.c
new file mode 100644
index 0000000..c7685a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr106910-1.c
@@ -0,0 +1,77 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-msse4.1 -O2 -Ofast" } */
+/* { dg-final { scan-assembler-times "roundps" 9 } } */
+/* { dg-final { scan-assembler-times "cvtps2dq" 1 } } */
+/* { dg-final { scan-assembler-times "cvttps2dq" 3 } } */
+
+#include<math.h>
+
+void
+foo (float* p, float* __restrict q)
+{
+ p[0] = truncf (q[0]);
+ p[1] = truncf (q[1]);
+}
+
+void
+foo1 (float* p, float* __restrict q)
+{
+ p[0] = floorf (q[0]);
+ p[1] = floorf (q[1]);
+}
+
+void
+foo1i (int* p, float* __restrict q)
+{
+ p[0] = (int) floorf (q[0]);
+ p[1] = (int) floorf (q[1]);
+}
+
+void
+foo2 (float* p, float* __restrict q)
+{
+ p[0] = ceilf (q[0]);
+ p[1] = ceilf (q[1]);
+}
+
+void
+foo2i (int* p, float* __restrict q)
+{
+ p[0] = (int) ceilf (q[0]);
+ p[1] = (int) ceilf (q[1]);
+}
+
+void
+foo3 (float* p, float* __restrict q)
+{
+ p[0] = rintf (q[0]);
+ p[1] = rintf (q[1]);
+}
+
+void
+foo3i (int* p, float* __restrict q)
+{
+ p[0] = (int) rintf (q[0]);
+ p[1] = (int) rintf (q[1]);
+}
+
+void
+foo4 (float* p, float* __restrict q)
+{
+ p[0] = nearbyintf (q[0]);
+ p[1] = nearbyintf (q[1]);
+}
+
+void
+foo5(float* p, float* __restrict q)
+{
+ p[0] = roundf (q[0]);
+ p[1] = roundf (q[1]);
+}
+
+void
+foo5i(int* p, float* __restrict q)
+{
+ p[0] = (int) roundf (q[0]);
+ p[1] = (int) roundf (q[1]);
+}
diff --git a/gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c b/gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c
new file mode 100644
index 0000000..bead94e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-bfloat16-2c.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+/* { dg-options "-mf16c -msse2 -mno-avx2 -O2" } */
+
+typedef __bf16 v8bf __attribute__ ((__vector_size__ (16)));
+typedef __bf16 v16bf __attribute__ ((__vector_size__ (32)));
+
+#define VEC_EXTRACT(V,S,IDX) \
+ S \
+ __attribute__((noipa)) \
+ vec_extract_##V##_##IDX (V v) \
+ { \
+ return v[IDX]; \
+ }
+
+#define VEC_SET(V,S,IDX) \
+ V \
+ __attribute__((noipa)) \
+ vec_set_##V##_##IDX (V v, S s) \
+ { \
+ v[IDX] = s; \
+ return v; \
+ }
+
+v8bf
+vec_init_v8bf (__bf16 a1, __bf16 a2, __bf16 a3, __bf16 a4,
+ __bf16 a5, __bf16 a6, __bf16 a7, __bf16 a8)
+{
+ return __extension__ (v8bf) {a1, a2, a3, a4, a5, a6, a7, a8};
+}
+
+v16bf
+vec_init_v16bf (__bf16 a1, __bf16 a2, __bf16 a3, __bf16 a4,
+ __bf16 a5, __bf16 a6, __bf16 a7, __bf16 a8,
+ __bf16 a9, __bf16 a10, __bf16 a11, __bf16 a12,
+ __bf16 a13, __bf16 a14, __bf16 a15, __bf16 a16)
+{
+ return __extension__ (v16bf) {a1, a2, a3, a4, a5, a6, a7, a8,
+ a9, a10, a11, a12, a13, a14, a15, a16};
+}
+
+v8bf
+vec_init_dup_v8bf (__bf16 a1)
+{
+ return __extension__ (v8bf) {a1, a1, a1, a1, a1, a1, a1, a1};
+}
+
+v16bf
+vec_init_dup_v16bf (__bf16 a1)
+{
+ return __extension__ (v16bf) {a1, a1, a1, a1, a1, a1, a1, a1,
+ a1, a1, a1, a1, a1, a1, a1, a1};
+}
+
+/* { dg-final { scan-assembler-times "vpunpcklwd" 12 } } */
+/* { dg-final { scan-assembler-times "vpunpckldq" 6 } } */
+/* { dg-final { scan-assembler-times "vpunpcklqdq" 3 } } */
+
+VEC_EXTRACT (v8bf, __bf16, 0);
+VEC_EXTRACT (v8bf, __bf16, 4);
+VEC_EXTRACT (v16bf, __bf16, 0);
+VEC_EXTRACT (v16bf, __bf16, 3);
+VEC_EXTRACT (v16bf, __bf16, 8);
+VEC_EXTRACT (v16bf, __bf16, 15);
+/* { dg-final { scan-assembler-times "vpsrldq\[\t ]*\\\$8" 1 } } */
+/* { dg-final { scan-assembler-times "vpsrldq\[\t ]*\\\$6" 1 } } */
+/* { dg-final { scan-assembler-times "vpsrldq\[\t ]*\\\$14" 1 } } */
+/* { dg-final { scan-assembler-times "vextract" 4 } } */
+
+VEC_SET (v8bf, __bf16, 4);
+VEC_SET (v16bf, __bf16, 3);
+VEC_SET (v16bf, __bf16, 8);
+VEC_SET (v16bf, __bf16, 15);
+/* { dg-final { scan-assembler-times "vpblendw" 3 { target { ! ia32 } } } } */
+
+/* { dg-final { scan-assembler-times "vpinsrw" 30 { target ia32 } } } */
+
diff --git a/gcc/testsuite/gcc.target/ia64/pr106905.c b/gcc/testsuite/gcc.target/ia64/pr106905.c
new file mode 100644
index 0000000..1b9656e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/ia64/pr106905.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99 -O3 -fPIC" } */
+long ZDICT_fillNoise_p, ZDICT_trainFromBuffer_legacy_result;
+unsigned ZDICT_fillNoise_acc;
+int ZDICT_totalSampleSize_nbFiles;
+static void ZDICT_fillNoise(void *buffer, long length) {
+ unsigned prime2 = 9;
+ for (ZDICT_fillNoise_p = 0; ZDICT_fillNoise_p < length; ZDICT_fillNoise_p++)
+ ZDICT_fillNoise_acc *= ((char *)buffer)[ZDICT_fillNoise_p] = prime2;
+}
+long ZDICT_trainFromBuffer_legacy() {
+ void *newBuff;
+ long total = 0;
+ for (; ZDICT_totalSampleSize_nbFiles;)
+ total += 0;
+ long sBuffSize = total;
+ newBuff = 0;
+ ZDICT_fillNoise(newBuff + sBuffSize, 32);
+ return ZDICT_trainFromBuffer_legacy_result;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/pr104482.c b/gcc/testsuite/gcc.target/powerpc/pr104482.c
new file mode 100644
index 0000000..9219126
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr104482.c
@@ -0,0 +1,16 @@
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx" } */
+
+/* It's to verify no ICE here, ignore error messages about
+ mismatch argument number since they are not test points
+ here. */
+/* { dg-excess-errors "pr104482" } */
+
+__attribute__ ((altivec (vector__))) int vsi;
+
+double
+testXXPERMDI (void)
+{
+ return __builtin_vsx_xxpermdi (vsi, vsi, 2, 4);
+}
+
diff --git a/gcc/testsuite/gcc.target/powerpc/pr106550.c b/gcc/testsuite/gcc.target/powerpc/pr106550.c
new file mode 100644
index 0000000..74e3953
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr106550.c
@@ -0,0 +1,14 @@
+/* PR target/106550 */
+/* { dg-options "-O2 -mdejagnu-cpu=power10" } */
+/* { dg-require-effective-target power10_ok } */
+
+void
+foo (unsigned long long *a)
+{
+ *a++ = 0x020805006106003; /* pli+pli+rldimi */
+ *a++ = 0x2351847027482577;/* pli+pli+rldimi */
+}
+
+/* { dg-final { scan-assembler-times {\mpli\M} 4 } } */
+/* { dg-final { scan-assembler-times {\mrldimi\M} 2 } } */
+
diff --git a/gcc/testsuite/gcc.target/powerpc/pr106550_1.c b/gcc/testsuite/gcc.target/powerpc/pr106550_1.c
new file mode 100644
index 0000000..7e709fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr106550_1.c
@@ -0,0 +1,22 @@
+/* PR target/106550 */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-O2 -mdejagnu-cpu=power10 -fdisable-rtl-split1" } */
+/* force the constant splitter run after RA: -fdisable-rtl-split1. */
+
+void
+foo (unsigned long long *a)
+{
+ /* Test oris/ori is used where paddi does not work with 'r0'. */
+ register long long d asm("r0") = 0x1245abcef9240dec; /* pli+sldi+oris+ori */
+ long long n;
+ asm("cntlzd %0, %1" : "=r"(n) : "r"(d));
+ *a++ = n;
+
+ *a++ = 0x235a8470a7480000ULL; /* pli+sldi+oris */
+ *a++ = 0x23a184700000b677ULL; /* pli+sldi+ori */
+}
+
+/* { dg-final { scan-assembler-times {\mpli\M} 3 } } */
+/* { dg-final { scan-assembler-times {\msldi\M} 3 } } */
+/* { dg-final { scan-assembler-times {\moris\M} 2 } } */
+/* { dg-final { scan-assembler-times {\mori\M} 2 } } */
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c
index 0a96b71..0c8c2f8 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-bool.c
@@ -1,7 +1,7 @@
/* Check load on condition for bool. */
/* { dg-do compile { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z13" } */
+/* { dg-options "-O2 -march=z13 -mzarch" } */
/* { dg-final { scan-assembler "lochinh\t%r.?,1" } } */
#include <stdbool.h>
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c
index 9c3d041..8c8e0ae 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-one-insn-char.c
@@ -1,7 +1,7 @@
/* Check load on condition for global char. */
/* { dg-do compile { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z13" } */
+/* { dg-options "-O2 -march=z13 -mzarch" } */
/* { dg-final { scan-assembler "locrnh\t%r.?,%r.?" } } */
#include <stdbool.h>
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c
index df0416a..1027ddc 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-bool.c
@@ -1,7 +1,7 @@
/* Check if conversion for two instructions. */
/* { dg-do run } */
-/* { dg-options "-O2 -march=z13 --save-temps" } */
+/* { dg-options "-O2 -march=z13 -mzarch --save-temps" } */
/* { dg-final { scan-assembler "lochih\t%r.?,1" } } */
/* { dg-final { scan-assembler "locrh\t.*" } } */
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c
index 181173b..fc6946f 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-int.c
@@ -1,7 +1,7 @@
/* Check if conversion for two instructions. */
/* { dg-do run } */
-/* { dg-options "-O2 -march=z13 --save-temps" } */
+/* { dg-options "-O2 -march=z13 -mzarch --save-temps" } */
/* { dg-final { scan-assembler "lochih\t%r.?,1" } } */
/* { dg-final { scan-assembler "locrh\t.*" } } */
diff --git a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c
index c66ef6c..51af498 100644
--- a/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c
+++ b/gcc/testsuite/gcc.target/s390/ifcvt-two-insns-long.c
@@ -1,19 +1,20 @@
/* Check if conversion for two instructions. */
/* { dg-do run } */
-/* { dg-options "-O2 -march=z13 --save-temps" } */
+/* { dg-options "-O2 -march=z13 -mzarch --save-temps" } */
/* { dg-final { scan-assembler "locghih\t%r.?,1" } } */
/* { dg-final { scan-assembler "locgrh\t.*" } } */
+
#include <limits.h>
#include <stdio.h>
#include <assert.h>
__attribute__ ((noinline))
-long foo (long *a, unsigned long n)
+long long foo (long long *a, unsigned long long n)
{
- long min = 999999;
- long bla = 0;
+ long long min = 999999;
+ long long bla = 0;
for (int i = 0; i < n; i++)
{
if (a[i] < min)
@@ -30,9 +31,9 @@ long foo (long *a, unsigned long n)
int main()
{
- long a[] = {2, 1, -13, LONG_MAX, LONG_MIN, 0};
+ long long a[] = {2, 1, -13, LONG_MAX, LONG_MIN, 0};
- long res = foo (a, sizeof (a) / sizeof (a[0]));
+ long long res = foo (a, sizeof (a) / sizeof (a[0]));
assert (res == (LONG_MIN + 1));
}
diff --git a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c
index 5c64fac..eefacad 100644
--- a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c
+++ b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z14.c
@@ -1,7 +1,7 @@
/* Make sure that the reverse permute patterns are optimized
correctly. */
/* { dg-do run { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z14 -mzarch -fno-unroll-loops" } */
+/* { dg-options "-O2 -march=z14 -mzarch -fno-unroll-loops -save-temps" } */
/* { dg-final { scan-assembler-times "vpdi\t" 4 } } */
/* { dg-final { scan-assembler-times "verllg\t" 2 } } */
diff --git a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c
index bff5240..079460b 100644
--- a/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c
+++ b/gcc/testsuite/gcc.target/s390/vector/vperm-rev-z15.c
@@ -1,12 +1,12 @@
/* Make sure that the reverse permute patterns are optimized
correctly. */
/* { dg-do run { target { s390*-*-* } } } */
-/* { dg-options "-O2 -march=z15 -mzarch -fno-unroll-loops" } */
+/* { dg-options "-O2 -march=z15 -mzarch -fno-unroll-loops -save-temps" } */
/* { dg-final { scan-assembler-times "vsterg\t" 2 } } */
-/* { dg-final { scan-assembler-times "vsterf" 2 } } */
+/* { dg-final { scan-assembler-times "vsterf\t" 2 } } */
/* { dg-final { scan-assembler-times "vstbrq\t" 1 } } */
-/* { dg-final { scan-assembler-times "vperm" 0 } } */
+/* { dg-final { scan-assembler-times "vperm\t" 0 } } */
#include <assert.h>
diff --git a/gcc/testsuite/gfortran.dg/ieee/modes_1.f90 b/gcc/testsuite/gfortran.dg/ieee/modes_1.f90
new file mode 100644
index 0000000..b6ab288
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/ieee/modes_1.f90
@@ -0,0 +1,95 @@
+! { dg-do run }
+!
+! Test IEEE_MODES_TYPE, IEEE_GET_MODES and IEEE_SET_MODES
+
+
+! The symbols should be accessible from both IEEE_EXCEPTIONS
+! and IEEE_ARITHMETIC.
+
+subroutine test_1
+ use ieee_exceptions, only : IEEE_GET_MODES, IEEE_SET_MODES
+end subroutine
+
+subroutine test_2
+ use ieee_arithmetic, only : IEEE_GET_MODES, IEEE_SET_MODES
+end subroutine
+
+subroutine test_3
+ use ieee_exceptions, only : IEEE_MODES_TYPE
+end subroutine
+
+subroutine test_4
+ use ieee_arithmetic, only : IEEE_MODES_TYPE
+end subroutine
+
+
+! Check that the functions actually do the job
+
+program foo
+ use ieee_arithmetic
+ implicit none
+
+ type(ieee_modes_type) :: modes1, modes2
+ type(ieee_round_type) :: rmode
+ logical :: f
+
+ ! Set some modes
+ if (ieee_support_underflow_control()) then
+ call ieee_set_underflow_mode(gradual=.false.)
+ endif
+ if (ieee_support_rounding(ieee_up)) then
+ call ieee_set_rounding_mode(ieee_up)
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_set_halting_mode(ieee_overflow, .true.)
+ endif
+
+ call ieee_get_modes(modes1)
+
+ ! Change modes
+ if (ieee_support_underflow_control()) then
+ call ieee_set_underflow_mode(gradual=.true.)
+ endif
+ if (ieee_support_rounding(ieee_down)) then
+ call ieee_set_rounding_mode(ieee_down)
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_set_halting_mode(ieee_overflow, .false.)
+ endif
+
+ ! Save and restore the previous modes
+ call ieee_get_modes(modes2)
+ call ieee_set_modes(modes1)
+
+ ! Check them
+ if (ieee_support_underflow_control()) then
+ call ieee_get_underflow_mode(f)
+ if (f) stop 1
+ endif
+ if (ieee_support_rounding(ieee_down)) then
+ call ieee_get_rounding_mode(rmode)
+ if (rmode /= ieee_up) stop 2
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_get_halting_mode(ieee_overflow, f)
+ if (.not. f) stop 3
+ endif
+
+ ! Restore the second set of modes
+ call ieee_set_modes(modes2)
+
+ ! Check again
+ if (ieee_support_underflow_control()) then
+ call ieee_get_underflow_mode(f)
+ if (.not. f) stop 3
+ endif
+ if (ieee_support_rounding(ieee_down)) then
+ call ieee_get_rounding_mode(rmode)
+ if (rmode /= ieee_down) stop 4
+ endif
+ if (ieee_support_halting(ieee_overflow)) then
+ call ieee_get_halting_mode(ieee_overflow, f)
+ if (f) stop 5
+ endif
+
+end program foo
diff --git a/gcc/testsuite/gfortran.dg/ieee/rounding_2.f90 b/gcc/testsuite/gfortran.dg/ieee/rounding_2.f90
new file mode 100644
index 0000000..8af6c91
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/ieee/rounding_2.f90
@@ -0,0 +1,20 @@
+! { dg-do run }
+
+ use, intrinsic :: ieee_arithmetic
+ implicit none
+
+ real :: sx1, sx2, sx3
+ double precision :: dx1, dx2, dx3
+
+ ! IEEE_AWAY was added in Fortran 2018 and not supported by any target
+ ! at the moment. Just check we can query for its support.
+
+ ! We should support at least C float and C double types
+ if (ieee_support_rounding(ieee_away) &
+ .or. ieee_support_rounding(ieee_away, 0.) &
+ .or. ieee_support_rounding(ieee_away, 0.d0)) then
+ print *, "If a target / libc now supports this, we need to add a proper check!"
+ stop 1
+ end if
+
+end
diff --git a/gcc/testsuite/gfortran.dg/pr104314.f90 b/gcc/testsuite/gfortran.dg/pr104314.f90
new file mode 100644
index 0000000..510ded0
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr104314.f90
@@ -0,0 +1,9 @@
+! { dg-do compile }
+! PR fortran/104314 - ICE in deferred_op_assign
+! Contributed by G.Steinmetz
+
+program p
+ character(:), allocatable :: c(:)
+ c = ['123']
+ c = c == c ! { dg-error "Cannot convert" }
+end
diff --git a/gcc/testsuite/gfortran.dg/pr106857.f90 b/gcc/testsuite/gfortran.dg/pr106857.f90
new file mode 100644
index 0000000..4b0f86a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr106857.f90
@@ -0,0 +1,12 @@
+! { dg-do compile }
+! PR fortran/106857 - ICE in gfc_simplify_pack
+! Contributed by G.Steinmetz
+
+program p
+ type t
+ integer :: n
+ end type
+ type(t), parameter :: a(2,2) = t(1)
+ type(t), parameter :: b(4) = reshape(a, [2]) ! { dg-error "Different shape" }
+ type(t), parameter :: c(2) = pack(b, [.false.,.true.,.false.,.true.]) ! { dg-error "Different shape" }
+end
diff --git a/gcc/testsuite/gfortran.dg/pr106934.f90 b/gcc/testsuite/gfortran.dg/pr106934.f90
new file mode 100644
index 0000000..ac58a3e
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr106934.f90
@@ -0,0 +1,7 @@
+! { dg-do compile }
+! { dg-options "-O" }
+subroutine s
+ logical(1) :: a = .true.
+ logical(2) :: b
+ a = transfer(b, a)
+end
diff --git a/gcc/testsuite/lib/g++.exp b/gcc/testsuite/lib/g++.exp
index 24ef068..16e61fb 100644
--- a/gcc/testsuite/lib/g++.exp
+++ b/gcc/testsuite/lib/g++.exp
@@ -303,11 +303,6 @@ proc g++_target_compile { source dest type options } {
global flags_to_postpone
global board_info
- if { [target_info needs_status_wrapper] != "" && [info exists gluefile] } {
- lappend options "libs=${gluefile}"
- lappend options "ldflags=${wrap_flags}"
- }
-
global TEST_EXTRA_LIBS
if [info exists TEST_EXTRA_LIBS] {
lappend options "ldflags=$TEST_EXTRA_LIBS"
@@ -333,6 +328,11 @@ proc g++_target_compile { source dest type options } {
set options [dg-additional-files-options $options $source]
+ if { [target_info needs_status_wrapper] != "" && [info exists gluefile] } {
+ lappend options "libs=${gluefile}"
+ lappend options "ldflags=${wrap_flags}"
+ }
+
set result [target_compile $source $dest $type $options]
if {[board_info $tboard exists multilib_flags]} {
diff --git a/gcc/testsuite/lib/gcc.exp b/gcc/testsuite/lib/gcc.exp
index 1b25ebe..2f145d0 100644
--- a/gcc/testsuite/lib/gcc.exp
+++ b/gcc/testsuite/lib/gcc.exp
@@ -129,16 +129,6 @@ proc gcc_target_compile { source dest type options } {
global flags_to_postpone
global board_info
- if {[target_info needs_status_wrapper] != "" && \
- [target_info needs_status_wrapper] != "0" && \
- [info exists gluefile] } {
- lappend options "libs=${gluefile}"
- lappend options "ldflags=$wrap_flags"
- if { $type == "executable" } {
- set options [concat "{additional_flags=-dumpbase \"\"}" $options]
- }
- }
-
global TEST_EXTRA_LIBS
if [info exists TEST_EXTRA_LIBS] {
lappend options "ldflags=$TEST_EXTRA_LIBS"
@@ -170,6 +160,17 @@ proc gcc_target_compile { source dest type options } {
lappend options "timeout=[timeout_value]"
lappend options "compiler=$GCC_UNDER_TEST"
set options [dg-additional-files-options $options $source]
+
+ if {[target_info needs_status_wrapper] != "" && \
+ [target_info needs_status_wrapper] != "0" && \
+ [info exists gluefile] } {
+ lappend options "libs=${gluefile}"
+ lappend options "ldflags=$wrap_flags"
+ if { $type == "executable" } {
+ set options [concat "{additional_flags=-dumpbase \"\"}" $options]
+ }
+ }
+
set return_val [target_compile $source $dest $type $options]
if {[board_info $tboard exists multilib_flags]} {
diff --git a/gcc/testsuite/lib/wrapper.exp b/gcc/testsuite/lib/wrapper.exp
index 5a601b2..4a7d569 100644
--- a/gcc/testsuite/lib/wrapper.exp
+++ b/gcc/testsuite/lib/wrapper.exp
@@ -22,7 +22,7 @@
# the compiler when compiling FILENAME.
proc ${tool}_maybe_build_wrapper { filename args } {
- global gluefile wrap_flags
+ global gluefile wrap_flags gcc_adjusted_linker_flags
if { [target_info needs_status_wrapper] != "" \
&& [target_info needs_status_wrapper] != "0" \
@@ -43,6 +43,11 @@ proc ${tool}_maybe_build_wrapper { filename args } {
if { $result != "" } {
set gluefile [lindex $result 0]
set wrap_flags [lindex $result 1]
+
+ # Reset the cached state of the adjusted flags
+ if { [info exists gcc_adjusted_linker_flags] } {
+ set gcc_adjusted_linker_flags 0
+ }
}
}
}
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index e39d947..53be0c2 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -4167,6 +4167,8 @@ verify_gimple_assign_binary (gassign *stmt)
case ROUND_MOD_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
/* Disallow pointer and offset types for many of the binary gimple. */
if (POINTER_TYPE_P (lhs_type)
|| TREE_CODE (lhs_type) == OFFSET_TYPE)
@@ -4182,9 +4184,23 @@ verify_gimple_assign_binary (gassign *stmt)
case MIN_EXPR:
case MAX_EXPR:
- case BIT_IOR_EXPR:
- case BIT_XOR_EXPR:
+ /* Continue with generic binary expression handling. */
+ break;
+
case BIT_AND_EXPR:
+ if (POINTER_TYPE_P (lhs_type)
+ && TREE_CODE (rhs2) == INTEGER_CST)
+ break;
+ /* Disallow pointer and offset types for many of the binary gimple. */
+ if (POINTER_TYPE_P (lhs_type)
+ || TREE_CODE (lhs_type) == OFFSET_TYPE)
+ {
+ error ("invalid types for %qs", code_name);
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ debug_generic_expr (rhs2_type);
+ return true;
+ }
/* Continue with generic binary expression handling. */
break;
@@ -9820,16 +9836,12 @@ execute_fixup_cfg (void)
int flags = gimple_call_flags (stmt);
if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
{
- if (gimple_purge_dead_abnormal_call_edges (bb))
- todo |= TODO_cleanup_cfg;
-
if (gimple_in_ssa_p (cfun))
{
todo |= TODO_update_ssa | TODO_cleanup_cfg;
update_stmt (stmt);
}
}
-
if (flags & ECF_NORETURN
&& fixup_noreturn_call (stmt))
todo |= TODO_cleanup_cfg;
@@ -9859,10 +9871,15 @@ execute_fixup_cfg (void)
}
}
- if (maybe_clean_eh_stmt (stmt)
+ gsi_next (&gsi);
+ }
+ if (gimple *last = last_stmt (bb))
+ {
+ if (maybe_clean_eh_stmt (last)
&& gimple_purge_dead_eh_edges (bb))
todo |= TODO_cleanup_cfg;
- gsi_next (&gsi);
+ if (gimple_purge_dead_abnormal_call_edges (bb))
+ todo |= TODO_cleanup_cfg;
}
/* If we have a basic block with no successors that does not
diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc
index fc59d03..9f30f78 100644
--- a/gcc/tree-scalar-evolution.cc
+++ b/gcc/tree-scalar-evolution.cc
@@ -3635,6 +3635,64 @@ enum bit_op_kind
return fold_build2 (code1, type, inv, wide_int_to_tree (type, bits));
}
+/* Match.pd function to match bitop with invariant expression
+ .i.e.
+ tmp_7 = _0 & _1; */
+extern bool gimple_bitop_with_inv_p (tree, tree *, tree (*)(tree));
+
+/* Return the inductive expression of bitop with invariant if possible,
+ otherwise returns DEF. */
+static tree
+analyze_and_compute_bitop_with_inv_effect (class loop* loop, tree phidef,
+ tree niter)
+{
+ tree match_op[2],inv;
+ tree type = TREE_TYPE (phidef);
+ gphi* header_phi = NULL;
+ enum tree_code code;
+ /* match thing like op0 (match[0]), op1 (match[1]), phidef (PHIDEF)
+
+ op1 = PHI <phidef, inv>
+ phidef = op0 & op1
+ if op0 is an invariant, it could change to
+ phidef = op0 & inv. */
+ gimple *def;
+ def = SSA_NAME_DEF_STMT (phidef);
+ if (!(is_gimple_assign (def)
+ && ((code = gimple_assign_rhs_code (def)), true)
+ && (code == BIT_AND_EXPR || code == BIT_IOR_EXPR
+ || code == BIT_XOR_EXPR)))
+ return NULL_TREE;
+
+ match_op[0] = gimple_assign_rhs1 (def);
+ match_op[1] = gimple_assign_rhs2 (def);
+
+ if (TREE_CODE (match_op[1]) != SSA_NAME
+ || !expr_invariant_in_loop_p (loop, match_op[0])
+ || !(header_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (match_op[1])))
+ || gimple_phi_num_args (header_phi) != 2)
+ return NULL_TREE;
+
+ if (PHI_ARG_DEF_FROM_EDGE (header_phi, loop_latch_edge (loop)) != phidef)
+ return NULL_TREE;
+
+ enum tree_code code1
+ = gimple_assign_rhs_code (def);
+
+ if (code1 == BIT_XOR_EXPR)
+ {
+ if (!tree_fits_uhwi_p (niter))
+ return NULL_TREE;
+ unsigned HOST_WIDE_INT niter_num;
+ niter_num = tree_to_uhwi (niter);
+ if (niter_num % 2 != 0)
+ match_op[0] = build_zero_cst (type);
+ }
+
+ inv = PHI_ARG_DEF_FROM_EDGE (header_phi, loop_preheader_edge (loop));
+ return fold_build2 (code1, type, inv, match_op[0]);
+}
+
/* Do final value replacement for LOOP, return true if we did anything. */
bool
@@ -3685,7 +3743,24 @@ final_value_replacement_loop (class loop *loop)
bool folded_casts;
def = analyze_scalar_evolution_in_loop (ex_loop, loop, def,
&folded_casts);
- def = compute_overall_effect_of_inner_loop (ex_loop, def);
+
+ tree bitinv_def, bit_def;
+ unsigned HOST_WIDE_INT niter_num;
+
+ if (def != chrec_dont_know)
+ def = compute_overall_effect_of_inner_loop (ex_loop, def);
+
+ /* Handle bitop with invariant induction expression.
+
+ .i.e
+ for (int i =0 ;i < 32; i++)
+ tmp &= bit2;
+ if bit2 is an invariant in loop which could simple to
+ tmp &= bit2. */
+ else if ((bitinv_def
+ = analyze_and_compute_bitop_with_inv_effect (loop,
+ phidef, niter)))
+ def = bitinv_def;
/* Handle bitwise induction expression.
@@ -3697,15 +3772,13 @@ final_value_replacement_loop (class loop *loop)
expressible, but in fact final value of RES can be replaced by
RES & CONSTANT where CONSTANT all ones with bit {0,3,6,9,... ,63}
being cleared, similar for BIT_IOR_EXPR/BIT_XOR_EXPR. */
- unsigned HOST_WIDE_INT niter_num;
- tree bit_def;
- if (tree_fits_uhwi_p (niter)
- && (niter_num = tree_to_uhwi (niter)) != 0
- && niter_num < TYPE_PRECISION (TREE_TYPE (phidef))
- && (bit_def
- = analyze_and_compute_bitwise_induction_effect (loop,
- phidef,
- niter_num)))
+ else if (tree_fits_uhwi_p (niter)
+ && (niter_num = tree_to_uhwi (niter)) != 0
+ && niter_num < TYPE_PRECISION (TREE_TYPE (phidef))
+ && (bit_def
+ = analyze_and_compute_bitwise_induction_effect (loop,
+ phidef,
+ niter_num)))
def = bit_def;
if (!tree_does_not_contain_chrecs (def)
diff --git a/gcc/tree-ssa-pre.cc b/gcc/tree-ssa-pre.cc
index e029bd3..2afc74f 100644
--- a/gcc/tree-ssa-pre.cc
+++ b/gcc/tree-ssa-pre.cc
@@ -1236,7 +1236,11 @@ translate_vuse_through_block (vec<vn_reference_op_s> operands,
if (same_valid)
*same_valid = true;
- if (gimple_bb (phi) != phiblock)
+ /* If value-numbering provided a memory state for this
+ that dominates PHIBLOCK we can just use that. */
+ if (gimple_nop_p (phi)
+ || (gimple_bb (phi) != phiblock
+ && dominated_by_p (CDI_DOMINATORS, phiblock, gimple_bb (phi))))
return vuse;
/* We have pruned expressions that are killed in PHIBLOCK via
@@ -2031,11 +2035,13 @@ prune_clobbered_mems (bitmap_set_t set, basic_block block)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (ref->vuse);
if (!gimple_nop_p (def_stmt)
- && ((gimple_bb (def_stmt) != block
- && !dominated_by_p (CDI_DOMINATORS,
- block, gimple_bb (def_stmt)))
- || (gimple_bb (def_stmt) == block
- && value_dies_in_block_x (expr, block))))
+ /* If value-numbering provided a memory state for this
+ that dominates BLOCK we're done, otherwise we have
+ to check if the value dies in BLOCK. */
+ && !(gimple_bb (def_stmt) != block
+ && dominated_by_p (CDI_DOMINATORS,
+ block, gimple_bb (def_stmt)))
+ && value_dies_in_block_x (expr, block))
to_remove = i;
}
/* If the REFERENCE may trap make sure the block does not contain
diff --git a/gcc/tree-ssa-reassoc.cc b/gcc/tree-ssa-reassoc.cc
index e13e2cb..c5c8b68 100644
--- a/gcc/tree-ssa-reassoc.cc
+++ b/gcc/tree-ssa-reassoc.cc
@@ -3608,10 +3608,14 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
tree type2 = NULL_TREE;
bool strict_overflow_p = false;
candidates.truncate (0);
+ if (POINTER_TYPE_P (type1))
+ type1 = pointer_sized_int_node;
for (j = i; j; j = chains[j - 1])
{
tree type = TREE_TYPE (ranges[j - 1].exp);
strict_overflow_p |= ranges[j - 1].strict_overflow_p;
+ if (POINTER_TYPE_P (type))
+ type = pointer_sized_int_node;
if ((b % 4) == 3)
{
/* For the signed < 0 cases, the types should be
@@ -3642,6 +3646,8 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
tree type = TREE_TYPE (ranges[j - 1].exp);
if (j == k)
continue;
+ if (POINTER_TYPE_P (type))
+ type = pointer_sized_int_node;
if ((b % 4) == 3)
{
if (!useless_type_conversion_p (type1, type))
@@ -3671,18 +3677,21 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
op = r->exp;
continue;
}
- if (id == l)
+ if (id == l || POINTER_TYPE_P (TREE_TYPE (op)))
{
code = (b % 4) == 3 ? BIT_NOT_EXPR : NOP_EXPR;
- g = gimple_build_assign (make_ssa_name (type1), code, op);
+ tree type3 = id >= l ? type1 : pointer_sized_int_node;
+ g = gimple_build_assign (make_ssa_name (type3), code, op);
gimple_seq_add_stmt_without_update (&seq, g);
op = gimple_assign_lhs (g);
}
tree type = TREE_TYPE (r->exp);
tree exp = r->exp;
- if (id >= l && !useless_type_conversion_p (type1, type))
+ if (POINTER_TYPE_P (type)
+ || (id >= l && !useless_type_conversion_p (type1, type)))
{
- g = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, exp);
+ tree type3 = id >= l ? type1 : pointer_sized_int_node;
+ g = gimple_build_assign (make_ssa_name (type3), NOP_EXPR, exp);
gimple_seq_add_stmt_without_update (&seq, g);
exp = gimple_assign_lhs (g);
}
@@ -3695,6 +3704,14 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
gimple_seq_add_stmt_without_update (&seq, g);
op = gimple_assign_lhs (g);
}
+ type1 = TREE_TYPE (ranges[k - 1].exp);
+ if (POINTER_TYPE_P (type1))
+ {
+ gimple *g
+ = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, op);
+ gimple_seq_add_stmt_without_update (&seq, g);
+ op = gimple_assign_lhs (g);
+ }
candidates.pop ();
if (update_range_test (&ranges[k - 1], NULL, candidates.address (),
candidates.length (), opcode, ops, op,
diff --git a/gcc/tree-ssa-uninit.cc b/gcc/tree-ssa-uninit.cc
index 4a1c333..eae29f8 100644
--- a/gcc/tree-ssa-uninit.cc
+++ b/gcc/tree-ssa-uninit.cc
@@ -1013,11 +1013,9 @@ warn_uninitialized_vars (bool wmaybe_uninit)
if (ee)
bb = ee->dest;
else
- {
- bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb);
- if (!bb || bb->index == EXIT_BLOCK)
- break;
- }
+ bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb);
+ if (!bb || bb->index == EXIT_BLOCK)
+ break;
}
FOR_EACH_BB_FN (bb, cfun)
diff --git a/gcc/tree-ssa.cc b/gcc/tree-ssa.cc
index 6507348..1a93ffd 100644
--- a/gcc/tree-ssa.cc
+++ b/gcc/tree-ssa.cc
@@ -1459,6 +1459,8 @@ maybe_rewrite_mem_ref_base (tree *tp, bitmap suitable_for_renaming)
&& (! INTEGRAL_TYPE_P (TREE_TYPE (*tp))
|| (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp)))
== TYPE_PRECISION (TREE_TYPE (*tp))))
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (sym))
+ || type_has_mode_precision_p (TREE_TYPE (sym)))
&& wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp))),
BITS_PER_UNIT) == 0)
{
@@ -1531,6 +1533,10 @@ non_rewritable_mem_ref_base (tree ref)
&& (! INTEGRAL_TYPE_P (TREE_TYPE (base))
|| (wi::to_offset (TYPE_SIZE (TREE_TYPE (base)))
== TYPE_PRECISION (TREE_TYPE (base))))
+ /* ??? Likewise for extracts from bitfields, we'd have
+ to pun the base object to a size precision mode first. */
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ || type_has_mode_precision_p (TREE_TYPE (decl)))
&& wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (base))),
BITS_PER_UNIT) == 0)
return NULL_TREE;
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 8f88f17..9c434b6 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -8646,8 +8646,10 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
/* Also doens't support peel for neg when niter is variable.
??? generate something like niter_expr & 1 ? init_expr : -init_expr? */
niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- if (niters_skip != NULL_TREE
- && TREE_CODE (niters_skip) != INTEGER_CST)
+ if ((niters_skip != NULL_TREE
+ && TREE_CODE (niters_skip) != INTEGER_CST)
+ || (!vect_use_loop_mask_for_alignment_p (loop_vinfo)
+ && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
diff --git a/gcc/tree.cc b/gcc/tree.cc
index 0546c8f..4165cbd 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -9414,6 +9414,8 @@ build_common_tree_nodes (bool signed_char)
void_node = make_node (VOID_CST);
TREE_TYPE (void_node) = void_type_node;
+ void_list_node = build_tree_list (NULL_TREE, void_type_node);
+
null_pointer_node = build_int_cst (build_pointer_type (void_type_node), 0);
layout_type (TREE_TYPE (null_pointer_node));
diff --git a/gcc/value-query.cc b/gcc/value-query.cc
index 201f679..0bdd670 100644
--- a/gcc/value-query.cc
+++ b/gcc/value-query.cc
@@ -167,7 +167,6 @@ range_query::free_value_range_equiv (value_range_equiv *v)
const class value_range_equiv *
range_query::get_value_range (const_tree expr, gimple *stmt)
{
- gcc_checking_assert (value_range_equiv::supports_p (TREE_TYPE (expr)));
int_range_max r;
if (range_of_expr (r, const_cast<tree> (expr), stmt))
return new (equiv_alloc->allocate ()) value_range_equiv (r);
@@ -218,22 +217,10 @@ range_query::get_tree_range (vrange &r, tree expr, gimple *stmt)
case REAL_CST:
{
- if (TREE_OVERFLOW_P (expr))
- expr = drop_tree_overflow (expr);
-
frange &f = as_a <frange> (r);
f.set (expr, expr);
-
- // Singletons from the tree world have known properties.
- REAL_VALUE_TYPE *rv = TREE_REAL_CST_PTR (expr);
- if (real_isnan (rv))
- f.set_nan (fp_prop::YES);
- else
- f.set_nan (fp_prop::NO);
- if (real_isneg (rv))
- f.set_signbit (fp_prop::YES);
- else
- f.set_signbit (fp_prop::NO);
+ if (!real_isnan (TREE_REAL_CST_PTR (expr)))
+ f.clear_nan ();
return true;
}
diff --git a/gcc/value-range-pretty-print.cc b/gcc/value-range-pretty-print.cc
index 93e18d3..eb74422 100644
--- a/gcc/value-range-pretty-print.cc
+++ b/gcc/value-range-pretty-print.cc
@@ -122,47 +122,51 @@ vrange_printer::print_irange_bitmasks (const irange &r) const
void
vrange_printer::visit (const frange &r) const
{
- tree type = r.type ();
-
pp_string (pp, "[frange] ");
if (r.undefined_p ())
{
pp_string (pp, "UNDEFINED");
return;
}
+ tree type = r.type ();
dump_generic_node (pp, type, 0, TDF_NONE, false);
pp_string (pp, " ");
if (r.varying_p ())
{
pp_string (pp, "VARYING");
+ print_frange_nan (r);
return;
}
pp_character (pp, '[');
- dump_generic_node (pp,
- build_real (type, r.lower_bound ()), 0, TDF_NONE, false);
- pp_string (pp, ", ");
- dump_generic_node (pp,
- build_real (type, r.upper_bound ()), 0, TDF_NONE, false);
- pp_string (pp, "] ");
-
- print_frange_prop ("NAN", r.get_nan ());
- print_frange_prop ("SIGN", r.get_signbit ());
+ bool has_endpoints = !r.known_isnan ();
+ if (has_endpoints)
+ {
+ dump_generic_node (pp,
+ build_real (type, r.lower_bound ()), 0, TDF_NONE, false);
+ pp_string (pp, ", ");
+ dump_generic_node (pp,
+ build_real (type, r.upper_bound ()), 0, TDF_NONE, false);
+ }
+ pp_character (pp, ']');
+ print_frange_nan (r);
}
-// Print the FP properties in an frange.
+// Print the NAN info for an frange.
void
-vrange_printer::print_frange_prop (const char *str, const fp_prop &prop) const
+vrange_printer::print_frange_nan (const frange &r) const
{
- if (prop.varying_p ())
- return;
-
- if (prop.yes_p ())
- pp_string (pp, str);
- else if (prop.no_p ())
+ if (r.maybe_isnan ())
{
- pp_character (pp, '!');
- pp_string (pp, str);
+ if (r.m_pos_nan && r.m_neg_nan)
+ {
+ pp_string (pp, " +-NAN");
+ return;
+ }
+ bool nan_sign = r.m_neg_nan;
+ if (nan_sign)
+ pp_string (pp, " -NAN");
+ else
+ pp_string (pp, " +NAN");
}
- pp_character (pp, ' ');
}
diff --git a/gcc/value-range-pretty-print.h b/gcc/value-range-pretty-print.h
index ad06c93..20c2659 100644
--- a/gcc/value-range-pretty-print.h
+++ b/gcc/value-range-pretty-print.h
@@ -31,7 +31,7 @@ public:
private:
void print_irange_bound (const wide_int &w, tree type) const;
void print_irange_bitmasks (const irange &) const;
- void print_frange_prop (const char *str, const fp_prop &) const;
+ void print_frange_nan (const frange &) const;
pretty_printer *pp;
};
diff --git a/gcc/value-range-storage.cc b/gcc/value-range-storage.cc
index b7a23fa..de7575e 100644
--- a/gcc/value-range-storage.cc
+++ b/gcc/value-range-storage.cc
@@ -253,9 +253,11 @@ frange_storage_slot::set_frange (const frange &r)
gcc_checking_assert (fits_p (r));
gcc_checking_assert (!r.undefined_p ());
+ m_kind = r.m_kind;
m_min = r.m_min;
m_max = r.m_max;
- m_props = r.m_props;
+ m_pos_nan = r.m_pos_nan;
+ m_neg_nan = r.m_neg_nan;
}
void
@@ -264,11 +266,12 @@ frange_storage_slot::get_frange (frange &r, tree type) const
gcc_checking_assert (r.supports_type_p (type));
r.set_undefined ();
- r.m_kind = VR_RANGE;
- r.m_props = m_props;
+ r.m_kind = m_kind;
r.m_type = type;
r.m_min = m_min;
r.m_max = m_max;
+ r.m_pos_nan = m_pos_nan;
+ r.m_neg_nan = m_neg_nan;
r.normalize_kind ();
if (flag_checking)
diff --git a/gcc/value-range-storage.h b/gcc/value-range-storage.h
index f506789..0cf95eb 100644
--- a/gcc/value-range-storage.h
+++ b/gcc/value-range-storage.h
@@ -113,12 +113,11 @@ class GTY (()) frange_storage_slot
frange_storage_slot (const frange &r) { set_frange (r); }
DISABLE_COPY_AND_ASSIGN (frange_storage_slot);
- // We can get away with just storing the properties and the
- // endpoints because the type can be gotten from the SSA, and
- // UNDEFINED is unsupported, so it can only be a VR_RANGE.
+ enum value_range_kind m_kind;
REAL_VALUE_TYPE m_min;
REAL_VALUE_TYPE m_max;
- frange_props m_props;
+ bool m_pos_nan;
+ bool m_neg_nan;
};
class obstack_vrange_allocator final: public vrange_allocator
diff --git a/gcc/value-range.cc b/gcc/value-range.cc
index adcaaa2..a8e3bb3 100644
--- a/gcc/value-range.cc
+++ b/gcc/value-range.cc
@@ -267,85 +267,24 @@ tree_compare (tree_code code, tree op1, tree op2)
return !integer_zerop (fold_build2 (code, integer_type_node, op1, op2));
}
-// Set the NAN property. Adjust the range if appopriate.
+// Flush denormal endpoints to the appropriate 0.0.
void
-frange::set_nan (fp_prop::kind k)
+frange::flush_denormals_to_zero ()
{
- if (k == fp_prop::YES)
- {
- if (!maybe_nan ())
- {
- set_undefined ();
- return;
- }
- gcc_checking_assert (!undefined_p ());
- *this = frange_nan (m_type);
- return;
- }
-
- if (k == fp_prop::NO && known_nan ())
- {
- set_undefined ();
- return;
- }
-
- // Setting VARYING on an obviously NAN range is a no-op.
- if (k == fp_prop::VARYING && real_isnan (&m_min))
+ if (undefined_p () || known_isnan ())
return;
- m_props.set_nan (k);
- normalize_kind ();
- if (flag_checking)
- verify_range ();
-}
-
-// Set the SIGNBIT property. Adjust the range if appropriate.
-
-void
-frange::set_signbit (fp_prop::kind k)
-{
- gcc_checking_assert (m_type);
-
- // No additional adjustments are needed for a NAN.
- if (known_nan ())
- {
- m_props.set_signbit (k);
- return;
- }
- // Ignore sign changes when they're set correctly.
- if (!maybe_nan ())
- {
- if (real_less (&m_max, &dconst0))
- return;
- if (real_less (&dconst0, &m_min))
- return;
- }
- // Adjust the range depending on the sign bit.
- if (k == fp_prop::YES)
- {
- // Crop the range to [-INF, 0].
- frange crop (m_type, dconstninf, dconst0);
- intersect (crop);
- if (!undefined_p ())
- m_props.set_signbit (fp_prop::YES);
- }
- else if (k == fp_prop::NO)
+ // Flush [x, -DENORMAL] to [x, -0.0].
+ if (real_isdenormal (&m_max) && real_isneg (&m_max))
{
- // Crop the range to [0, +INF].
- frange crop (m_type, dconst0, dconstinf);
- intersect (crop);
- if (!undefined_p ())
- m_props.set_signbit (fp_prop::NO);
+ m_max = dconst0;
+ if (HONOR_SIGNED_ZEROS (m_type))
+ m_max.sign = 1;
}
- else
- {
- m_props.set_signbit (fp_prop::VARYING);
- normalize_kind ();
- }
-
- if (flag_checking)
- verify_range ();
+ // Flush [+DENORMAL, x] to [+0.0, x].
+ if (real_isdenormal (&m_min) && !real_isneg (&m_min))
+ m_min = dconst0;
}
// Setter for franges.
@@ -353,51 +292,54 @@ frange::set_signbit (fp_prop::kind k)
void
frange::set (tree min, tree max, value_range_kind kind)
{
- gcc_checking_assert (TREE_CODE (min) == REAL_CST);
- gcc_checking_assert (TREE_CODE (max) == REAL_CST);
-
- if (kind == VR_UNDEFINED)
+ switch (kind)
{
+ case VR_UNDEFINED:
set_undefined ();
return;
+ case VR_VARYING:
+ case VR_ANTI_RANGE:
+ set_varying (TREE_TYPE (min));
+ return;
+ case VR_RANGE:
+ break;
+ default:
+ gcc_unreachable ();
}
- // Treat VR_ANTI_RANGE and VR_VARYING as varying.
- if (kind != VR_RANGE)
+ // Handle NANs.
+ if (real_isnan (TREE_REAL_CST_PTR (min)) || real_isnan (TREE_REAL_CST_PTR (max)))
{
- set_varying (TREE_TYPE (min));
+ gcc_checking_assert (real_identical (TREE_REAL_CST_PTR (min),
+ TREE_REAL_CST_PTR (max)));
+ tree type = TREE_TYPE (min);
+ bool sign = real_isneg (TREE_REAL_CST_PTR (min));
+ set_nan (type, sign);
return;
}
m_kind = kind;
m_type = TREE_TYPE (min);
- m_props.set_varying ();
m_min = *TREE_REAL_CST_PTR (min);
m_max = *TREE_REAL_CST_PTR (max);
-
- bool is_nan = (real_isnan (TREE_REAL_CST_PTR (min))
- || real_isnan (TREE_REAL_CST_PTR (max)));
-
- // Ranges with a NAN and a non-NAN endpoint are nonsensical.
- gcc_checking_assert (!is_nan || operand_equal_p (min, max));
-
- // Set NAN property if we're absolutely sure.
- if (is_nan && operand_equal_p (min, max))
- m_props.nan_set_yes ();
- else if (!HONOR_NANS (m_type))
- m_props.nan_set_no ();
-
- // Set SIGNBIT property for positive and negative ranges.
- if (real_less (&m_max, &dconst0))
- m_props.signbit_set_yes ();
- else if (real_less (&dconst0, &m_min))
- m_props.signbit_set_no ();
+ if (HONOR_NANS (m_type))
+ {
+ m_pos_nan = true;
+ m_neg_nan = true;
+ }
+ else
+ {
+ m_pos_nan = false;
+ m_neg_nan = false;
+ }
// Check for swapped ranges.
- gcc_checking_assert (is_nan || tree_compare (LE_EXPR, min, max));
+ gcc_checking_assert (tree_compare (LE_EXPR, min, max));
normalize_kind ();
+ flush_denormals_to_zero ();
+
if (flag_checking)
verify_range ();
}
@@ -423,18 +365,11 @@ frange::set (tree type,
bool
frange::normalize_kind ()
{
- // Undefined is viral.
- if (m_props.nan_undefined_p () || m_props.signbit_undefined_p ())
- {
- set_undefined ();
- return true;
- }
if (m_kind == VR_RANGE
&& real_isinf (&m_min, 1)
&& real_isinf (&m_max, 0))
{
- // No FP properties set means varying.
- if (m_props.varying_p ())
+ if (m_pos_nan && m_neg_nan)
{
set_varying (m_type);
return true;
@@ -442,8 +377,7 @@ frange::normalize_kind ()
}
else if (m_kind == VR_VARYING)
{
- // If a VARYING has any FP properties, it's no longer VARYING.
- if (!m_props.varying_p ())
+ if (!m_pos_nan || !m_neg_nan)
{
m_kind = VR_RANGE;
m_min = dconstninf;
@@ -451,9 +385,70 @@ frange::normalize_kind ()
return true;
}
}
+ else if (m_kind == VR_NAN && !m_pos_nan && !m_neg_nan)
+ set_undefined ();
return false;
}
+// Union or intersect the zero endpoints of two ranges. For example:
+// [-0, x] U [+0, x] => [-0, x]
+// [ x, -0] U [ x, +0] => [ x, +0]
+// [-0, x] ^ [+0, x] => [+0, x]
+// [ x, -0] ^ [ x, +0] => [ x, -0]
+//
+// UNION_P is true when performing a union, or false when intersecting.
+
+bool
+frange::combine_zeros (const frange &r, bool union_p)
+{
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
+
+ bool changed = false;
+ if (real_iszero (&m_min) && real_iszero (&r.m_min)
+ && real_isneg (&m_min) != real_isneg (&r.m_min))
+ {
+ m_min.sign = union_p;
+ changed = true;
+ }
+ if (real_iszero (&m_max) && real_iszero (&r.m_max)
+ && real_isneg (&m_max) != real_isneg (&r.m_max))
+ {
+ m_max.sign = !union_p;
+ changed = true;
+ }
+ // If the signs are swapped, the resulting range is empty.
+ if (m_min.sign == 0 && m_max.sign == 1)
+ {
+ if (maybe_isnan ())
+ m_kind = VR_NAN;
+ else
+ m_kind = VR_UNDEFINED;
+ changed = true;
+ }
+ return changed;
+}
+
+// Union two ranges when one is known to be a NAN.
+
+bool
+frange::union_nans (const frange &r)
+{
+ gcc_checking_assert (known_isnan () || r.known_isnan ());
+
+ if (known_isnan ())
+ {
+ m_kind = r.m_kind;
+ m_min = r.m_min;
+ m_max = r.m_max;
+ }
+ m_pos_nan |= r.m_pos_nan;
+ m_neg_nan |= r.m_neg_nan;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ return true;
+}
+
bool
frange::union_ (const vrange &v)
{
@@ -467,29 +462,18 @@ frange::union_ (const vrange &v)
return true;
}
- // If one side has a NAN, the union is the other side, plus the union
- // of the properties and the possibility of a NAN.
- if (known_nan ())
- {
- frange_props save = m_props;
- *this = r;
- m_props = save;
- m_props.union_ (r.m_props);
- set_nan (fp_prop::VARYING);
- if (flag_checking)
- verify_range ();
- return true;
- }
- if (r.known_nan ())
+ // Combine NAN info.
+ if (known_isnan () || r.known_isnan ())
+ return union_nans (r);
+ bool changed = false;
+ if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
{
- m_props.union_ (r.m_props);
- set_nan (fp_prop::VARYING);
- if (flag_checking)
- verify_range ();
- return true;
+ m_pos_nan |= r.m_pos_nan;
+ m_neg_nan |= r.m_neg_nan;
+ changed = true;
}
- bool changed = m_props.union_ (r.m_props);
+ // Combine endpoints.
if (real_less (&r.m_min, &m_min))
{
m_min = r.m_min;
@@ -500,13 +484,34 @@ frange::union_ (const vrange &v)
m_max = r.m_max;
changed = true;
}
- changed |= normalize_kind ();
+ if (HONOR_SIGNED_ZEROS (m_type))
+ changed |= combine_zeros (r, true);
+
+ changed |= normalize_kind ();
if (flag_checking)
verify_range ();
return changed;
}
+// Intersect two ranges when one is known to be a NAN.
+
+bool
+frange::intersect_nans (const frange &r)
+{
+ gcc_checking_assert (known_isnan () || r.known_isnan ());
+
+ m_pos_nan &= r.m_pos_nan;
+ m_neg_nan &= r.m_neg_nan;
+ if (maybe_isnan ())
+ m_kind = VR_NAN;
+ else
+ m_kind = VR_UNDEFINED;
+ if (flag_checking)
+ verify_range ();
+ return true;
+}
+
bool
frange::intersect (const vrange &v)
{
@@ -525,25 +530,18 @@ frange::intersect (const vrange &v)
return true;
}
- // If two NANs are not exactly the same, drop to an unknown NAN,
- // otherwise there's nothing to do.
- if (known_nan () && r.known_nan ())
+ // Combine NAN info.
+ if (known_isnan () || r.known_isnan ())
+ return intersect_nans (r);
+ bool changed = false;
+ if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
{
- if (m_props == r.m_props)
- return false;
-
- *this = frange_nan (m_type);
- return true;
- }
- // ?? Perhaps the intersection of a NAN and anything is a NAN ??.
- if (known_nan () || r.known_nan ())
- {
- set_varying (m_type);
- return true;
+ m_pos_nan &= r.m_pos_nan;
+ m_neg_nan &= r.m_neg_nan;
+ changed = true;
}
- bool changed = m_props.intersect (r.m_props);
-
+ // Combine endpoints.
if (real_less (&m_min, &r.m_min))
{
m_min = r.m_min;
@@ -554,14 +552,22 @@ frange::intersect (const vrange &v)
m_max = r.m_max;
changed = true;
}
- // If the endpoints are swapped, the ranges are disjoint.
+ // If the endpoints are swapped, the resulting range is empty.
if (real_less (&m_max, &m_min))
{
- set_undefined ();
+ if (maybe_isnan ())
+ m_kind = VR_NAN;
+ else
+ m_kind = VR_UNDEFINED;
+ if (flag_checking)
+ verify_range ();
return true;
}
- changed |= normalize_kind ();
+ if (HONOR_SIGNED_ZEROS (m_type))
+ changed |= combine_zeros (r, false);
+
+ changed |= normalize_kind ();
if (flag_checking)
verify_range ();
return changed;
@@ -574,7 +580,8 @@ frange::operator= (const frange &src)
m_type = src.m_type;
m_min = src.m_min;
m_max = src.m_max;
- m_props = src.m_props;
+ m_pos_nan = src.m_pos_nan;
+ m_neg_nan = src.m_neg_nan;
if (flag_checking)
verify_range ();
@@ -592,12 +599,13 @@ frange::operator== (const frange &src) const
if (varying_p ())
return types_compatible_p (m_type, src.m_type);
- if (known_nan () || src.known_nan ())
+ if (known_isnan () || src.known_isnan ())
return false;
return (real_identical (&m_min, &src.m_min)
&& real_identical (&m_max, &src.m_max)
- && m_props == src.m_props
+ && m_pos_nan == src.m_pos_nan
+ && m_neg_nan == src.m_neg_nan
&& types_compatible_p (m_type, src.m_type));
}
return false;
@@ -608,30 +616,33 @@ frange::operator== (const frange &src) const
bool
frange::contains_p (tree cst) const
{
+ gcc_checking_assert (m_kind != VR_ANTI_RANGE);
+ const REAL_VALUE_TYPE *rv = TREE_REAL_CST_PTR (cst);
+
if (undefined_p ())
return false;
if (varying_p ())
return true;
- gcc_checking_assert (m_kind == VR_RANGE);
+ if (real_isnan (rv))
+ {
+ // No NAN in range.
+ if (!m_pos_nan && !m_neg_nan)
+ return false;
+ // Both +NAN and -NAN are present.
+ if (m_pos_nan && m_neg_nan)
+ return true;
+ return m_neg_nan == rv->sign;
+ }
+ if (known_isnan ())
+ return false;
- const REAL_VALUE_TYPE *rv = TREE_REAL_CST_PTR (cst);
- if (real_compare (GE_EXPR, rv, &m_min)
- && real_compare (LE_EXPR, rv, &m_max))
+ if (real_compare (GE_EXPR, rv, &m_min) && real_compare (LE_EXPR, rv, &m_max))
{
+ // Make sure the signs are equal for signed zeros.
if (HONOR_SIGNED_ZEROS (m_type) && real_iszero (rv))
- {
- // FIXME: This is still using get_signbit() instead of
- // known_signbit() because the latter bails on possible NANs
- // (for now).
- if (get_signbit ().yes_p ())
- return real_isneg (rv);
- else if (get_signbit ().no_p ())
- return !real_isneg (rv);
- else
- return true;
- }
+ return m_min.sign == m_max.sign && m_min.sign == rv->sign;
return true;
}
return false;
@@ -648,29 +659,24 @@ frange::singleton_p (tree *result) const
if (m_kind == VR_RANGE && real_identical (&m_min, &m_max))
{
// Return false for any singleton that may be a NAN.
- if (HONOR_NANS (m_type) && maybe_nan ())
+ if (HONOR_NANS (m_type) && maybe_isnan ())
return false;
- // Return the appropriate zero if known.
- if (HONOR_SIGNED_ZEROS (m_type) && zero_p ())
+ if (MODE_COMPOSITE_P (TYPE_MODE (m_type)))
{
- bool signbit;
- if (known_signbit (signbit))
- {
- if (signbit)
- {
- if (result)
- *result = build_real (m_type, real_value_negate (&dconst0));
- }
- else
- {
- if (result)
- *result = build_real (m_type, dconst0);
- }
- return true;
- }
- return false;
+ // For IBM long doubles, if the value is +-Inf or is exactly
+ // representable in double, the other double could be +0.0
+ // or -0.0. Since this means there is more than one way to
+ // represent a value, return false to avoid propagating it.
+ // See libgcc/config/rs6000/ibm-ldouble-format for details.
+ if (real_isinf (&m_min))
+ return false;
+ REAL_VALUE_TYPE r;
+ real_convert (&r, DFmode, &m_min);
+ if (real_identical (&r, &m_min))
+ return false;
}
+
if (result)
*result = build_real (m_type, m_min);
return true;
@@ -687,57 +693,40 @@ frange::supports_type_p (const_tree type) const
void
frange::verify_range ()
{
- if (undefined_p ())
+ switch (m_kind)
{
- gcc_checking_assert (m_props.undefined_p ());
+ case VR_UNDEFINED:
+ // m_type is ignored.
return;
- }
- gcc_checking_assert (!m_props.undefined_p ());
-
- if (varying_p ())
- {
- gcc_checking_assert (m_props.varying_p ());
+ case VR_VARYING:
+ gcc_checking_assert (m_type);
+ gcc_checking_assert (m_pos_nan && m_neg_nan);
+ gcc_checking_assert (real_isinf (&m_min, 1));
+ gcc_checking_assert (real_isinf (&m_max, 0));
+ return;
+ case VR_RANGE:
+ gcc_checking_assert (m_type);
+ break;
+ case VR_NAN:
+ gcc_checking_assert (m_type);
+ gcc_checking_assert (m_pos_nan || m_neg_nan);
return;
+ default:
+ gcc_unreachable ();
}
- // We don't support the inverse of an frange (yet).
- gcc_checking_assert (m_kind == VR_RANGE);
+ // NANs cannot appear in the endpoints of a range.
+ gcc_checking_assert (!real_isnan (&m_min) && !real_isnan (&m_max));
- bool is_nan = real_isnan (&m_min) || real_isnan (&m_max);
- if (is_nan)
- {
- // If either is a NAN, both must be a NAN.
- gcc_checking_assert (real_identical (&m_min, &m_max));
- gcc_checking_assert (known_nan ());
- }
- else
- // Make sure we don't have swapped ranges.
- gcc_checking_assert (!real_less (&m_max, &m_min));
+ // Make sure we don't have swapped ranges.
+ gcc_checking_assert (!real_less (&m_max, &m_min));
- // If we're absolutely sure we have a NAN, the endpoints should
- // reflect this, otherwise we'd have more than one way to represent
- // a NAN.
- if (known_nan ())
- {
- gcc_checking_assert (real_isnan (&m_min));
- gcc_checking_assert (real_isnan (&m_max));
- }
- else
- {
- // Make sure the signbit and range agree.
- bool signbit;
- if (known_signbit (signbit))
- {
- if (signbit)
- gcc_checking_assert (real_compare (LE_EXPR, &m_max, &dconst0));
- else
- gcc_checking_assert (real_compare (GE_EXPR, &m_min, &dconst0));
- }
- }
+ // [ +0.0, -0.0 ] is nonsensical.
+ gcc_checking_assert (!(real_iszero (&m_min, 0) && real_iszero (&m_max, 1)));
// If all the properties are clear, we better not span the entire
// domain, because that would make us varying.
- if (m_props.varying_p ())
+ if (m_pos_nan && m_neg_nan)
gcc_checking_assert (!real_isinf (&m_min, 1) || !real_isinf (&m_max, 0));
}
@@ -755,16 +744,24 @@ frange::nonzero_p () const
return false;
}
-// Set range to [+0.0, +0.0].
+// Set range to [+0.0, +0.0] if honoring signed zeros, or [0.0, 0.0]
+// otherwise.
void
frange::set_zero (tree type)
{
- tree zero = build_zero_cst (type);
- set (zero, zero);
+ if (HONOR_SIGNED_ZEROS (type))
+ {
+ REAL_VALUE_TYPE dconstm0 = dconst0;
+ dconstm0.sign = 1;
+ set (type, dconstm0, dconst0);
+ clear_nan ();
+ }
+ else
+ set (type, dconst0, dconst0);
}
-// Return TRUE for any [0.0, 0.0] regardless of sign.
+// Return TRUE for any zero regardless of sign.
bool
frange::zero_p () const
@@ -777,9 +774,7 @@ frange::zero_p () const
void
frange::set_nonnegative (tree type)
{
- tree zero = build_zero_cst (type);
- tree inf = vrp_val_max (type);
- set (zero, inf);
+ set (type, dconst0, dconstinf);
}
// Here we copy between any two irange's. The ranges can be legacy or
@@ -3635,62 +3630,110 @@ range_tests_nan ()
r1 = frange_float ("10", "12");
r0 = r1;
ASSERT_EQ (r0, r1);
- r0.set_nan (fp_prop::NO);
- ASSERT_NE (r0, r1);
- r0.set_nan (fp_prop::YES);
+ r0.clear_nan ();
ASSERT_NE (r0, r1);
+ r0.update_nan ();
+ ASSERT_EQ (r0, r1);
+
+ // [10, 20] NAN ^ [30, 40] NAN = NAN.
+ r0 = frange_float ("10", "20");
+ r1 = frange_float ("30", "40");
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.known_isnan ());
+
+ // [3,5] U [5,10] NAN = ... NAN
+ r0 = frange_float ("3", "5");
+ r0.clear_nan ();
+ r1 = frange_float ("5", "10");
+ r0.union_ (r1);
+ ASSERT_TRUE (r0.maybe_isnan ());
}
// NAN ranges are not equal to each other.
- r0 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
r1 = r0;
ASSERT_FALSE (r0 == r1);
ASSERT_FALSE (r0 == r0);
ASSERT_TRUE (r0 != r0);
- // [5,6] U NAN.
+ // [5,6] U NAN = [5,6] NAN.
r0 = frange_float ("5", "6");
- r0.set_nan (fp_prop::NO);
- r1 = frange_nan (float_type_node);
+ r0.clear_nan ();
+ r1.set_nan (float_type_node);
r0.union_ (r1);
real_from_string (&q, "5");
real_from_string (&r, "6");
ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
- ASSERT_TRUE (r0.maybe_nan ());
+ ASSERT_TRUE (r0.maybe_isnan ());
// NAN U NAN = NAN
- r0 = frange_nan (float_type_node);
- r1 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
+ r1.set_nan (float_type_node);
r0.union_ (r1);
- ASSERT_TRUE (real_isnan (&r0.lower_bound ()));
- ASSERT_TRUE (real_isnan (&r1.upper_bound ()));
- ASSERT_TRUE (r0.known_nan ());
+ ASSERT_TRUE (r0.known_isnan ());
- // [INF, INF] ^ NAN = VARYING
- r0 = frange_nan (float_type_node);
+ // [INF, INF] NAN ^ NAN = NAN
+ r0.set_nan (float_type_node);
r1 = frange_float ("+Inf", "+Inf");
+ if (!HONOR_NANS (float_type_node))
+ r1.update_nan ();
r0.intersect (r1);
- ASSERT_TRUE (r0.varying_p ());
+ ASSERT_TRUE (r0.known_isnan ());
// NAN ^ NAN = NAN
- r0 = frange_nan (float_type_node);
- r1 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
+ r1.set_nan (float_type_node);
r0.intersect (r1);
- ASSERT_TRUE (r0.known_nan ());
+ ASSERT_TRUE (r0.known_isnan ());
+
+ // +NAN ^ -NAN = UNDEFINED
+ r0.set_nan (float_type_node, false);
+ r1.set_nan (float_type_node, true);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.undefined_p ());
// VARYING ^ NAN = NAN.
- r0 = frange_nan (float_type_node);
+ r0.set_nan (float_type_node);
r1.set_varying (float_type_node);
r0.intersect (r1);
- ASSERT_TRUE (r0.known_nan ());
+ ASSERT_TRUE (r0.known_isnan ());
+
+ // [3,4] ^ NAN = UNDEFINED.
+ r0 = frange_float ("3", "4");
+ r0.clear_nan ();
+ r1.set_nan (float_type_node);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.undefined_p ());
+
+ // [-3, 5] ^ NAN = UNDEFINED
+ r0 = frange_float ("-3", "5");
+ r0.clear_nan ();
+ r1.set_nan (float_type_node);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.undefined_p ());
+
+ // Setting the NAN bit to yes does not make us a known NAN.
+ r0.set_varying (float_type_node);
+ r0.update_nan ();
+ ASSERT_FALSE (r0.known_isnan ());
+
+ // NAN is in a VARYING.
+ r0.set_varying (float_type_node);
+ real_nan (&r, "", 1, TYPE_MODE (float_type_node));
+ tree nan = build_real (float_type_node, r);
+ ASSERT_TRUE (r0.contains_p (nan));
- // Setting the NAN bit to yes, forces to range to [NAN, NAN].
+ // -NAN is in a VARYING.
r0.set_varying (float_type_node);
- r0.set_nan (fp_prop::YES);
- ASSERT_TRUE (r0.known_nan ());
- ASSERT_TRUE (real_isnan (&r0.lower_bound ()));
- ASSERT_TRUE (real_isnan (&r0.upper_bound ()));
+ q = real_value_negate (&r);
+ tree neg_nan = build_real (float_type_node, q);
+ ASSERT_TRUE (r0.contains_p (neg_nan));
+
+ // Clearing the NAN on a [] NAN is the empty set.
+ r0.set_nan (float_type_node);
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.undefined_p ());
}
static void
@@ -3702,49 +3745,84 @@ range_tests_signed_zeros ()
frange r0, r1;
bool signbit;
- // Since -0.0 == +0.0, a range of [-0.0, -0.0] should contain +0.0
- // and vice versa.
+ // [0,0] contains [0,0] but not [-0,-0] and vice versa.
r0 = frange (zero, zero);
r1 = frange (neg_zero, neg_zero);
ASSERT_TRUE (r0.contains_p (zero));
- ASSERT_TRUE (r0.contains_p (neg_zero));
- ASSERT_TRUE (r1.contains_p (zero));
+ ASSERT_TRUE (!r0.contains_p (neg_zero));
ASSERT_TRUE (r1.contains_p (neg_zero));
+ ASSERT_TRUE (!r1.contains_p (zero));
// Test contains_p() when we know the sign of the zero.
- r0 = frange(zero, zero);
- r0.set_signbit (fp_prop::NO);
+ r0 = frange (zero, zero);
ASSERT_TRUE (r0.contains_p (zero));
ASSERT_FALSE (r0.contains_p (neg_zero));
- r0.set_signbit (fp_prop::YES);
+ r0 = frange (neg_zero, neg_zero);
ASSERT_TRUE (r0.contains_p (neg_zero));
ASSERT_FALSE (r0.contains_p (zero));
- // The intersection of zeros that differ in sign is the empty set.
- r0 = frange (zero, zero);
- r0.set_signbit (fp_prop::YES);
+ // The intersection of zeros that differ in sign is a NAN (or
+ // undefined if not honoring NANs).
+ r0 = frange (neg_zero, neg_zero);
r1 = frange (zero, zero);
- r1.set_signbit (fp_prop::NO);
r0.intersect (r1);
- ASSERT_TRUE (r0.undefined_p ());
+ if (HONOR_NANS (float_type_node))
+ ASSERT_TRUE (r0.known_isnan ());
+ else
+ ASSERT_TRUE (r0.undefined_p ());
// The union of zeros that differ in sign is a zero with unknown sign.
r0 = frange (zero, zero);
- r0.set_signbit (fp_prop::NO);
- r1 = frange (zero, zero);
- r1.set_signbit (fp_prop::YES);
+ r1 = frange (neg_zero, neg_zero);
r0.union_ (r1);
- ASSERT_TRUE (r0.zero_p () && !r0.known_signbit (signbit));
+ ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));
+
+ // [-0, +0] has an unknown sign.
+ r0 = frange (neg_zero, zero);
+ ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));
+
+ // [-0, +0] ^ [0, 0] is [0, 0]
+ r0 = frange (neg_zero, zero);
+ r1 = frange (zero, zero);
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.zero_p ());
- // NAN U [5,6] should be [5,6] with no sign info.
- r0 = frange_nan (float_type_node);
+ // NAN U [5,6] should be [5,6] NAN.
+ r0.set_nan (float_type_node);
r1 = frange_float ("5", "6");
+ r1.clear_nan ();
r0.union_ (r1);
real_from_string (&q, "5");
real_from_string (&r, "6");
ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
- ASSERT_TRUE (!r0.known_signbit (signbit));
+ ASSERT_TRUE (!r0.signbit_p (signbit));
+ ASSERT_TRUE (r0.maybe_isnan ());
+
+ r0 = frange_float ("+0", "5");
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
+
+ r0 = frange_float ("-0", "5");
+ r0.clear_nan ();
+ ASSERT_TRUE (!r0.signbit_p (signbit));
+
+ r0 = frange_float ("-0", "10");
+ r1 = frange_float ("0", "5");
+ r0.intersect (r1);
+ ASSERT_TRUE (real_iszero (&r0.lower_bound (), false));
+
+ r0 = frange_float ("-0", "5");
+ r1 = frange_float ("0", "5");
+ r0.union_ (r1);
+ ASSERT_TRUE (real_iszero (&r0.lower_bound (), true));
+
+ r0 = frange_float ("-5", "-0");
+ r0.update_nan ();
+ r1 = frange_float ("0", "0");
+ r1.update_nan ();
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.known_isnan ());
}
static void
@@ -3753,52 +3831,25 @@ range_tests_signbit ()
frange r0, r1;
bool signbit;
- // Setting the signbit drops the range to [-INF, 0].
- r0.set_varying (float_type_node);
- r0.set_signbit (fp_prop::YES);
- ASSERT_TRUE (real_isinf (&r0.lower_bound (), 1));
- ASSERT_TRUE (real_iszero (&r0.upper_bound ()));
-
- // Setting the signbit for [-5, 10] crops the range to [-5, 0] with
- // the signbit property set.
- r0 = frange_float ("-5", "10");
- r0.set_signbit (fp_prop::YES);
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.known_signbit (signbit) && signbit);
- r1 = frange_float ("-5", "0");
- ASSERT_TRUE (real_identical (&r0.lower_bound (), &r1.lower_bound ()));
- ASSERT_TRUE (real_identical (&r0.upper_bound (), &r1.upper_bound ()));
-
// Negative numbers should have the SIGNBIT set.
r0 = frange_float ("-5", "-1");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.known_signbit (signbit) && signbit);
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && signbit);
// Positive numbers should have the SIGNBIT clear.
r0 = frange_float ("1", "10");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.known_signbit (signbit) && !signbit);
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
// Numbers containing zero should have an unknown SIGNBIT.
r0 = frange_float ("0", "10");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (!r0.known_signbit (signbit));
+ r0.clear_nan ();
+ ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
// Numbers spanning both positive and negative should have an
// unknown SIGNBIT.
r0 = frange_float ("-10", "10");
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (!r0.known_signbit (signbit));
+ r0.clear_nan ();
+ ASSERT_TRUE (!r0.signbit_p (signbit));
r0.set_varying (float_type_node);
- ASSERT_TRUE (!r0.known_signbit (signbit));
-
- // Ignore signbit changes when the sign bit is obviously known from
- // the range.
- r0 = frange_float ("5", "10");
- r0.set_nan (fp_prop::NO);
- r0.set_signbit (fp_prop::VARYING);
- ASSERT_TRUE (r0.known_signbit (signbit) && !signbit);
- r0 = frange_float ("-5", "-1");
- r0.set_signbit (fp_prop::NO);
- r0.set_nan (fp_prop::NO);
- ASSERT_TRUE (r0.undefined_p ());
+ ASSERT_TRUE (!r0.signbit_p (signbit));
}
static void
@@ -3815,10 +3866,10 @@ range_tests_floats ()
// A range of [-INF,+INF] is actually VARYING if no other properties
// are set.
r0 = frange_float ("-Inf", "+Inf");
- if (r0.maybe_nan ())
+ if (r0.maybe_isnan ())
ASSERT_TRUE (r0.varying_p ());
// ...unless it has some special property...
- r0.set_nan (fp_prop::NO);
+ r0.clear_nan ();
ASSERT_FALSE (r0.varying_p ());
// The endpoints of a VARYING are +-INF.
@@ -3896,9 +3947,19 @@ range_tests_floats ()
r0.intersect (r1);
ASSERT_EQ (r0, frange_float ("15", "20"));
+ // [10,20] NAN ^ [21,25] NAN = [NAN]
+ r0 = frange_float ("10", "20");
+ r0.update_nan ();
+ r1 = frange_float ("21", "25");
+ r1.update_nan ();
+ r0.intersect (r1);
+ ASSERT_TRUE (r0.known_isnan ());
+
// [10,20] ^ [21,25] = []
r0 = frange_float ("10", "20");
+ r0.clear_nan ();
r1 = frange_float ("21", "25");
+ r1.clear_nan ();
r0.intersect (r1);
ASSERT_TRUE (r0.undefined_p ());
}
diff --git a/gcc/value-range.h b/gcc/value-range.h
index f9a01ee..795b1f0 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -35,6 +35,8 @@ enum value_range_kind
VR_RANGE,
/* Range is ~[MIN, MAX]. */
VR_ANTI_RANGE,
+ /* Range is a NAN. */
+ VR_NAN,
/* Range is a nice guy. */
VR_LAST
};
@@ -263,69 +265,10 @@ public:
virtual void accept (const vrange_visitor &v) const override;
};
-// Floating point property to represent possible values of a NAN, INF, etc.
-
-class fp_prop
-{
-public:
- enum kind {
- UNDEFINED = 0x0, // Prop is impossible.
- YES = 0x1, // Prop is definitely set.
- NO = 0x2, // Prop is definitely not set.
- VARYING = (YES | NO) // Prop may hold.
- };
- fp_prop (kind f) : m_kind (f) { }
- bool varying_p () const { return m_kind == VARYING; }
- bool undefined_p () const { return m_kind == UNDEFINED; }
- bool yes_p () const { return m_kind == YES; }
- bool no_p () const { return m_kind == NO; }
-private:
- unsigned char m_kind : 2;
-};
-
-// Accessors for individual FP properties.
-
-#define FP_PROP_ACCESSOR(NAME) \
- void NAME##_set_varying () { u.bits.NAME = fp_prop::VARYING; } \
- void NAME##_set_yes () { u.bits.NAME = fp_prop::YES; } \
- void NAME##_set_no () { u.bits.NAME = fp_prop::NO; } \
- bool NAME##_varying_p () const { return u.bits.NAME == fp_prop::VARYING; } \
- bool NAME##_undefined_p () const { return u.bits.NAME == fp_prop::UNDEFINED; } \
- bool NAME##_yes_p () const { return u.bits.NAME == fp_prop::YES; } \
- bool NAME##_no_p () const { return u.bits.NAME == fp_prop::NO; } \
- fp_prop get_##NAME () const \
- { return fp_prop ((fp_prop::kind) u.bits.NAME); } \
- void set_##NAME (fp_prop::kind f) { u.bits.NAME = f; }
-
-// Aggregate of all the FP properties in an frange packed into one
-// structure to save space. Using explicit fp_prop's in the frange,
-// would take one byte per property because of padding. Instead, we
-// can save all properties into one byte.
-
-class frange_props
-{
-public:
- frange_props () { set_varying (); }
- void set_varying () { u.bytes = 0xff; }
- void set_undefined () { u.bytes = 0; }
- bool varying_p () { return u.bytes == 0xff; }
- bool undefined_p () { return u.bytes == 0; }
- bool union_ (const frange_props &other);
- bool intersect (const frange_props &other);
- bool operator== (const frange_props &other) const;
- FP_PROP_ACCESSOR(nan)
- FP_PROP_ACCESSOR(signbit)
-private:
- union {
- struct {
- unsigned char nan : 2;
- unsigned char signbit : 2;
- } bits;
- unsigned char bytes;
- } u;
-};
-
// A floating point range.
+//
+// The representation is a type with a couple of endpoints, unioned
+// with the set of { -NAN, +Nan }.
class frange : public vrange
{
@@ -348,6 +291,8 @@ public:
virtual void set (tree, tree, value_range_kind = VR_RANGE) override;
void set (tree type, const REAL_VALUE_TYPE &, const REAL_VALUE_TYPE &,
value_range_kind = VR_RANGE);
+ void set_nan (tree type);
+ void set_nan (tree type, bool sign);
virtual void set_varying (tree type) override;
virtual void set_undefined () override;
virtual bool union_ (const vrange &) override;
@@ -366,41 +311,42 @@ public:
bool operator!= (const frange &r) const { return !(*this == r); }
const REAL_VALUE_TYPE &lower_bound () const;
const REAL_VALUE_TYPE &upper_bound () const;
+ void update_nan ();
+ void clear_nan ();
// fpclassify like API
- bool known_finite () const;
- bool maybe_inf () const;
- bool known_inf () const;
- bool maybe_nan () const;
- bool known_nan () const;
- bool known_signbit (bool &signbit) const;
-
- // Accessors for FP properties.
- void set_nan (fp_prop::kind f);
- void set_signbit (fp_prop::kind);
+ bool known_isfinite () const;
+ bool known_isnan () const;
+ bool known_isinf () const;
+ bool maybe_isnan () const;
+ bool maybe_isinf () const;
+ bool signbit_p (bool &signbit) const;
private:
- fp_prop get_nan () const { return m_props.get_nan (); }
- fp_prop get_signbit () const { return m_props.get_signbit (); }
void verify_range ();
bool normalize_kind ();
+ bool union_nans (const frange &);
+ bool intersect_nans (const frange &);
+ bool combine_zeros (const frange &, bool union_p);
+ void flush_denormals_to_zero ();
- frange_props m_props;
tree m_type;
REAL_VALUE_TYPE m_min;
REAL_VALUE_TYPE m_max;
+ bool m_pos_nan;
+ bool m_neg_nan;
};
inline const REAL_VALUE_TYPE &
frange::lower_bound () const
{
- gcc_checking_assert (!undefined_p ());
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
return m_min;
}
inline const REAL_VALUE_TYPE &
frange::upper_bound () const
{
- gcc_checking_assert (!undefined_p ());
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
return m_max;
}
@@ -1080,30 +1026,6 @@ vrp_val_min (const_tree type)
return NULL_TREE;
}
-// Supporting methods for frange.
-
-inline bool
-frange_props::operator== (const frange_props &other) const
-{
- return u.bytes == other.u.bytes;
-}
-
-inline bool
-frange_props::union_ (const frange_props &other)
-{
- unsigned char saved = u.bytes;
- u.bytes |= other.u.bytes;
- return u.bytes != saved;
-}
-
-inline bool
-frange_props::intersect (const frange_props &other)
-{
- unsigned char saved = u.bytes;
- u.bytes &= other.u.bytes;
- return u.bytes != saved;
-}
-
inline
frange::frange ()
{
@@ -1141,6 +1063,7 @@ frange::frange (tree min, tree max, value_range_kind kind)
inline tree
frange::type () const
{
+ gcc_checking_assert (!undefined_p ());
return m_type;
}
@@ -1151,17 +1074,42 @@ frange::set_varying (tree type)
m_type = type;
m_min = dconstninf;
m_max = dconstinf;
- m_props.set_varying ();
+ m_pos_nan = true;
+ m_neg_nan = true;
}
inline void
frange::set_undefined ()
{
m_kind = VR_UNDEFINED;
- m_type = NULL;
- m_props.set_undefined ();
- memset (&m_min, 0, sizeof (m_min));
- memset (&m_max, 0, sizeof (m_max));
+ if (flag_checking)
+ verify_range ();
+}
+
+// Set the NAN bit and adjust the range.
+
+inline void
+frange::update_nan ()
+{
+ gcc_checking_assert (!undefined_p ());
+ m_pos_nan = true;
+ m_neg_nan = true;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+}
+
+// Clear the NAN bit and adjust the range.
+
+inline void
+frange::clear_nan ()
+{
+ gcc_checking_assert (!undefined_p ());
+ m_pos_nan = false;
+ m_neg_nan = false;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
}
// Set R to maximum representable value for TYPE.
@@ -1185,35 +1133,48 @@ real_min_representable (REAL_VALUE_TYPE *r, tree type)
*r = real_value_negate (r);
}
-// Build a NAN of type TYPE.
+// Build a signless NAN of type TYPE.
-inline frange
-frange_nan (tree type)
+inline void
+frange::set_nan (tree type)
{
- REAL_VALUE_TYPE r;
+ m_kind = VR_NAN;
+ m_type = type;
+ m_pos_nan = true;
+ m_neg_nan = true;
+ if (flag_checking)
+ verify_range ();
+}
+
+// Build a NAN of type TYPE with SIGN.
- gcc_assert (real_nan (&r, "", 1, TYPE_MODE (type)));
- return frange (type, r, r);
+inline void
+frange::set_nan (tree type, bool sign)
+{
+ m_kind = VR_NAN;
+ m_type = type;
+ m_neg_nan = sign;
+ m_pos_nan = !sign;
+ if (flag_checking)
+ verify_range ();
}
// Return TRUE if range is known to be finite.
inline bool
-frange::known_finite () const
+frange::known_isfinite () const
{
if (undefined_p () || varying_p () || m_kind == VR_ANTI_RANGE)
return false;
- return (!real_isnan (&m_min)
- && !real_isinf (&m_min)
- && !real_isinf (&m_max));
+ return (!maybe_isnan () && !real_isinf (&m_min) && !real_isinf (&m_max));
}
// Return TRUE if range may be infinite.
inline bool
-frange::maybe_inf () const
+frange::maybe_isinf () const
{
- if (undefined_p () || m_kind == VR_ANTI_RANGE)
+ if (undefined_p () || m_kind == VR_ANTI_RANGE || m_kind == VR_NAN)
return false;
if (varying_p ())
return true;
@@ -1223,7 +1184,7 @@ frange::maybe_inf () const
// Return TRUE if range is known to be the [-INF,-INF] or [+INF,+INF].
inline bool
-frange::known_inf () const
+frange::known_isinf () const
{
return (m_kind == VR_RANGE
&& real_identical (&m_min, &m_max)
@@ -1233,32 +1194,50 @@ frange::known_inf () const
// Return TRUE if range is possibly a NAN.
inline bool
-frange::maybe_nan () const
+frange::maybe_isnan () const
{
- return !get_nan ().no_p ();
+ return m_pos_nan || m_neg_nan;
}
// Return TRUE if range is a +NAN or -NAN.
inline bool
-frange::known_nan () const
+frange::known_isnan () const
{
- return get_nan ().yes_p ();
+ return m_kind == VR_NAN;
}
// If the signbit for the range is known, set it in SIGNBIT and return
// TRUE.
inline bool
-frange::known_signbit (bool &signbit) const
+frange::signbit_p (bool &signbit) const
{
- // FIXME: Signed NANs are not supported yet.
- if (maybe_nan ())
+ if (undefined_p ())
return false;
- if (get_signbit ().varying_p ())
+
+ // NAN with unknown sign.
+ if (m_pos_nan && m_neg_nan)
return false;
- signbit = get_signbit ().yes_p ();
- return true;
+ // No NAN.
+ if (!m_pos_nan && !m_neg_nan)
+ {
+ if (m_min.sign == m_max.sign)
+ {
+ signbit = m_min.sign;
+ return true;
+ }
+ return false;
+ }
+ // NAN with known sign.
+ bool nan_sign = m_neg_nan;
+ if (known_isnan ()
+ || (nan_sign == m_min.sign && nan_sign == m_max.sign))
+ {
+ signbit = nan_sign;
+ return true;
+ }
+ return false;
}
#endif // GCC_VALUE_RANGE_H
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index e0deb15..7c434c9 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,21 @@
+2022-09-18 Thomas Neumann <tneumann@users.sourceforge.net>
+
+ * unwind-dw2-fde.c: Replace uintptr_t with typedef
+ for __UINTPTR_TYPE__.
+ * unwind-dw2-btree.h: Likewise.
+
+2022-09-16 Thomas Neumann <tneumann@users.sourceforge.net>
+
+ * unwind-dw2-fde.c (release_registered_frames): Cleanup at shutdown.
+ (__register_frame_info_table_bases): Use btree in atomic fast path.
+ (__deregister_frame_info_bases): Likewise.
+ (_Unwind_Find_FDE): Likewise.
+ (base_from_object): Make parameter const.
+ (classify_object_over_fdes): Add query-only mode.
+ (get_pc_range): Compute PC range for lookup.
+ * unwind-dw2-fde.h (last_fde): Make parameter const.
+ * unwind-dw2-btree.h: New file.
+
2022-08-31 Martin Liska <mliska@suse.cz>
* config.host: Remove hppa.
diff --git a/libgcc/config/avr/libf7/ChangeLog b/libgcc/config/avr/libf7/ChangeLog
index d24215e..026dafd 100644
--- a/libgcc/config/avr/libf7/ChangeLog
+++ b/libgcc/config/avr/libf7/ChangeLog
@@ -1,3 +1,9 @@
+2022-09-19 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/99184
+ * libf7-asm.sx (to_integer, to_unsigned): Don't round 16-bit
+ and 32-bit integers.
+
2020-06-23 David Edelsohn <dje.gcc@gmail.com>
* t-libf7: Use -include.
diff --git a/libgcc/config/avr/libf7/libf7-asm.sx b/libgcc/config/avr/libf7/libf7-asm.sx
index cfdbecd..752a939 100644
--- a/libgcc/config/avr/libf7/libf7-asm.sx
+++ b/libgcc/config/avr/libf7/libf7-asm.sx
@@ -601,9 +601,6 @@ DEFUN to_integer
tst C6
brmi .Lsaturate.T ; > INTxx_MAX => saturate
- rcall .Lround
- brmi .Lsaturate.T ; > INTxx_MAX => saturate
-
brtc 9f ; >= 0 => return
sbrc Mask, 5
.global __negdi2
@@ -658,30 +655,6 @@ DEFUN to_integer
.global __clr_8
XJMP __clr_8
-.Lround:
- ;; C6.7 is known to be 0 here.
- ;; Return N = 1 iff we have to saturate.
- cpi Mask, 0xf
- breq .Lround16
- cpi Mask, 0x1f
- breq .Lround32
-
- ;; For now, no rounding in the 64-bit case. This rounding
- ;; would have to be integrated into the right-shift.
- cln
- ret
-
-.Lround32:
- rol C2
- adc C3, ZERO
- adc C4, ZERO
- rjmp 2f
-
-.Lround16:
- rol C4
-2: adc C5, ZERO
- adc C6, ZERO
- ret
ENDF to_integer
#endif /* F7MOD_to_integer_ */
@@ -725,29 +698,6 @@ DEFUN to_unsigned
clr CA
F7call lshrdi3
POP r16
-
- ;; Rounding
- ;; ??? C6.7 is known to be 0 here.
- cpi Mask, 0xf
- breq .Lround16
- cpi Mask, 0x1f
- breq .Lround32
-
- ;; For now, no rounding in the 64-bit case. This rounding
- ;; would have to be integrated into the right-shift.
- ret
-
-.Lround32:
- rol C2
- adc C3, ZERO
- adc C4, ZERO
- rjmp 2f
-
-.Lround16:
- rol C4
-2: adc C5, ZERO
- adc C6, ZERO
- brcs .Lset_0xffff ; Rounding overflow => saturate
ret
.Lset_0xffff:
diff --git a/libgcc/unwind-dw2-btree.h b/libgcc/unwind-dw2-btree.h
new file mode 100644
index 0000000..ace507d
--- /dev/null
+++ b/libgcc/unwind-dw2-btree.h
@@ -0,0 +1,954 @@
+/* Lock-free btree for manually registered unwind frames. */
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+ Contributed by Thomas Neumann
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_UNWIND_DW2_BTREE_H
+#define GCC_UNWIND_DW2_BTREE_H
+
+#include <stdbool.h>
+
+// Common logic for version locks.
+struct version_lock
+{
+ // The lock itself. The lowest bit indicates an exclusive lock,
+ // the second bit indicates waiting threads. All other bits are
+ // used as counter to recognize changes.
+ // Overflows are okay here, we must only prevent overflow to the
+ // same value within one lock_optimistic/validate
+ // range. Even on 32 bit platforms that would require 1 billion
+ // frame registrations within the time span of a few assembler
+ // instructions.
+ uintptr_type version_lock;
+};
+
+#ifdef __GTHREAD_HAS_COND
+// We should never get contention within the tree as it rarely changes.
+// But if we ever do get contention we use these for waiting.
+static __gthread_mutex_t version_lock_mutex = __GTHREAD_MUTEX_INIT;
+static __gthread_cond_t version_lock_cond = __GTHREAD_COND_INIT;
+#endif
+
+// Initialize in locked state.
+static inline void
+version_lock_initialize_locked_exclusive (struct version_lock *vl)
+{
+ vl->version_lock = 1;
+}
+
+// Try to lock the node exclusive.
+static inline bool
+version_lock_try_lock_exclusive (struct version_lock *vl)
+{
+ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ if (state & 1)
+ return false;
+ return __atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1,
+ false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+}
+
+// Lock the node exclusive, blocking as needed.
+static void
+version_lock_lock_exclusive (struct version_lock *vl)
+{
+#ifndef __GTHREAD_HAS_COND
+restart:
+#endif
+
+ // We should virtually never get contention here, as frame
+ // changes are rare.
+ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ if (!(state & 1))
+ {
+ if (__atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1,
+ false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST))
+ return;
+ }
+
+ // We did get contention, wait properly.
+#ifdef __GTHREAD_HAS_COND
+ __gthread_mutex_lock (&version_lock_mutex);
+ state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ while (true)
+ {
+ // Check if the lock is still held.
+ if (!(state & 1))
+ {
+ if (__atomic_compare_exchange_n (&(vl->version_lock), &state,
+ state | 1, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST))
+ {
+ __gthread_mutex_unlock (&version_lock_mutex);
+ return;
+ }
+ else
+ {
+ continue;
+ }
+ }
+
+ // Register waiting thread.
+ if (!(state & 2))
+ {
+ if (!__atomic_compare_exchange_n (&(vl->version_lock), &state,
+ state | 2, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST))
+ continue;
+ }
+
+ // And sleep.
+ __gthread_cond_wait (&version_lock_cond, &version_lock_mutex);
+ state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ }
+#else
+ // Spin if we do not have condition variables available.
+ // We expect no contention here, spinning should be okay.
+ goto restart;
+#endif
+}
+
+// Release a locked node and increase the version lock.
+static void
+version_lock_unlock_exclusive (struct version_lock *vl)
+{
+ // increase version, reset exclusive lock bits
+ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ uintptr_type ns = (state + 4) & (~((uintptr_type) 3));
+ state = __atomic_exchange_n (&(vl->version_lock), ns, __ATOMIC_SEQ_CST);
+
+#ifdef __GTHREAD_HAS_COND
+ if (state & 2)
+ {
+ // Wake up waiting threads. This should be extremely rare.
+ __gthread_mutex_lock (&version_lock_mutex);
+ __gthread_cond_broadcast (&version_lock_cond);
+ __gthread_mutex_unlock (&version_lock_mutex);
+ }
+#endif
+}
+
+// Acquire an optimistic "lock". Note that this does not lock at all, it
+// only allows for validation later.
+static inline bool
+version_lock_lock_optimistic (const struct version_lock *vl, uintptr_type *lock)
+{
+ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ *lock = state;
+
+ // Acquiring the lock fails when there is currently an exclusive lock.
+ return !(state & 1);
+}
+
+// Validate a previously acquired "lock".
+static inline bool
+version_lock_validate (const struct version_lock *vl, uintptr_type lock)
+{
+ // Prevent the reordering of non-atomic loads behind the atomic load.
+ // Hans Boehm, Can Seqlocks Get Along with Programming Language Memory
+ // Models?, Section 4.
+ __atomic_thread_fence (__ATOMIC_ACQUIRE);
+
+ // Check that the node is still in the same state.
+ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
+ return (state == lock);
+}
+
+// The largest possible separator value.
+static const uintptr_type max_separator = ~((uintptr_type) (0));
+
+struct btree_node;
+
+// Inner entry. The child tree contains all entries <= separator.
+struct inner_entry
+{
+ uintptr_type separator;
+ struct btree_node *child;
+};
+
+// Leaf entry. Stores an object entry.
+struct leaf_entry
+{
+ uintptr_type base, size;
+ struct object *ob;
+};
+
+// Node types.
+enum node_type
+{
+ btree_node_inner,
+ btree_node_leaf,
+ btree_node_free
+};
+
+// Node sizes. Chosen such that the result size is roughly 256 bytes.
+#define max_fanout_inner 15
+#define max_fanout_leaf 10
+
+// A btree node.
+struct btree_node
+{
+ // The version lock used for optimistic lock coupling.
+ struct version_lock version_lock;
+ // The number of entries.
+ unsigned entry_count;
+ // The type.
+ enum node_type type;
+ // The payload.
+ union
+ {
+ // The inner nodes have fence keys, i.e., the right-most entry includes a
+ // separator.
+ struct inner_entry children[max_fanout_inner];
+ struct leaf_entry entries[max_fanout_leaf];
+ } content;
+};
+
+// Is an inner node?
+static inline bool
+btree_node_is_inner (const struct btree_node *n)
+{
+ return n->type == btree_node_inner;
+}
+
+// Is a leaf node?
+static inline bool
+btree_node_is_leaf (const struct btree_node *n)
+{
+ return n->type == btree_node_leaf;
+}
+
+// Should the node be merged?
+static inline bool
+btree_node_needs_merge (const struct btree_node *n)
+{
+ return n->entry_count < (btree_node_is_inner (n) ? (max_fanout_inner / 2)
+ : (max_fanout_leaf / 2));
+}
+
+// Get the fence key for inner nodes.
+static inline uintptr_type
+btree_node_get_fence_key (const struct btree_node *n)
+{
+ // For inner nodes we just return our right-most entry.
+ return n->content.children[n->entry_count - 1].separator;
+}
+
+// Find the position for a slot in an inner node.
+static unsigned
+btree_node_find_inner_slot (const struct btree_node *n, uintptr_type value)
+{
+ for (unsigned index = 0, ec = n->entry_count; index != ec; ++index)
+ if (n->content.children[index].separator >= value)
+ return index;
+ return n->entry_count;
+}
+
+// Find the position for a slot in a leaf node.
+static unsigned
+btree_node_find_leaf_slot (const struct btree_node *n, uintptr_type value)
+{
+ for (unsigned index = 0, ec = n->entry_count; index != ec; ++index)
+ if (n->content.entries[index].base + n->content.entries[index].size > value)
+ return index;
+ return n->entry_count;
+}
+
+// Try to lock the node exclusive.
+static inline bool
+btree_node_try_lock_exclusive (struct btree_node *n)
+{
+ return version_lock_try_lock_exclusive (&(n->version_lock));
+}
+
+// Lock the node exclusive, blocking as needed.
+static inline void
+btree_node_lock_exclusive (struct btree_node *n)
+{
+ version_lock_lock_exclusive (&(n->version_lock));
+}
+
+// Release a locked node and increase the version lock.
+static inline void
+btree_node_unlock_exclusive (struct btree_node *n)
+{
+ version_lock_unlock_exclusive (&(n->version_lock));
+}
+
+// Acquire an optimistic "lock". Note that this does not lock at all, it
+// only allows for validation later.
+static inline bool
+btree_node_lock_optimistic (const struct btree_node *n, uintptr_type *lock)
+{
+ return version_lock_lock_optimistic (&(n->version_lock), lock);
+}
+
+// Validate a previously acquire lock.
+static inline bool
+btree_node_validate (const struct btree_node *n, uintptr_type lock)
+{
+ return version_lock_validate (&(n->version_lock), lock);
+}
+
+// Insert a new separator after splitting.
+static void
+btree_node_update_separator_after_split (struct btree_node *n,
+ uintptr_type old_separator,
+ uintptr_type new_separator,
+ struct btree_node *new_right)
+{
+ unsigned slot = btree_node_find_inner_slot (n, old_separator);
+ for (unsigned index = n->entry_count; index > slot; --index)
+ n->content.children[index] = n->content.children[index - 1];
+ n->content.children[slot].separator = new_separator;
+ n->content.children[slot + 1].child = new_right;
+ n->entry_count++;
+}
+
+// A btree. Suitable for static initialization, all members are zero at the
+// beginning.
+struct btree
+{
+ // The root of the btree.
+ struct btree_node *root;
+ // The free list of released node.
+ struct btree_node *free_list;
+ // The version lock used to protect the root.
+ struct version_lock root_lock;
+};
+
+// Initialize a btree. Not actually used, just for exposition.
+static inline void
+btree_init (struct btree *t)
+{
+ t->root = NULL;
+ t->free_list = NULL;
+ t->root_lock.version_lock = 0;
+};
+
+static void
+btree_release_tree_recursively (struct btree *t, struct btree_node *n);
+
+// Destroy a tree and release all nodes.
+static void
+btree_destroy (struct btree *t)
+{
+ // Disable the mechanism before cleaning up.
+ struct btree_node *old_root
+ = __atomic_exchange_n (&(t->root), NULL, __ATOMIC_SEQ_CST);
+ if (old_root)
+ btree_release_tree_recursively (t, old_root);
+
+ // Release all free nodes.
+ while (t->free_list)
+ {
+ struct btree_node *next = t->free_list->content.children[0].child;
+ free (t->free_list);
+ t->free_list = next;
+ }
+}
+
+// Allocate a node. This node will be returned in locked exclusive state.
+static struct btree_node *
+btree_allocate_node (struct btree *t, bool inner)
+{
+ while (true)
+ {
+ // Try the free list first.
+ struct btree_node *next_free
+ = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST);
+ if (next_free)
+ {
+ if (!btree_node_try_lock_exclusive (next_free))
+ continue;
+ // The node might no longer be free, check that again after acquiring
+ // the exclusive lock.
+ if (next_free->type == btree_node_free)
+ {
+ struct btree_node *ex = next_free;
+ if (__atomic_compare_exchange_n (
+ &(t->free_list), &ex, next_free->content.children[0].child,
+ false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ {
+ next_free->entry_count = 0;
+ next_free->type = inner ? btree_node_inner : btree_node_leaf;
+ return next_free;
+ }
+ }
+ btree_node_unlock_exclusive (next_free);
+ continue;
+ }
+
+ // No free node available, allocate a new one.
+ struct btree_node *new_node
+ = (struct btree_node *) (malloc (sizeof (struct btree_node)));
+ version_lock_initialize_locked_exclusive (
+ &(new_node->version_lock)); // initialize the node in locked state.
+ new_node->entry_count = 0;
+ new_node->type = inner ? btree_node_inner : btree_node_leaf;
+ return new_node;
+ }
+}
+
+// Release a node. This node must be currently locked exclusively and will
+// be placed in the free list.
+static void
+btree_release_node (struct btree *t, struct btree_node *node)
+{
+ // We cannot release the memory immediately because there might still be
+ // concurrent readers on that node. Put it in the free list instead.
+ node->type = btree_node_free;
+ struct btree_node *next_free
+ = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST);
+ do
+ {
+ node->content.children[0].child = next_free;
+ } while (!__atomic_compare_exchange_n (&(t->free_list), &next_free, node,
+ false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST));
+ btree_node_unlock_exclusive (node);
+}
+
+// Recursively release a tree. The btree is by design very shallow, thus
+// we can risk recursion here.
+static void
+btree_release_tree_recursively (struct btree *t, struct btree_node *node)
+{
+ btree_node_lock_exclusive (node);
+ if (btree_node_is_inner (node))
+ {
+ for (unsigned index = 0; index < node->entry_count; ++index)
+ btree_release_tree_recursively (t, node->content.children[index].child);
+ }
+ btree_release_node (t, node);
+}
+
+// Check if we are splitting the root.
+static void
+btree_handle_root_split (struct btree *t, struct btree_node **node,
+ struct btree_node **parent)
+{
+ // We want to keep the root pointer stable to allow for contention
+ // free reads. Thus, we split the root by first moving the content
+ // of the root node to a new node, and then split that new node.
+ if (!*parent)
+ {
+ // Allocate a new node, this guarantees us that we will have a parent
+ // afterwards.
+ struct btree_node *new_node
+ = btree_allocate_node (t, btree_node_is_inner (*node));
+ struct btree_node *old_node = *node;
+ new_node->entry_count = old_node->entry_count;
+ new_node->content = old_node->content;
+ old_node->content.children[0].separator = max_separator;
+ old_node->content.children[0].child = new_node;
+ old_node->entry_count = 1;
+ old_node->type = btree_node_inner;
+
+ *parent = old_node;
+ *node = new_node;
+ }
+}
+
+// Split an inner node.
+static void
+btree_split_inner (struct btree *t, struct btree_node **inner,
+ struct btree_node **parent, uintptr_type target)
+{
+ // Check for the root.
+ btree_handle_root_split (t, inner, parent);
+
+ // Create two inner node.
+ uintptr_type right_fence = btree_node_get_fence_key (*inner);
+ struct btree_node *left_inner = *inner;
+ struct btree_node *right_inner = btree_allocate_node (t, true);
+ unsigned split = left_inner->entry_count / 2;
+ right_inner->entry_count = left_inner->entry_count - split;
+ for (unsigned index = 0; index < right_inner->entry_count; ++index)
+ right_inner->content.children[index]
+ = left_inner->content.children[split + index];
+ left_inner->entry_count = split;
+ uintptr_type left_fence = btree_node_get_fence_key (left_inner);
+ btree_node_update_separator_after_split (*parent, right_fence, left_fence,
+ right_inner);
+ if (target <= left_fence)
+ {
+ *inner = left_inner;
+ btree_node_unlock_exclusive (right_inner);
+ }
+ else
+ {
+ *inner = right_inner;
+ btree_node_unlock_exclusive (left_inner);
+ }
+}
+
+// Split a leaf node.
+static void
+btree_split_leaf (struct btree *t, struct btree_node **leaf,
+ struct btree_node **parent, uintptr_type fence,
+ uintptr_type target)
+{
+ // Check for the root.
+ btree_handle_root_split (t, leaf, parent);
+
+ // Create two leaf nodes.
+ uintptr_type right_fence = fence;
+ struct btree_node *left_leaf = *leaf;
+ struct btree_node *right_leaf = btree_allocate_node (t, false);
+ unsigned split = left_leaf->entry_count / 2;
+ right_leaf->entry_count = left_leaf->entry_count - split;
+ for (unsigned index = 0; index != right_leaf->entry_count; ++index)
+ right_leaf->content.entries[index]
+ = left_leaf->content.entries[split + index];
+ left_leaf->entry_count = split;
+ uintptr_type left_fence = right_leaf->content.entries[0].base - 1;
+ btree_node_update_separator_after_split (*parent, right_fence, left_fence,
+ right_leaf);
+ if (target <= left_fence)
+ {
+ *leaf = left_leaf;
+ btree_node_unlock_exclusive (right_leaf);
+ }
+ else
+ {
+ *leaf = right_leaf;
+ btree_node_unlock_exclusive (left_leaf);
+ }
+}
+
+// Merge (or balance) child nodes.
+static struct btree_node *
+btree_merge_node (struct btree *t, unsigned child_slot,
+ struct btree_node *parent, uintptr_type target)
+{
+ // Choose the emptiest neighbor and lock both. The target child is already
+ // locked.
+ unsigned left_slot;
+ struct btree_node *left_node, *right_node;
+ if ((child_slot == 0)
+ || (((child_slot + 1) < parent->entry_count)
+ && (parent->content.children[child_slot + 1].child->entry_count
+ < parent->content.children[child_slot - 1].child->entry_count)))
+ {
+ left_slot = child_slot;
+ left_node = parent->content.children[left_slot].child;
+ right_node = parent->content.children[left_slot + 1].child;
+ btree_node_lock_exclusive (right_node);
+ }
+ else
+ {
+ left_slot = child_slot - 1;
+ left_node = parent->content.children[left_slot].child;
+ right_node = parent->content.children[left_slot + 1].child;
+ btree_node_lock_exclusive (left_node);
+ }
+
+ // Can we merge both nodes into one node?
+ unsigned total_count = left_node->entry_count + right_node->entry_count;
+ unsigned max_count
+ = btree_node_is_inner (left_node) ? max_fanout_inner : max_fanout_leaf;
+ if (total_count <= max_count)
+ {
+ // Merge into the parent?
+ if (parent->entry_count == 2)
+ {
+ // Merge children into parent. This can only happen at the root.
+ if (btree_node_is_inner (left_node))
+ {
+ for (unsigned index = 0; index != left_node->entry_count; ++index)
+ parent->content.children[index]
+ = left_node->content.children[index];
+ for (unsigned index = 0; index != right_node->entry_count;
+ ++index)
+ parent->content.children[index + left_node->entry_count]
+ = right_node->content.children[index];
+ }
+ else
+ {
+ parent->type = btree_node_leaf;
+ for (unsigned index = 0; index != left_node->entry_count; ++index)
+ parent->content.entries[index]
+ = left_node->content.entries[index];
+ for (unsigned index = 0; index != right_node->entry_count;
+ ++index)
+ parent->content.entries[index + left_node->entry_count]
+ = right_node->content.entries[index];
+ }
+ parent->entry_count = total_count;
+ btree_release_node (t, left_node);
+ btree_release_node (t, right_node);
+ return parent;
+ }
+ else
+ {
+ // Regular merge.
+ if (btree_node_is_inner (left_node))
+ {
+ for (unsigned index = 0; index != right_node->entry_count;
+ ++index)
+ left_node->content.children[left_node->entry_count++]
+ = right_node->content.children[index];
+ }
+ else
+ {
+ for (unsigned index = 0; index != right_node->entry_count;
+ ++index)
+ left_node->content.entries[left_node->entry_count++]
+ = right_node->content.entries[index];
+ }
+ parent->content.children[left_slot].separator
+ = parent->content.children[left_slot + 1].separator;
+ for (unsigned index = left_slot + 1; index + 1 < parent->entry_count;
+ ++index)
+ parent->content.children[index]
+ = parent->content.children[index + 1];
+ parent->entry_count--;
+ btree_release_node (t, right_node);
+ btree_node_unlock_exclusive (parent);
+ return left_node;
+ }
+ }
+
+ // No merge possible, rebalance instead.
+ if (left_node->entry_count > right_node->entry_count)
+ {
+ // Shift from left to right.
+ unsigned to_shift
+ = (left_node->entry_count - right_node->entry_count) / 2;
+ if (btree_node_is_inner (left_node))
+ {
+ for (unsigned index = 0; index != right_node->entry_count; ++index)
+ {
+ unsigned pos = right_node->entry_count - 1 - index;
+ right_node->content.children[pos + to_shift]
+ = right_node->content.children[pos];
+ }
+ for (unsigned index = 0; index != to_shift; ++index)
+ right_node->content.children[index]
+ = left_node->content
+ .children[left_node->entry_count - to_shift + index];
+ }
+ else
+ {
+ for (unsigned index = 0; index != right_node->entry_count; ++index)
+ {
+ unsigned pos = right_node->entry_count - 1 - index;
+ right_node->content.entries[pos + to_shift]
+ = right_node->content.entries[pos];
+ }
+ for (unsigned index = 0; index != to_shift; ++index)
+ right_node->content.entries[index]
+ = left_node->content
+ .entries[left_node->entry_count - to_shift + index];
+ }
+ left_node->entry_count -= to_shift;
+ right_node->entry_count += to_shift;
+ }
+ else
+ {
+ // Shift from right to left.
+ unsigned to_shift
+ = (right_node->entry_count - left_node->entry_count) / 2;
+ if (btree_node_is_inner (left_node))
+ {
+ for (unsigned index = 0; index != to_shift; ++index)
+ left_node->content.children[left_node->entry_count + index]
+ = right_node->content.children[index];
+ for (unsigned index = 0; index != right_node->entry_count - to_shift;
+ ++index)
+ right_node->content.children[index]
+ = right_node->content.children[index + to_shift];
+ }
+ else
+ {
+ for (unsigned index = 0; index != to_shift; ++index)
+ left_node->content.entries[left_node->entry_count + index]
+ = right_node->content.entries[index];
+ for (unsigned index = 0; index != right_node->entry_count - to_shift;
+ ++index)
+ right_node->content.entries[index]
+ = right_node->content.entries[index + to_shift];
+ }
+ left_node->entry_count += to_shift;
+ right_node->entry_count -= to_shift;
+ }
+ uintptr_type left_fence;
+ if (btree_node_is_leaf (left_node))
+ {
+ left_fence = right_node->content.entries[0].base - 1;
+ }
+ else
+ {
+ left_fence = btree_node_get_fence_key (left_node);
+ }
+ parent->content.children[left_slot].separator = left_fence;
+ btree_node_unlock_exclusive (parent);
+ if (target <= left_fence)
+ {
+ btree_node_unlock_exclusive (right_node);
+ return left_node;
+ }
+ else
+ {
+ btree_node_unlock_exclusive (left_node);
+ return right_node;
+ }
+}
+
+// Insert an entry.
+static bool
+btree_insert (struct btree *t, uintptr_type base, uintptr_type size,
+ struct object *ob)
+{
+ // Sanity check.
+ if (!size)
+ return false;
+
+ // Access the root.
+ struct btree_node *iter, *parent = NULL;
+ {
+ version_lock_lock_exclusive (&(t->root_lock));
+ iter = t->root;
+ if (iter)
+ {
+ btree_node_lock_exclusive (iter);
+ }
+ else
+ {
+ t->root = iter = btree_allocate_node (t, false);
+ }
+ version_lock_unlock_exclusive (&(t->root_lock));
+ }
+
+ // Walk down the btree with classic lock coupling and eager splits.
+ // Strictly speaking this is not performance optimal, we could use
+ // optimistic lock coupling until we hit a node that has to be modified.
+ // But that is more difficult to implement and frame registration is
+ // rare anyway, we use simple locking for now.
+
+ uintptr_type fence = max_separator;
+ while (btree_node_is_inner (iter))
+ {
+ // Use eager splits to avoid lock coupling up.
+ if (iter->entry_count == max_fanout_inner)
+ btree_split_inner (t, &iter, &parent, base);
+
+ unsigned slot = btree_node_find_inner_slot (iter, base);
+ if (parent)
+ btree_node_unlock_exclusive (parent);
+ parent = iter;
+ fence = iter->content.children[slot].separator;
+ iter = iter->content.children[slot].child;
+ btree_node_lock_exclusive (iter);
+ }
+
+ // Make sure we have space.
+ if (iter->entry_count == max_fanout_leaf)
+ btree_split_leaf (t, &iter, &parent, fence, base);
+ if (parent)
+ btree_node_unlock_exclusive (parent);
+
+ // Insert in node.
+ unsigned slot = btree_node_find_leaf_slot (iter, base);
+ if ((slot < iter->entry_count) && (iter->content.entries[slot].base == base))
+ {
+ // Duplicate entry, this should never happen.
+ btree_node_unlock_exclusive (iter);
+ return false;
+ }
+ for (unsigned index = iter->entry_count; index > slot; --index)
+ iter->content.entries[index] = iter->content.entries[index - 1];
+ struct leaf_entry *e = &(iter->content.entries[slot]);
+ e->base = base;
+ e->size = size;
+ e->ob = ob;
+ iter->entry_count++;
+ btree_node_unlock_exclusive (iter);
+ return true;
+}
+
+// Remove an entry.
+static struct object *
+btree_remove (struct btree *t, uintptr_type base)
+{
+ // Access the root.
+ version_lock_lock_exclusive (&(t->root_lock));
+ struct btree_node *iter = t->root;
+ if (iter)
+ btree_node_lock_exclusive (iter);
+ version_lock_unlock_exclusive (&(t->root_lock));
+ if (!iter)
+ return NULL;
+
+ // Same strategy as with insert, walk down with lock coupling and
+ // merge eagerly.
+ while (btree_node_is_inner (iter))
+ {
+ unsigned slot = btree_node_find_inner_slot (iter, base);
+ struct btree_node *next = iter->content.children[slot].child;
+ btree_node_lock_exclusive (next);
+ if (btree_node_needs_merge (next))
+ {
+ // Use eager merges to avoid lock coupling up.
+ iter = btree_merge_node (t, slot, iter, base);
+ }
+ else
+ {
+ btree_node_unlock_exclusive (iter);
+ iter = next;
+ }
+ }
+
+ // Remove existing entry.
+ unsigned slot = btree_node_find_leaf_slot (iter, base);
+ if ((slot >= iter->entry_count) || (iter->content.entries[slot].base != base))
+ {
+ // Not found, this should never happen.
+ btree_node_unlock_exclusive (iter);
+ return NULL;
+ }
+ struct object *ob = iter->content.entries[slot].ob;
+ for (unsigned index = slot; index + 1 < iter->entry_count; ++index)
+ iter->content.entries[index] = iter->content.entries[index + 1];
+ iter->entry_count--;
+ btree_node_unlock_exclusive (iter);
+ return ob;
+}
+
+// Find the corresponding entry for the given address.
+static struct object *
+btree_lookup (const struct btree *t, uintptr_type target_addr)
+{
+ // Within this function many loads are relaxed atomic loads.
+ // Use a macro to keep the code reasonable.
+#define RLOAD(x) __atomic_load_n (&(x), __ATOMIC_RELAXED)
+
+ // For targets where unwind info is usually not registered through these
+ // APIs anymore, avoid any sequential consistent atomics.
+ // Use relaxed MO here, it is up to the app to ensure that the library
+ // loading/initialization happens-before using that library in other
+ // threads (in particular unwinding with that library's functions
+ // appearing in the backtraces). Calling that library's functions
+ // without waiting for the library to initialize would be racy.
+ if (__builtin_expect (!RLOAD (t->root), 1))
+ return NULL;
+
+ // The unwinding tables are mostly static, they only change when
+ // frames are added or removed. This makes it extremely unlikely that they
+ // change during a given unwinding sequence. Thus, we optimize for the
+ // contention free case and use optimistic lock coupling. This does not
+ // require any writes to shared state, instead we validate every read. It is
+ // important that we do not trust any value that we have read until we call
+ // validate again. Data can change at arbitrary points in time, thus we always
+ // copy something into a local variable and validate again before acting on
+ // the read. In the unlikely event that we encounter a concurrent change we
+ // simply restart and try again.
+
+restart:
+ struct btree_node *iter;
+ uintptr_type lock;
+ {
+    // Accessing the root node requires defending against concurrent pointer
+    // changes. Thus we couple root_lock -> lock on root node -> validate root_lock.
+ if (!version_lock_lock_optimistic (&(t->root_lock), &lock))
+ goto restart;
+ iter = RLOAD (t->root);
+ if (!version_lock_validate (&(t->root_lock), lock))
+ goto restart;
+ if (!iter)
+ return NULL;
+ uintptr_type child_lock;
+ if ((!btree_node_lock_optimistic (iter, &child_lock))
+ || (!version_lock_validate (&(t->root_lock), lock)))
+ goto restart;
+ lock = child_lock;
+ }
+
+ // Now we can walk down towards the right leaf node.
+ while (true)
+ {
+ enum node_type type = RLOAD (iter->type);
+ unsigned entry_count = RLOAD (iter->entry_count);
+ if (!btree_node_validate (iter, lock))
+ goto restart;
+ if (!entry_count)
+ return NULL;
+
+ if (type == btree_node_inner)
+ {
+ // We cannot call find_inner_slot here because we need (relaxed)
+ // atomic reads here.
+ unsigned slot = 0;
+ while (
+ ((slot + 1) < entry_count)
+ && (RLOAD (iter->content.children[slot].separator) < target_addr))
+ ++slot;
+ struct btree_node *child = RLOAD (iter->content.children[slot].child);
+ if (!btree_node_validate (iter, lock))
+ goto restart;
+
+ // The node content can change at any point in time, thus we must
+ // interleave parent and child checks.
+ uintptr_type child_lock;
+ if (!btree_node_lock_optimistic (child, &child_lock))
+ goto restart;
+ if (!btree_node_validate (iter, lock))
+ goto restart; // make sure we still point to the correct node after
+ // acquiring the optimistic lock.
+
+ // Go down
+ iter = child;
+ lock = child_lock;
+ }
+ else
+ {
+ // We cannot call find_leaf_slot here because we need (relaxed)
+ // atomic reads here.
+ unsigned slot = 0;
+ while (((slot + 1) < entry_count)
+ && (RLOAD (iter->content.entries[slot].base)
+ + RLOAD (iter->content.entries[slot].size)
+ <= target_addr))
+ ++slot;
+ struct leaf_entry entry;
+ entry.base = RLOAD (iter->content.entries[slot].base);
+ entry.size = RLOAD (iter->content.entries[slot].size);
+ entry.ob = RLOAD (iter->content.entries[slot].ob);
+ if (!btree_node_validate (iter, lock))
+ goto restart;
+
+ // Check if we have a hit.
+ if ((entry.base <= target_addr)
+ && (target_addr < entry.base + entry.size))
+ {
+ return entry.ob;
+ }
+ return NULL;
+ }
+ }
+#undef RLOAD
+}
+
+#endif /* unwind-dw2-btree.h */
diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c
index 8ee55be..919abfe 100644
--- a/libgcc/unwind-dw2-fde.c
+++ b/libgcc/unwind-dw2-fde.c
@@ -42,15 +42,36 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#endif
#endif
+typedef __UINTPTR_TYPE__ uintptr_type;
+
+#ifdef ATOMIC_FDE_FAST_PATH
+#include "unwind-dw2-btree.h"
+
+static struct btree registered_frames;
+
+static void
+release_registered_frames (void) __attribute__ ((destructor (110)));
+static void
+release_registered_frames (void)
+{
+ /* Release the b-tree and all frames. Frame releases that happen later are
+ * silently ignored */
+ btree_destroy (&registered_frames);
+}
+
+static void
+get_pc_range (const struct object *ob, uintptr_type *range);
+static void
+init_object (struct object *ob);
+
+#else
+
/* The unseen_objects list contains objects that have been registered
but not yet categorized in any way. The seen_objects list has had
its pc_begin and count fields initialized at minimum, and is sorted
by decreasing value of pc_begin. */
static struct object *unseen_objects;
static struct object *seen_objects;
-#ifdef ATOMIC_FDE_FAST_PATH
-static int any_objects_registered;
-#endif
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
@@ -78,6 +99,7 @@ init_object_mutex_once (void)
static __gthread_mutex_t object_mutex;
#endif
#endif
+#endif
/* Called from crtbegin.o to register the unwind info for an object. */
@@ -99,23 +121,23 @@ __register_frame_info_bases (const void *begin, struct object *ob,
ob->fde_end = NULL;
#endif
+#ifdef ATOMIC_FDE_FAST_PATH
+ // Initialize eagerly to avoid locking later
+ init_object (ob);
+
+ // And register the frame
+ uintptr_type range[2];
+ get_pc_range (ob, range);
+ btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
+#else
init_object_mutex_once ();
__gthread_mutex_lock (&object_mutex);
ob->next = unseen_objects;
unseen_objects = ob;
-#ifdef ATOMIC_FDE_FAST_PATH
- /* Set flag that at least one library has registered FDEs.
- Use relaxed MO here, it is up to the app to ensure that the library
- loading/initialization happens-before using that library in other
- threads (in particular unwinding with that library's functions
- appearing in the backtraces). Calling that library's functions
- without waiting for the library to initialize would be racy. */
- if (!any_objects_registered)
- __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
-#endif
__gthread_mutex_unlock (&object_mutex);
+#endif
}
void
@@ -153,23 +175,23 @@ __register_frame_info_table_bases (void *begin, struct object *ob,
ob->s.b.from_array = 1;
ob->s.b.encoding = DW_EH_PE_omit;
+#ifdef ATOMIC_FDE_FAST_PATH
+ // Initialize eagerly to avoid locking later
+ init_object (ob);
+
+ // And register the frame
+ uintptr_type range[2];
+ get_pc_range (ob, range);
+ btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
+#else
init_object_mutex_once ();
__gthread_mutex_lock (&object_mutex);
ob->next = unseen_objects;
unseen_objects = ob;
-#ifdef ATOMIC_FDE_FAST_PATH
- /* Set flag that at least one library has registered FDEs.
- Use relaxed MO here, it is up to the app to ensure that the library
- loading/initialization happens-before using that library in other
- threads (in particular unwinding with that library's functions
- appearing in the backtraces). Calling that library's functions
- without waiting for the library to initialize would be racy. */
- if (!any_objects_registered)
- __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
-#endif
__gthread_mutex_unlock (&object_mutex);
+#endif
}
void
@@ -200,16 +222,33 @@ __register_frame_table (void *begin)
void *
__deregister_frame_info_bases (const void *begin)
{
- struct object **p;
struct object *ob = 0;
/* If .eh_frame is empty, we haven't registered. */
if ((const uword *) begin == 0 || *(const uword *) begin == 0)
return ob;
+#ifdef ATOMIC_FDE_FAST_PATH
+ // Find the corresponding PC range
+ struct object lookupob;
+ lookupob.tbase = 0;
+ lookupob.dbase = 0;
+ lookupob.u.single = begin;
+ lookupob.s.i = 0;
+ lookupob.s.b.encoding = DW_EH_PE_omit;
+#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
+ lookupob.fde_end = NULL;
+#endif
+ uintptr_type range[2];
+ get_pc_range (&lookupob, range);
+
+ // And remove
+ ob = btree_remove (&registered_frames, range[0]);
+#else
init_object_mutex_once ();
__gthread_mutex_lock (&object_mutex);
+ struct object **p;
for (p = &unseen_objects; *p ; p = &(*p)->next)
if ((*p)->u.single == begin)
{
@@ -241,6 +280,8 @@ __deregister_frame_info_bases (const void *begin)
out:
__gthread_mutex_unlock (&object_mutex);
+#endif
+
gcc_assert (ob);
return (void *) ob;
}
@@ -264,7 +305,7 @@ __deregister_frame (void *begin)
instead of an _Unwind_Context. */
static _Unwind_Ptr
-base_from_object (unsigned char encoding, struct object *ob)
+base_from_object (unsigned char encoding, const struct object *ob)
{
if (encoding == DW_EH_PE_omit)
return 0;
@@ -628,13 +669,17 @@ end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
}
}
-
-/* Update encoding, mixed_encoding, and pc_begin for OB for the
- fde array beginning at THIS_FDE. Return the number of fdes
- encountered along the way. */
+/* Inspect the fde array beginning at this_fde. This
+ function can be used either in query mode (RANGE is
+ not null, OB is const), or in update mode (RANGE is
+ null, OB is modified). In query mode the function computes
+ the range of PC values and stores it in RANGE. In
+ update mode it updates encoding, mixed_encoding, and pc_begin
+ for OB. Return the number of fdes encountered along the way. */
static size_t
-classify_object_over_fdes (struct object *ob, const fde *this_fde)
+classify_object_over_fdes (struct object *ob, const fde *this_fde,
+ uintptr_type *range)
{
const struct dwarf_cie *last_cie = 0;
size_t count = 0;
@@ -660,14 +705,18 @@ classify_object_over_fdes (struct object *ob, const fde *this_fde)
if (encoding == DW_EH_PE_omit)
return -1;
base = base_from_object (encoding, ob);
- if (ob->s.b.encoding == DW_EH_PE_omit)
- ob->s.b.encoding = encoding;
- else if (ob->s.b.encoding != encoding)
- ob->s.b.mixed_encoding = 1;
+ if (!range)
+ {
+ if (ob->s.b.encoding == DW_EH_PE_omit)
+ ob->s.b.encoding = encoding;
+ else if (ob->s.b.encoding != encoding)
+ ob->s.b.mixed_encoding = 1;
+ }
}
- read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
- &pc_begin);
+ const unsigned char *p;
+ p = read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
+ &pc_begin);
/* Take care to ignore link-once functions that were removed.
In these cases, the function address will be NULL, but if
@@ -683,8 +732,29 @@ classify_object_over_fdes (struct object *ob, const fde *this_fde)
continue;
count += 1;
- if ((void *) pc_begin < ob->pc_begin)
- ob->pc_begin = (void *) pc_begin;
+ if (range)
+ {
+ _Unwind_Ptr pc_range, pc_end;
+ read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
+ pc_end = pc_begin + pc_range;
+ if ((!range[0]) && (!range[1]))
+ {
+ range[0] = pc_begin;
+ range[1] = pc_end;
+ }
+ else
+ {
+ if (pc_begin < range[0])
+ range[0] = pc_begin;
+ if (pc_end > range[1])
+ range[1] = pc_end;
+ }
+ }
+ else
+ {
+ if ((void *) pc_begin < ob->pc_begin)
+ ob->pc_begin = (void *) pc_begin;
+ }
}
return count;
@@ -769,7 +839,7 @@ init_object (struct object* ob)
fde **p = ob->u.array;
for (count = 0; *p; ++p)
{
- size_t cur_count = classify_object_over_fdes (ob, *p);
+ size_t cur_count = classify_object_over_fdes (ob, *p, NULL);
if (cur_count == (size_t) -1)
goto unhandled_fdes;
count += cur_count;
@@ -777,7 +847,7 @@ init_object (struct object* ob)
}
else
{
- count = classify_object_over_fdes (ob, ob->u.single);
+ count = classify_object_over_fdes (ob, ob->u.single, NULL);
if (count == (size_t) -1)
{
static const fde terminator;
@@ -821,6 +891,32 @@ init_object (struct object* ob)
ob->s.b.sorted = 1;
}
+#ifdef ATOMIC_FDE_FAST_PATH
+/* Get the PC range for lookup */
+static void
+get_pc_range (const struct object *ob, uintptr_type *range)
+{
+ // It is safe to cast to non-const object* here as
+ // classify_object_over_fdes does not modify ob in query mode.
+ struct object *ncob = (struct object *) (uintptr_type) ob;
+ range[0] = range[1] = 0;
+ if (ob->s.b.sorted)
+ {
+ classify_object_over_fdes (ncob, ob->u.sort->orig_data, range);
+ }
+ else if (ob->s.b.from_array)
+ {
+ fde **p = ob->u.array;
+ for (; *p; ++p)
+ classify_object_over_fdes (ncob, *p, range);
+ }
+ else
+ {
+ classify_object_over_fdes (ncob, ob->u.single, range);
+ }
+}
+#endif
+
/* A linear search through a set of FDEs for the given PC. This is
used when there was insufficient memory to allocate and sort an
array. */
@@ -985,6 +1081,9 @@ binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
static const fde *
search_object (struct object* ob, void *pc)
{
+ /* The fast path initializes objects eagerly to avoid locking.
+ * On the slow path we initialize them now */
+#ifndef ATOMIC_FDE_FAST_PATH
/* If the data hasn't been sorted, try to do this now. We may have
more memory available than last time we tried. */
if (! ob->s.b.sorted)
@@ -997,6 +1096,7 @@ search_object (struct object* ob, void *pc)
if (pc < ob->pc_begin)
return NULL;
}
+#endif
if (ob->s.b.sorted)
{
@@ -1033,17 +1133,12 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
const fde *f = NULL;
#ifdef ATOMIC_FDE_FAST_PATH
- /* For targets where unwind info is usually not registered through these
- APIs anymore, avoid taking a global lock.
- Use relaxed MO here, it is up to the app to ensure that the library
- loading/initialization happens-before using that library in other
- threads (in particular unwinding with that library's functions
- appearing in the backtraces). Calling that library's functions
- without waiting for the library to initialize would be racy. */
- if (__builtin_expect (!__atomic_load_n (&any_objects_registered,
- __ATOMIC_RELAXED), 1))
+ ob = btree_lookup (&registered_frames, (uintptr_type) pc);
+ if (!ob)
return NULL;
-#endif
+
+ f = search_object (ob, pc);
+#else
init_object_mutex_once ();
__gthread_mutex_lock (&object_mutex);
@@ -1081,6 +1176,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
fini:
__gthread_mutex_unlock (&object_mutex);
+#endif
if (f)
{
diff --git a/libgcc/unwind-dw2-fde.h b/libgcc/unwind-dw2-fde.h
index 8a011c3..77c2caa 100644
--- a/libgcc/unwind-dw2-fde.h
+++ b/libgcc/unwind-dw2-fde.h
@@ -166,7 +166,7 @@ next_fde (const fde *f)
extern const fde * _Unwind_Find_FDE (void *, struct dwarf_eh_bases *);
static inline int
-last_fde (struct object *obj __attribute__ ((__unused__)), const fde *f)
+last_fde (const struct object *obj __attribute__ ((__unused__)), const fde *f)
{
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
return f == (const fde *) obj->fde_end || f->length == 0;
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index 51c9f5c..fab472e 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,21 @@
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * ieee/ieee_exceptions.F90: Add IEEE_MODES_TYPE, IEEE_GET_MODES
+ and IEEE_SET_MODES.
+ * ieee/ieee_arithmetic.F90: Make them public in IEEE_ARITHMETIC
+ as well.
+
+2022-09-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * ieee/ieee_arithmetic.F90: Add RADIX argument to
+ IEEE_SET_ROUNDING_MODE and IEEE_GET_ROUNDING_MODE.
+ * config/fpu-387.h: Add IEEE_AWAY mode.
+ * config/fpu-aarch64.h: Add IEEE_AWAY mode.
+ * config/fpu-aix.h: Add IEEE_AWAY mode.
+ * config/fpu-generic.h: Add IEEE_AWAY mode.
+ * config/fpu-glibc.h: Add IEEE_AWAY mode.
+ * config/fpu-sysv.h: Add IEEE_AWAY mode.
+
2022-09-10 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
PR fortran/95644
diff --git a/libgfortran/config/fpu-387.h b/libgfortran/config/fpu-387.h
index fd00dab..e2f4a7d 100644
--- a/libgfortran/config/fpu-387.h
+++ b/libgfortran/config/fpu-387.h
@@ -418,9 +418,12 @@ get_fpu_rounding_mode (void)
}
int
-support_fpu_rounding_mode (int mode __attribute__((unused)))
+support_fpu_rounding_mode (int mode)
{
- return 1;
+ if (mode == GFC_FPE_AWAY)
+ return 0;
+ else
+ return 1;
}
void
diff --git a/libgfortran/config/fpu-aarch64.h b/libgfortran/config/fpu-aarch64.h
index 3a2e4ba..4789390 100644
--- a/libgfortran/config/fpu-aarch64.h
+++ b/libgfortran/config/fpu-aarch64.h
@@ -293,9 +293,12 @@ set_fpu_rounding_mode (int round)
int
-support_fpu_rounding_mode (int mode __attribute__((unused)))
+support_fpu_rounding_mode (int mode)
{
- return 1;
+ if (mode == GFC_FPE_AWAY)
+ return 0;
+ else
+ return 1;
}
diff --git a/libgfortran/config/fpu-aix.h b/libgfortran/config/fpu-aix.h
index c643874..fb1ac80 100644
--- a/libgfortran/config/fpu-aix.h
+++ b/libgfortran/config/fpu-aix.h
@@ -320,6 +320,11 @@ get_fpu_rounding_mode (void)
return GFC_FPE_TOWARDZERO;
#endif
+#ifdef FE_TONEARESTFROMZERO
+ case FE_TONEARESTFROMZERO:
+ return GFC_FPE_AWAY;
+#endif
+
default:
return 0; /* Should be unreachable. */
}
@@ -357,8 +362,14 @@ set_fpu_rounding_mode (int mode)
break;
#endif
+#ifdef FE_TONEARESTFROMZERO
+ case GFC_FPE_AWAY:
+ rnd_mode = FE_TONEARESTFROMZERO;
+ break;
+#endif
+
default:
- return; /* Should be unreachable. */
+ return;
}
fesetround (rnd_mode);
@@ -398,8 +409,15 @@ support_fpu_rounding_mode (int mode)
return 0;
#endif
+ case GFC_FPE_AWAY:
+#ifdef FE_TONEARESTFROMZERO
+ return 1;
+#else
+ return 0;
+#endif
+
default:
- return 0; /* Should be unreachable. */
+ return 0;
}
}
diff --git a/libgfortran/config/fpu-generic.h b/libgfortran/config/fpu-generic.h
index 3b62228..9e976a8 100644
--- a/libgfortran/config/fpu-generic.h
+++ b/libgfortran/config/fpu-generic.h
@@ -66,9 +66,16 @@ get_fpu_except_flags (void)
int
get_fpu_rounding_mode (void)
-{
+{
+ return 0;
+}
+
+
+int
+support_fpu_rounding_mode (int mode __attribute__((unused)))
+{
return 0;
-}
+}
void
diff --git a/libgfortran/config/fpu-glibc.h b/libgfortran/config/fpu-glibc.h
index 265ef69..f34b696 100644
--- a/libgfortran/config/fpu-glibc.h
+++ b/libgfortran/config/fpu-glibc.h
@@ -342,6 +342,11 @@ get_fpu_rounding_mode (void)
return GFC_FPE_TOWARDZERO;
#endif
+#ifdef FE_TONEARESTFROMZERO
+ case FE_TONEARESTFROMZERO:
+ return GFC_FPE_AWAY;
+#endif
+
default:
return 0; /* Should be unreachable. */
}
@@ -379,6 +384,12 @@ set_fpu_rounding_mode (int mode)
break;
#endif
+#ifdef FE_TONEARESTFROMZERO
+ case GFC_FPE_AWAY:
+ rnd_mode = FE_TONEARESTFROMZERO;
+ break;
+#endif
+
default:
return; /* Should be unreachable. */
}
@@ -420,6 +431,13 @@ support_fpu_rounding_mode (int mode)
return 0;
#endif
+ case GFC_FPE_AWAY:
+#ifdef FE_TONEARESTFROMZERO
+ return 1;
+#else
+ return 0;
+#endif
+
default:
return 0; /* Should be unreachable. */
}
diff --git a/libgfortran/config/fpu-sysv.h b/libgfortran/config/fpu-sysv.h
index 4de3852..4681322 100644
--- a/libgfortran/config/fpu-sysv.h
+++ b/libgfortran/config/fpu-sysv.h
@@ -374,9 +374,12 @@ set_fpu_rounding_mode (int mode)
int
-support_fpu_rounding_mode (int mode __attribute__((unused)))
+support_fpu_rounding_mode (int mode)
{
- return 1;
+ if (mode == GFC_FPE_AWAY)
+ return 0;
+ else
+ return 1;
}
diff --git a/libgfortran/ieee/ieee_arithmetic.F90 b/libgfortran/ieee/ieee_arithmetic.F90
index 4e01aa5..ce30e4a 100644
--- a/libgfortran/ieee/ieee_arithmetic.F90
+++ b/libgfortran/ieee/ieee_arithmetic.F90
@@ -39,7 +39,8 @@ module IEEE_ARITHMETIC
IEEE_DIVIDE_BY_ZERO, IEEE_UNDERFLOW, IEEE_INEXACT, IEEE_USUAL, &
IEEE_ALL, IEEE_STATUS_TYPE, IEEE_GET_FLAG, IEEE_GET_HALTING_MODE, &
IEEE_GET_STATUS, IEEE_SET_FLAG, IEEE_SET_HALTING_MODE, &
- IEEE_SET_STATUS, IEEE_SUPPORT_FLAG, IEEE_SUPPORT_HALTING
+ IEEE_SET_STATUS, IEEE_SUPPORT_FLAG, IEEE_SUPPORT_HALTING, &
+ IEEE_MODES_TYPE, IEEE_GET_MODES, IEEE_SET_MODES
! Derived types and named constants
@@ -73,6 +74,7 @@ module IEEE_ARITHMETIC
IEEE_TO_ZERO = IEEE_ROUND_TYPE(GFC_FPE_TOWARDZERO), &
IEEE_UP = IEEE_ROUND_TYPE(GFC_FPE_UPWARD), &
IEEE_DOWN = IEEE_ROUND_TYPE(GFC_FPE_DOWNWARD), &
+ IEEE_AWAY = IEEE_ROUND_TYPE(GFC_FPE_AWAY), &
IEEE_OTHER = IEEE_ROUND_TYPE(0)
@@ -1044,9 +1046,10 @@ contains
! IEEE_GET_ROUNDING_MODE
- subroutine IEEE_GET_ROUNDING_MODE (ROUND_VALUE)
+ subroutine IEEE_GET_ROUNDING_MODE (ROUND_VALUE, RADIX)
implicit none
type(IEEE_ROUND_TYPE), intent(out) :: ROUND_VALUE
+ integer, intent(in), optional :: RADIX
interface
integer function helper() &
@@ -1060,9 +1063,10 @@ contains
! IEEE_SET_ROUNDING_MODE
- subroutine IEEE_SET_ROUNDING_MODE (ROUND_VALUE)
+ subroutine IEEE_SET_ROUNDING_MODE (ROUND_VALUE, RADIX)
implicit none
type(IEEE_ROUND_TYPE), intent(in) :: ROUND_VALUE
+ integer, intent(in), optional :: RADIX
interface
subroutine helper(val) &
diff --git a/libgfortran/ieee/ieee_exceptions.F90 b/libgfortran/ieee/ieee_exceptions.F90
index 77363cf..3ed2f6e 100644
--- a/libgfortran/ieee/ieee_exceptions.F90
+++ b/libgfortran/ieee/ieee_exceptions.F90
@@ -56,6 +56,13 @@ module IEEE_EXCEPTIONS
character(len=GFC_FPE_STATE_BUFFER_SIZE) :: hidden
end type
+ type, public :: IEEE_MODES_TYPE
+ private
+ integer :: rounding
+ integer :: underflow
+ integer :: halting
+ end type
+
interface IEEE_SUPPORT_FLAG
module procedure IEEE_SUPPORT_FLAG_4, &
IEEE_SUPPORT_FLAG_8, &
@@ -72,9 +79,65 @@ module IEEE_EXCEPTIONS
public :: IEEE_SET_HALTING_MODE, IEEE_GET_HALTING_MODE
public :: IEEE_SET_FLAG, IEEE_GET_FLAG
public :: IEEE_SET_STATUS, IEEE_GET_STATUS
+ public :: IEEE_SET_MODES, IEEE_GET_MODES
contains
+! Fortran 2018: Saving and restoring floating-point modes
+! (rounding modes, underflow mode, and halting mode)
+!
+! For now, we only have one rounding mode for all kinds.
+! Some targets could optimize getting/setting all modes at once, but for now
+! we make three calls. This code must be kept in sync with:
+! - IEEE_{GET,SET}_ROUNDING_MODE
+! - IEEE_{GET,SET}_UNDERFLOW_MODE
+! - IEEE_{GET,SET}_HALTING_MODE
+
+ subroutine IEEE_GET_MODES (MODES)
+ implicit none
+ type(IEEE_MODES_TYPE), intent(out) :: MODES
+
+ interface
+ integer function helper_rounding() &
+ bind(c, name="_gfortrani_get_fpu_rounding_mode")
+ end function
+ integer function helper_underflow() &
+ bind(c, name="_gfortrani_get_fpu_underflow_mode")
+ end function
+ pure integer function helper_halting() &
+ bind(c, name="_gfortrani_get_fpu_trap_exceptions")
+ end function
+ end interface
+
+ MODES%rounding = helper_rounding()
+ MODES%underflow = helper_underflow()
+ MODES%halting = helper_halting()
+ end subroutine
+
+ subroutine IEEE_SET_MODES (MODES)
+ implicit none
+ type(IEEE_MODES_TYPE), intent(in) :: MODES
+
+ interface
+ subroutine helper_rounding(val) &
+ bind(c, name="_gfortrani_set_fpu_rounding_mode")
+ integer, value :: val
+ end subroutine
+ subroutine helper_underflow(val) &
+ bind(c, name="_gfortrani_set_fpu_underflow_mode")
+ integer, value :: val
+ end subroutine
+ pure subroutine helper_halting(trap, notrap) &
+ bind(c, name="_gfortrani_set_fpu_trap_exceptions")
+ integer, intent(in), value :: trap, notrap
+ end subroutine
+ end interface
+
+ call helper_rounding(MODES%rounding)
+ call helper_underflow(MODES%underflow)
+ call helper_halting(MODES%halting, NOT(MODES%halting))
+ end subroutine
+
! Saving and restoring floating-point status
subroutine IEEE_GET_STATUS (STATUS_VALUE)
diff --git a/libgomp/ChangeLog b/libgomp/ChangeLog
index ea62dc6..d3b4758 100644
--- a/libgomp/ChangeLog
+++ b/libgomp/ChangeLog
@@ -1,3 +1,47 @@
+2022-09-14 Julian Brown <julian@codesourcery.com>
+
+ * testsuite/libgomp.oacc-c-c++-common/deep-copy-15.c: New test.
+ * testsuite/libgomp.oacc-c-c++-common/deep-copy-16.c: New test.
+ * testsuite/libgomp.oacc-c++/deep-copy-17.C: New test.
+ * testsuite/libgomp.oacc-c-c++-common/deep-copy-arrayofstruct.c: Move
+ test to here, make "run" test.
+
+2022-09-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR libgomp/106906
+ * env.c (get_icv_member_addr): Cast false to void * before assigning
+ it to icv_addr[1], and comment the whole assignment out.
+
+2022-09-13 Tobias Burnus <tobias@codesourcery.com>
+
+ * libgomp.texi (gcn): Move misplaced -march=sm_30 remark to ...
+ (nvptx): ... here.
+
+2022-09-12 Tobias Burnus <tobias@codesourcery.com>
+
+ * libgomp.texi (Offload-Target Specifics: nvptx): Document
+ that reverse offload requires >= -march=sm_35.
+ * testsuite/libgomp.c-c++-common/requires-4.c: Build for nvptx
+ with -misa=sm_35.
+ * testsuite/libgomp.c-c++-common/requires-5.c: Likewise.
+ * testsuite/libgomp.c-c++-common/requires-6.c: Likewise.
+ * testsuite/libgomp.c-c++-common/reverse-offload-1.c: Likewise.
+ * testsuite/libgomp.fortran/reverse-offload-1.f90: Likewise.
+ * testsuite/libgomp.c/reverse-offload-sm30.c: New test.
+
+2022-09-12 Tobias Burnus <tobias@codesourcery.com>
+
+ * libgomp.texi (OpenMP 5.1 Impl. Status): Add two new minor items.
+ (OpenMP 5.2 Impl. Status): Improve omp/omx/ompx wording.
+
+2022-09-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR libgomp/106894
+ * testsuite/libgomp.c-c++-common/icv-6.c: Include string.h.
+ (main): Avoid tests for which corresponding non-_ALL suffixed variable
+ is in the environment, or for OMP_NUM_TEAMS on the device
+ OMP_NUM_TEAMS_DEV_?.
+
2022-09-10 Iain Sandoe <iain@sandoe.co.uk>
* env.c (initialize_env): Include libiberty environ.h.
diff --git a/libgomp/env.c b/libgomp/env.c
index ac8c764..0249966 100644
--- a/libgomp/env.c
+++ b/libgomp/env.c
@@ -1892,14 +1892,14 @@ get_icv_member_addr (struct gomp_initial_icvs *icvs, int icv_code,
{
case GOMP_ICV_NTEAMS:
icv_addr[0] = &icvs->nteams_var;
- icv_addr[1] = false;
+ /* icv_addr[1] = (void *) false; */
break;
case GOMP_ICV_DYNAMIC:
icv_addr[0] = &(*icvs).dyn_var;
break;
case GOMP_ICV_TEAMS_THREAD_LIMIT:
icv_addr[0] = &icvs->teams_thread_limit_var;
- icv_addr[1] = false;
+ /* icv_addr[1] = (void *) false; */
break;
case GOMP_ICV_SCHEDULE:
icv_addr[0] = &icvs->run_sched_var;
@@ -1907,7 +1907,7 @@ get_icv_member_addr (struct gomp_initial_icvs *icvs, int icv_code,
break;
case GOMP_ICV_THREAD_LIMIT:
icv_addr[0] = &icvs->thread_limit_var;
- icv_addr[1] = false;
+ /* icv_addr[1] = (void *) false; */
icv_addr[2] = (void *) UINT_MAX;
break;
case GOMP_ICV_NTHREADS:
diff --git a/libgomp/libgomp.texi b/libgomp/libgomp.texi
index ce3ba76..addf2d4 100644
--- a/libgomp/libgomp.texi
+++ b/libgomp/libgomp.texi
@@ -348,6 +348,9 @@ The OpenMP 4.5 specification is fully supported.
@item Support @code{begin/end declare target} syntax in C/C++ @tab N @tab
@item Pointer predetermined firstprivate getting initialized
to address of matching mapped list item per 5.1, Sect. 2.21.7.2 @tab N @tab
+@item @code{begin declare target} directive @tab N @tab
+@item For Fortran, diagnose placing declarative before/between @code{USE},
+ @code{IMPORT}, and @code{IMPLICIT} as invalid @tab N @tab
@end multitable
@@ -362,12 +365,13 @@ to address of matching mapped list item per 5.1, Sect. 2.21.7.2 @tab N @tab
@tab N @tab
@item @code{omp}/@code{ompx}/@code{omx} sentinels and @code{omp_}/@code{ompx_}
namespaces @tab N/A
- @tab warning for @code{omp/ompx} sentinels@footnote{@code{omp/ompx}
- sentinels as C/C++ pragma and C++ attributes are warned for with
+ @tab warning for @code{ompx/omx} sentinels@footnote{The @code{ompx}
+ sentinel as C/C++ pragma and C++ attributes are warned for with
@code{-Wunknown-pragmas} (implied by @code{-Wall}) and @code{-Wattributes}
(enabled by default), respectively; for Fortran free-source code, there is
- a warning enabled by default and for fixed-source code with
- @code{-Wsurprising} (enabled by @code{-Wall})}
+ a warning enabled by default and, for fixed-source code, the @code{omx}
+ sentinel is warned for with @code{-Wsurprising} (enabled by
+ @code{-Wall}). Unknown clauses are always rejected with an error.}
@item Clauses on @code{end} directive can be on directive @tab N @tab
@item Deprecation of no-argument @code{destroy} clause on @code{depobj}
@tab N @tab
@@ -4428,6 +4432,9 @@ The implementation remark:
@item I/O within OpenMP target regions and OpenACC parallel/kernels is supported
using the C library @code{printf} functions. Note that the Fortran
@code{print}/@code{write} statements are not supported, yet.
+@item Compiling OpenMP code that contains @code{requires reverse_offload}
+ requires at least @code{-march=sm_35}; compiling for @code{-march=sm_30}
+ is not supported.
@end itemize
diff --git a/libgomp/testsuite/libgomp.c-c++-common/icv-6.c b/libgomp/testsuite/libgomp.c-c++-common/icv-6.c
index 7151bd1..e199a18 100644
--- a/libgomp/testsuite/libgomp.c-c++-common/icv-6.c
+++ b/libgomp/testsuite/libgomp.c-c++-common/icv-6.c
@@ -17,6 +17,7 @@
#include <omp.h>
#include <stdlib.h>
+#include <string.h>
int
main ()
@@ -25,21 +26,28 @@ main ()
int chunk_size;
omp_get_schedule(&kind, &chunk_size);
- if (omp_get_max_teams () != 42
- || !omp_get_dynamic ()
- || kind != 3 || chunk_size != 4
- || omp_get_teams_thread_limit () != 44
- || omp_get_thread_limit () != 45
- || omp_get_max_threads () != 46
- || omp_get_proc_bind () != omp_proc_bind_spread
- || omp_get_max_active_levels () != 47)
+ if ((!getenv ("OMP_NUM_TEAMS") && omp_get_max_teams () != 42)
+ || (!getenv ("OMP_DYNAMIC") && !omp_get_dynamic ())
+ || (!getenv ("OMP_SCHEDULE") && (kind != 3 || chunk_size != 4))
+ || (!getenv ("OMP_TEAMS_THREAD_LIMIT") && omp_get_teams_thread_limit () != 44)
+ || (!getenv ("OMP_THREAD_LIMIT") && omp_get_thread_limit () != 45)
+ || (!getenv ("OMP_NUM_THREADS") && omp_get_max_threads () != 46)
+ || (!getenv ("OMP_PROC_BIND") && omp_get_proc_bind () != omp_proc_bind_spread)
+ || (!getenv ("OMP_MAX_ACTIVE_LEVELS") && omp_get_max_active_levels () != 47))
abort ();
int num_devices = omp_get_num_devices () > 3 ? 3 : omp_get_num_devices ();
- for (int i=0; i < num_devices; i++)
+ for (int i = 0; i < num_devices; i++)
+ {
+ char name[sizeof ("OMP_NUM_TEAMS_DEV_1")];
+ strcpy (name, "OMP_NUM_TEAMS_DEV_1");
+ name[sizeof ("OMP_NUM_TEAMS_DEV_1") - 2] = '0' + i;
+ if (getenv (name))
+ continue;
#pragma omp target device (i)
if (omp_get_max_teams () != 43)
abort ();
+ }
return 0;
}
diff --git a/libgomp/testsuite/libgomp.c-c++-common/requires-4.c b/libgomp/testsuite/libgomp.c-c++-common/requires-4.c
index 6ed5a5f..5883eff 100644
--- a/libgomp/testsuite/libgomp.c-c++-common/requires-4.c
+++ b/libgomp/testsuite/libgomp.c-c++-common/requires-4.c
@@ -1,4 +1,5 @@
/* { dg-additional-options "-flto" } */
+/* { dg-additional-options "-foffload-options=nvptx-none=-misa=sm_35" { target { offload_target_nvptx } } } */
/* { dg-additional-sources requires-4-aux.c } */
/* Check no diagnostic by device-compiler's or host compiler's lto1.
diff --git a/libgomp/testsuite/libgomp.c-c++-common/requires-5.c b/libgomp/testsuite/libgomp.c-c++-common/requires-5.c
index 7fe0c73..d43d78d 100644
--- a/libgomp/testsuite/libgomp.c-c++-common/requires-5.c
+++ b/libgomp/testsuite/libgomp.c-c++-common/requires-5.c
@@ -1,3 +1,4 @@
+/* { dg-additional-options "-foffload-options=nvptx-none=-misa=sm_35" { target { offload_target_nvptx } } } */
/* { dg-additional-sources requires-5-aux.c } */
/* Depending on offload device capabilities, it may print something like the
diff --git a/libgomp/testsuite/libgomp.c-c++-common/requires-6.c b/libgomp/testsuite/libgomp.c-c++-common/requires-6.c
index b00c745..a25b4d2 100644
--- a/libgomp/testsuite/libgomp.c-c++-common/requires-6.c
+++ b/libgomp/testsuite/libgomp.c-c++-common/requires-6.c
@@ -1,3 +1,5 @@
+/* { dg-additional-options "-foffload-options=nvptx-none=-misa=sm_35" { target { offload_target_nvptx } } } */
+
#pragma omp requires unified_shared_memory, unified_address, reverse_offload
/* The requires line is not active as there is none of:
diff --git a/libgomp/testsuite/libgomp.c-c++-common/reverse-offload-1.c b/libgomp/testsuite/libgomp.c-c++-common/reverse-offload-1.c
index 976e129..52d828c 100644
--- a/libgomp/testsuite/libgomp.c-c++-common/reverse-offload-1.c
+++ b/libgomp/testsuite/libgomp.c-c++-common/reverse-offload-1.c
@@ -1,4 +1,5 @@
/* { dg-do run } */
+/* { dg-additional-options "-foffload-options=nvptx-none=-misa=sm_35" { target { offload_target_nvptx } } } */
/* { dg-additional-sources reverse-offload-1-aux.c } */
/* Check that reverse offload works in particular:
diff --git a/libgomp/testsuite/libgomp.c/reverse-offload-sm30.c b/libgomp/testsuite/libgomp.c/reverse-offload-sm30.c
new file mode 100644
index 0000000..fbfeae1
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/reverse-offload-sm30.c
@@ -0,0 +1,15 @@
+/* { dg-do link { target { offload_target_nvptx } } } */
+/* { dg-additional-options "-foffload-options=nvptx-none=-march=sm_30 -foffload=-mptx=_" } */
+
+#pragma omp requires reverse_offload
+
+int
+main ()
+{
+ #pragma omp target
+ {
+ }
+ return 0;
+}
+
+/* { dg-warning "'omp requires reverse_offload' requires at least 'sm_35' for '-foffload-options=nvptx-none=-march=' - disabling offload-code generation for this device type" "" { target *-*-* } 0 } */
diff --git a/libgomp/testsuite/libgomp.fortran/reverse-offload-1.f90 b/libgomp/testsuite/libgomp.fortran/reverse-offload-1.f90
index 7cfb8b6..de68011 100644
--- a/libgomp/testsuite/libgomp.fortran/reverse-offload-1.f90
+++ b/libgomp/testsuite/libgomp.fortran/reverse-offload-1.f90
@@ -1,4 +1,5 @@
! { dg-do run }
+! { dg-additional-options "-foffload-options=nvptx-none=-misa=sm_35" { target { offload_target_nvptx } } }
! { dg-additional-sources reverse-offload-1-aux.f90 }
! Check that reverse offload works in particular:
diff --git a/libgomp/testsuite/libgomp.oacc-c++/deep-copy-17.C b/libgomp/testsuite/libgomp.oacc-c++/deep-copy-17.C
new file mode 100644
index 0000000..dacbb52
--- /dev/null
+++ b/libgomp/testsuite/libgomp.oacc-c++/deep-copy-17.C
@@ -0,0 +1,101 @@
+#include <cassert>
+
+/* Test attach/detach operation with pointers and references to structs. */
+
+typedef struct mystruct {
+ int *a;
+ int b;
+ int *c;
+ int d;
+ int *e;
+} mystruct;
+
+void str (void)
+{
+ int a[10], c[10], e[10];
+ mystruct m = { .a = a, .c = c, .e = e };
+ a[0] = 5;
+ c[0] = 7;
+ e[0] = 9;
+ #pragma acc parallel copy(m.a[0:10], m.b, m.c[0:10], m.d, m.e[0:10])
+ {
+ m.a[0] = m.c[0] + m.e[0];
+ }
+ assert (m.a[0] == 7 + 9);
+}
+
+void strp (void)
+{
+ int *a = new int[10];
+ int *c = new int[10];
+ int *e = new int[10];
+ mystruct *m = new mystruct;
+ m->a = a;
+ m->c = c;
+ m->e = e;
+ a[0] = 6;
+ c[0] = 8;
+ e[0] = 10;
+ #pragma acc parallel copy(m->a[0:10], m->b, m->c[0:10], m->d, m->e[0:10])
+ {
+ m->a[0] = m->c[0] + m->e[0];
+ }
+ assert (m->a[0] == 8 + 10);
+ delete m;
+ delete[] a;
+ delete[] c;
+ delete[] e;
+}
+
+void strr (void)
+{
+ int *a = new int[10];
+ int *c = new int[10];
+ int *e = new int[10];
+ mystruct m;
+ mystruct &n = m;
+ n.a = a;
+ n.c = c;
+ n.e = e;
+ a[0] = 7;
+ c[0] = 9;
+ e[0] = 11;
+ #pragma acc parallel copy(n.a[0:10], n.b, n.c[0:10], n.d, n.e[0:10])
+ {
+ n.a[0] = n.c[0] + n.e[0];
+ }
+ assert (n.a[0] == 9 + 11);
+ delete[] a;
+ delete[] c;
+ delete[] e;
+}
+
+void strrp (void)
+{
+ int a[10], c[10], e[10];
+ mystruct *m = new mystruct;
+ mystruct *&n = m;
+ n->a = a;
+ n->b = 3;
+ n->c = c;
+ n->d = 5;
+ n->e = e;
+ a[0] = 8;
+ c[0] = 10;
+ e[0] = 12;
+ #pragma acc parallel copy(n->a[0:10], n->c[0:10], n->e[0:10])
+ {
+ n->a[0] = n->c[0] + n->e[0];
+ }
+ assert (n->a[0] == 10 + 12);
+ delete m;
+}
+
+int main (int argc, char *argv[])
+{
+ str ();
+ strp ();
+ strr ();
+ strrp ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-15.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-15.c
new file mode 100644
index 0000000..27fe1a9
--- /dev/null
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-15.c
@@ -0,0 +1,68 @@
+#include <stdlib.h>
+
+/* Test multiple struct dereferences on one directive, and slices starting at
+ non-zero. */
+
+typedef struct {
+ int *a;
+ int *b;
+ int *c;
+} mystruct;
+
+int main(int argc, char* argv[])
+{
+ const int N = 1024;
+ mystruct *m = (mystruct *) malloc (sizeof (*m));
+ int i;
+
+ m->a = (int *) malloc (N * sizeof (int));
+ m->b = (int *) malloc (N * sizeof (int));
+ m->c = (int *) malloc (N * sizeof (int));
+
+ for (i = 0; i < N; i++)
+ {
+ m->a[i] = 0;
+ m->b[i] = 0;
+ m->c[i] = 0;
+ }
+
+ for (int i = 0; i < 99; i++)
+ {
+ int j;
+#pragma acc parallel loop copy(m->a[0:N])
+ for (j = 0; j < N; j++)
+ m->a[j]++;
+#pragma acc parallel loop copy(m->b[0:N], m->c[5:N-10])
+ for (j = 0; j < N; j++)
+ {
+ m->b[j]++;
+ if (j > 5 && j < N - 5)
+ m->c[j]++;
+ }
+ }
+
+ for (i = 0; i < N; i++)
+ {
+ if (m->a[i] != 99)
+ abort ();
+ if (m->b[i] != 99)
+ abort ();
+ if (i > 5 && i < N-5)
+ {
+ if (m->c[i] != 99)
+ abort ();
+ }
+ else
+ {
+ if (m->c[i] != 0)
+ abort ();
+ }
+ }
+
+ free (m->a);
+ free (m->b);
+ free (m->c);
+ free (m);
+
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-16.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-16.c
new file mode 100644
index 0000000..a7308e8
--- /dev/null
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-16.c
@@ -0,0 +1,231 @@
+#include <stdlib.h>
+
+/* Test mapping chained indirect struct accesses, mixed in different ways. */
+
+typedef struct {
+ int *a;
+ int b;
+ int *c;
+} str1;
+
+typedef struct {
+ int d;
+ int *e;
+ str1 *f;
+} str2;
+
+typedef struct {
+ int g;
+ int h;
+ str2 *s2;
+} str3;
+
+typedef struct {
+ str3 m;
+ str3 n;
+} str4;
+
+void
+zero_arrays (str4 *s, int N)
+{
+ for (int i = 0; i < N; i++)
+ {
+ s->m.s2->e[i] = 0;
+ s->m.s2->f->a[i] = 0;
+ s->m.s2->f->c[i] = 0;
+ s->n.s2->e[i] = 0;
+ s->n.s2->f->a[i] = 0;
+ s->n.s2->f->c[i] = 0;
+ }
+}
+
+void
+alloc_s2 (str2 **s, int N)
+{
+ (*s) = (str2 *) malloc (sizeof (str2));
+ (*s)->f = (str1 *) malloc (sizeof (str1));
+ (*s)->e = (int *) malloc (sizeof (int) * N);
+ (*s)->f->a = (int *) malloc (sizeof (int) * N);
+ (*s)->f->c = (int *) malloc (sizeof (int) * N);
+}
+
+int main (int argc, char* argv[])
+{
+ const int N = 1024;
+ str4 p, *q;
+ int i;
+
+ alloc_s2 (&p.m.s2, N);
+ alloc_s2 (&p.n.s2, N);
+ q = (str4 *) malloc (sizeof (str4));
+ alloc_s2 (&q->m.s2, N);
+ alloc_s2 (&q->n.s2, N);
+
+ zero_arrays (&p, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(p.m.s2[:1])
+#pragma acc parallel loop copy(p.m.s2->e[:N])
+ for (int j = 0; j < N; j++)
+ p.m.s2->e[j]++;
+#pragma acc exit data delete(p.m.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (p.m.s2->e[i] != 99)
+ abort ();
+
+ zero_arrays (&p, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(p.m.s2[:1])
+#pragma acc enter data copyin(p.m.s2->f[:1])
+#pragma acc parallel loop copy(p.m.s2->f->a[:N]) copy(p.m.s2->f->c[:N])
+ for (int j = 0; j < N; j++)
+ {
+ p.m.s2->f->a[j]++;
+ p.m.s2->f->c[j]++;
+ }
+#pragma acc exit data delete(p.m.s2->f[:1])
+#pragma acc exit data delete(p.m.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (p.m.s2->f->a[i] != 99 || p.m.s2->f->c[i] != 99)
+ abort ();
+
+ zero_arrays (&p, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(p.m.s2[:1]) copyin(p.n.s2[:1])
+#pragma acc enter data copyin(p.m.s2->f[:1]) copyin(p.n.s2->f[:1])
+#pragma acc parallel loop copy(p.m.s2->f->a[:N]) copy(p.m.s2->f->c[:N]) \
+ copy(p.n.s2->f->a[:N]) copy(p.n.s2->f->c[:N])
+ for (int j = 0; j < N; j++)
+ {
+ p.m.s2->f->a[j]++;
+ p.m.s2->f->c[j]++;
+ p.n.s2->f->a[j]++;
+ p.n.s2->f->c[j]++;
+ }
+#pragma acc exit data delete(p.m.s2->f[:1]) delete(p.n.s2->f[:1])
+#pragma acc exit data delete(p.m.s2[:1]) delete(p.n.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (p.m.s2->f->a[i] != 99 || p.m.s2->f->c[i] != 99
+ || p.n.s2->f->a[i] != 99 || p.n.s2->f->c[i] != 99)
+ abort ();
+
+ zero_arrays (&p, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(p.m.s2[:1]) copyin(p.n.s2[:1])
+#pragma acc enter data copyin(p.n.s2->e[:N]) copyin(p.n.s2->f[:1]) \
+ copyin(p.m.s2->f[:1])
+#pragma acc parallel loop copy(p.m.s2->f->a[:N]) copy(p.n.s2->f->a[:N])
+ for (int j = 0; j < N; j++)
+ {
+ p.m.s2->f->a[j]++;
+ p.n.s2->f->a[j]++;
+ p.n.s2->e[j]++;
+ }
+#pragma acc exit data delete(p.m.s2->f[:1]) delete(p.n.s2->f[:1]) \
+ copyout(p.n.s2->e[:N])
+#pragma acc exit data delete(p.m.s2[:1]) delete(p.n.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (p.m.s2->f->a[i] != 99 || p.n.s2->f->a[i] != 99
+ || p.n.s2->e[i] != 99)
+ abort ();
+
+ zero_arrays (q, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(q->m.s2[:1])
+#pragma acc parallel loop copy(q->m.s2->e[:N])
+ for (int j = 0; j < N; j++)
+ q->m.s2->e[j]++;
+#pragma acc exit data delete(q->m.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (q->m.s2->e[i] != 99)
+ abort ();
+
+ zero_arrays (q, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(q->m.s2[:1])
+#pragma acc enter data copyin(q->m.s2->f[:1])
+#pragma acc parallel loop copy(q->m.s2->f->a[:N]) copy(q->m.s2->f->c[:N])
+ for (int j = 0; j < N; j++)
+ {
+ q->m.s2->f->a[j]++;
+ q->m.s2->f->c[j]++;
+ }
+#pragma acc exit data delete(q->m.s2->f[:1])
+#pragma acc exit data delete(q->m.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (q->m.s2->f->a[i] != 99 || q->m.s2->f->c[i] != 99)
+ abort ();
+
+ zero_arrays (q, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(q->m.s2[:1]) copyin(q->n.s2[:1])
+#pragma acc enter data copyin(q->m.s2->f[:1]) copyin(q->n.s2->f[:1])
+#pragma acc parallel loop copy(q->m.s2->f->a[:N]) copy(q->m.s2->f->c[:N]) \
+ copy(q->n.s2->f->a[:N]) copy(q->n.s2->f->c[:N])
+ for (int j = 0; j < N; j++)
+ {
+ q->m.s2->f->a[j]++;
+ q->m.s2->f->c[j]++;
+ q->n.s2->f->a[j]++;
+ q->n.s2->f->c[j]++;
+ }
+#pragma acc exit data delete(q->m.s2->f[:1]) delete(q->n.s2->f[:1])
+#pragma acc exit data delete(q->m.s2[:1]) delete(q->n.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (q->m.s2->f->a[i] != 99 || q->m.s2->f->c[i] != 99
+ || q->n.s2->f->a[i] != 99 || q->n.s2->f->c[i] != 99)
+ abort ();
+
+ zero_arrays (q, N);
+
+ for (int i = 0; i < 99; i++)
+ {
+#pragma acc enter data copyin(q->m.s2[:1]) copyin(q->n.s2[:1])
+#pragma acc enter data copyin(q->n.s2->e[:N]) copyin(q->m.s2->f[:1]) \
+ copyin(q->n.s2->f[:1])
+#pragma acc parallel loop copy(q->m.s2->f->a[:N]) copy(q->n.s2->f->a[:N])
+ for (int j = 0; j < N; j++)
+ {
+ q->m.s2->f->a[j]++;
+ q->n.s2->f->a[j]++;
+ q->n.s2->e[j]++;
+ }
+#pragma acc exit data delete(q->m.s2->f[:1]) delete(q->n.s2->f[:1]) \
+ copyout(q->n.s2->e[:N])
+#pragma acc exit data delete(q->m.s2[:1]) delete(q->n.s2[:1])
+ }
+
+ for (i = 0; i < N; i++)
+ if (q->m.s2->f->a[i] != 99 || q->n.s2->f->a[i] != 99
+ || q->n.s2->e[i] != 99)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c b/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-arrayofstruct.c
index 4247607..a11c647 100644
--- a/gcc/testsuite/c-c++-common/goacc/deep-copy-arrayofstruct.c
+++ b/libgomp/testsuite/libgomp.oacc-c-c++-common/deep-copy-arrayofstruct.c
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do run } */
#include <stdlib.h>
#include <stdio.h>
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 2e15013..a55293e 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,263 @@
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ PR libstdc++/106953
+ * include/backward/auto_ptr.h [!_GLIBCXX_HOSTED]: Do not define
+ shared_ptr members.
+ * include/bits/alloc_traits.h [!_GLIBCXX_HOSTED]: Do not declare
+ std::allocator_traits<std::allocator<T>> specializations for
+ freestanding.
+ * include/bits/memoryfwd.h [!_GLIBCXX_HOSTED] (allocator): Do
+ not declare for freestanding.
+ * include/bits/stl_algo.h [!_GLIBCXX_HOSTED] (stable_partition):
+ Do not define for freestanding.
+ [!_GLIBCXX_HOSTED] (merge, stable_sort): Do not use temporary
+ buffers for freestanding.
+ * include/bits/stl_algobase.h [!_GLIBCXX_HOSTED]: Do not declare
+ streambuf iterators and overloaded algorithms using them.
+ * include/bits/stl_uninitialized.h [!_GLIBCXX_HOSTED]: Do not
+ define specialized overloads for std::allocator.
+ * include/bits/unique_ptr.h [!_GLIBCXX_HOSTED] (make_unique)
+ (make_unique_for_overwrite, operator<<): Do not define for
+ freestanding.
+ * include/c_global/cstdlib [!_GLIBCXX_HOSTED] (_Exit): Declare.
+ Use _GLIBCXX_NOTHROW instead of throw().
+ * include/debug/assertions.h [!_GLIBCXX_HOSTED]: Ignore
+ _GLIBCXX_DEBUG for freestanding.
+ * include/debug/debug.h [!_GLIBCXX_DEBUG]: Likewise.
+ * include/std/bit [!_GLIBCXX_HOSTED]: Do not use the custom
+ __int_traits if <ext/numeric_traits.h> is available.
+ * include/std/functional [!_GLIBCXX_HOSTED]: Do not include
+ headers that aren't valid for freestanding.
+ (boyer_moore_searcher, boyer_moore_horspool_searcher): Do not
+ define for freestanding.
+ * include/std/iterator [!_GLIBCXX_HOSTED]: Do not include
+ headers that aren't valid for freestanding.
+ * include/std/memory [!_GLIBCXX_HOSTED]: Likewise.
+ * include/std/ranges [!_GLIBCXX_HOSTED] (istream_view): Do not
+ define for freestanding.
+ (views::__detail::__is_basic_string_view) [!_GLIBCXX_HOSTED]:
+ Do not define partial specialization for freestanding.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/alloc_traits.h (__alloc_swap)
+ (__shrink_to_fit_aux): Move here, from ...
+ * include/bits/allocator.h: ... here.
+ * include/ext/alloc_traits.h: Do not include allocator.h.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/stl_algo.h: Include <bits/stl_algobase.h>.
+ * include/bits/stl_tempbuf.h: Include headers for __try and
+ __catch macros, std::pair, and __gnu_cxx::__numeric_traits.
+ * include/bits/stream_iterator.h: Include <iosfwd> and headers
+ for std::addressof and std::iterator.
+ * include/bits/streambuf_iterator.h: Include header for
+ std::iterator.
+ * include/std/iterator: Do not include <iosfwd>.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * testsuite/29_atomics/atomic_ref/compare_exchange_padding.cc:
+ Store value with non-zero padding bits after construction.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/tr1/random.h (__detail::_Adaptor::_BEngine): Remove.
+ (__detail::_Adaptor::_M_g): Make public.
+ (__detail::_Adaptor<_Engine*, _Dist>): Remove partial
+ specialization.
+ (variate_generator::_Value): New helper to simplify handling of
+ _Engine* and _Engine& template arguments.
+ (variate_generator::engine_value_type): Define to underlying
+ engine type, not adapted type.
+ (variate_generator::engine()): Return underlying engine instead
+ of adaptor.
+ * testsuite/tr1/5_numerical_facilities/random/variate_generator/37986.cc:
+ Fix comment.
+ * testsuite/tr1/5_numerical_facilities/random/variate_generator/requirements/typedefs.cc:
+ Check member typedefs have the correct types.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/debug/formatter.h [_GLIBCXX_DEBUG_BACKTRACE]
+ (_Error_formatter): Use 0 as null pointer constant.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/allocator.h (__alloc_neq): Remove.
+ * include/bits/stl_list.h (list::_M_check_equal_allocators):
+ Compare allocators directly, without __alloc_neq.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * doc/doxygen/user.cfg.in (PREDEFINED): Remove __allocator_base.
+ * include/bits/allocator.h: Fix nesting of Doxygen commands.
+
+2022-09-16 Jonathan Wakely <jwakely@redhat.com>
+
+ * doc/xml/manual/abi.xml: Document GLIBCXX_3.4.30 and
+ GLIBCXX_3.4.31 versions.
+ * doc/html/manual/abi.html: Regenerate.
+
+2022-09-15 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/std/memory: Do not include <bits/stl_algobase.h>.
+
+2022-09-15 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/shared_ptr_atomic.h (_GLIBCXX_TSAN_MUTEX_PRE_LOCK):
+ Replace with ...
+ (_GLIBCXX_TSAN_MUTEX_TRY_LOCK): ... this, add try_lock flag.
+ (_GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED): New macro using
+ try_lock_failed flag
+ (_GLIBCXX_TSAN_MUTEX_POST_LOCK): Rename to ...
+ (_GLIBCXX_TSAN_MUTEX_LOCKED): ... this.
+ (_GLIBCXX_TSAN_MUTEX_PRE_UNLOCK): Remove invalid flag.
+ (_GLIBCXX_TSAN_MUTEX_POST_UNLOCK): Remove invalid flag.
+ (_Sp_atomic::_Atomic_count::lock): Use new macros.
+
+2022-09-15 Patrick Palka <ppalka@redhat.com>
+
+ * include/bits/ranges_algo.h (__adjacent_find_fn, adjacent_find):
+ Move to ...
+ * include/bits/ranges_util.h: ... here.
+ * include/std/ranges (chunk_by_view): Define.
+ (chunk_by_view::_Iterator): Define.
+ (__detail::__can_chunk_by_view): Define.
+ (_ChunkBy, chunk_by): Define.
+ * testsuite/std/ranges/adaptors/chunk_by/1.cc: New test.
+
+2022-09-15 François Dumont <fdumont@gcc.gnu.org>
+
+ * testsuite/20_util/is_complete_or_unbounded/memoization_neg.cc:
+ Adapt dg-prune-output to _GLIBCXX_INLINE_VERSION mode.
+
+2022-09-15 François Dumont <fdumont@gcc.gnu.org>
+
+ * config/abi/pre/gnu-versioned-namespace.ver: Remove obsolete std::__detail::__8
+ symbols.
+
+2022-09-14 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/shared_ptr_atomic.h (_GLIBCXX_TSAN_MUTEX_DESTROY)
+ (_GLIBCXX_TSAN_MUTEX_PRE_LOCK, _GLIBCXX_TSAN_MUTEX_POST_LOCK)
+ (_GLIBCXX_TSAN_MUTEX_PRE_UNLOCK, _GLIBCXX_TSAN_MUTEX_POST_UNLOCK)
+ (_GLIBCXX_TSAN_MUTEX_PRE_SIGNAL, _GLIBCXX_TSAN_MUTEX_POST_SIGNAL):
+ Define macros for TSan annotation functions.
+ (_Sp_atomic::_Atomic_count): Add annotations.
+
+2022-09-14 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/stl_tempbuf.h: Include <new>.
+
+2022-09-14 Jonathan Wakely <jwakely@redhat.com>
+
+ * testsuite/17_intro/names.cc: Explain why poison pragma can't
+ be used.
+
+2022-09-14 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/std/future
+ (_State_baseV2::__setter(exception_ptr&, promise&)): Add
+ assertion for LWG 2276 precondition.
+ * testsuite/30_threads/promise/members/set_exception_neg.cc:
+ New test.
+
+2022-09-14 Jonathan Wakely <jwakely@redhat.com>
+
+ * doc/xml/manual/intro.xml: Document LWG 1203.
+ * doc/html/*: Regenerate.
+
+2022-09-14 Philipp Fent <fent@in.tum.de>
+
+ * python/libstdcxx/v6/printers.py (access_streambuf_ptrs):
+ New helper function.
+ (StdStringBufPrinter, StdStringStreamPrinter): New printers.
+ (build_libstdcxx_dictionary): Register stringstream printers.
+ * testsuite/libstdc++-prettyprinters/debug.cc: Check string
+ streams.
+ * testsuite/libstdc++-prettyprinters/simple.cc: Likewise.
+ * testsuite/libstdc++-prettyprinters/simple11.cc: Likewise.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * include/std/ranges (__detail::__slide_caches_nothing): Define.
+ (__detail::__slide_caches_last): Define.
+ (__detail::__slide_caches_first): Define.
+ (slide_view): Define.
+ (enable_borrowed_range<slide_view>): Define.
+ (slide_view::_Iterator): Define.
+ (slide_view::_Sentinel): Define.
+ (views::__detail::__can_slide_view): Define.
+ (views::_Slide, views::slide): Define.
+ * testsuite/std/ranges/adaptors/slide/1.cc: New test.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * include/std/ranges (__detail::__div_ceil): Define.
+ (chunk_view): Define.
+ (chunk_view::_OuterIter): Define.
+ (chunk_view::_OuterIter::value_type): Define.
+ (chunk_view::_InnerIter): Define.
+ (chunk_view<_Vp>): Define partial specialization for forward
+ ranges.
+ (enable_borrowed_range<chunk_view>): Define.
+ (chunk_view<_Vp>::_Iterator): Define.
+ (views::__detail::__can_chunk_view): Define.
+ (views::_Chunk, views::chunk): Define.
+ * testsuite/std/ranges/adaptors/chunk/1.cc: New test.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * include/std/ranges (join_view::_Iterator::_M_satisfy):
+ Adjust resetting _M_inner as per LWG 3569.
+ (join_view::_Iterator::_M_inner): Wrap in std::optional
+ as per LWG 3569.
+ (join_view::_Iterator::_Iterator): Relax constraints as
+ per LWG 3569.
+ (join_view::_Iterator::operator*): Adjust as per LWG 3569.
+ (join_view::_Iterator::operator->): Likewise.
+ (join_view::_Iterator::operator++): Likewise.
+ (join_view::_Iterator::operator--): Likewise.
+ (join_view::_Iterator::iter_move): Likewise.
+ (join_view::_Iterator::iter_swap): Likewise.
+ * testsuite/std/ranges/adaptors/join.cc (test14): New test.
+
+2022-09-13 Patrick Palka <ppalka@redhat.com>
+
+ * include/debug/safe_iterator.h (_GLIBCXX_DEBUG_VERIFY_OPERANDS):
+ Add parentheses to avoid -Wparentheses warning.
+
+2022-09-12 Patrick Palka <ppalka@redhat.com>
+
+ PR libstdc++/106320
+ * testsuite/std/ranges/adaptors/join.cc (test13): New test.
+
+2022-09-12 Jonathan Wakely <jwakely@redhat.com>
+
+ * testsuite/19_diagnostics/error_code/cons/lwg3629.cc: Fix
+ comments.
+ * testsuite/19_diagnostics/error_condition/cons/lwg3629.cc:
+ Likewise.
+
+2022-09-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/105329
+ * acinclude.m4 (libtool_VERSION): Change to 6:31:0.
+ * config/abi/pre/gnu.ver (GLIBCXX_3.4.21): Don't export
+ std::basic_string methods with name length of 15.
+ (GLIBCXX_3.4.31): Export std::basic_string::_M_replace_cold.
+ * testsuite/util/testsuite_abi.cc (check_version): Handle
+ GLIBCXX_3.4.31.
+ * include/bits/basic_string.h (std::basic_string::_M_replace_cold):
+ Declare.
+ * include/bits/basic_string.tcc (std::basic_string::_M_replace_cold):
+ Define and export even for C++20.
+ (std::basic_string::_M_replace): Use __builtin_expect, outline
+ the overlapping case to _M_replace_cold.
+ * configure: Regenerated.
+
2022-09-09 Jonathan Wakely <jwakely@redhat.com>
* include/bits/atomic_base.h (__atomic_impl::__compare_exchange):
diff --git a/libstdc++-v3/acinclude.m4 b/libstdc++-v3/acinclude.m4
index e3cc3a8..9aa81e1 100644
--- a/libstdc++-v3/acinclude.m4
+++ b/libstdc++-v3/acinclude.m4
@@ -3821,7 +3821,7 @@ changequote([,])dnl
fi
# For libtool versioning info, format is CURRENT:REVISION:AGE
-libtool_VERSION=6:30:0
+libtool_VERSION=6:31:0
# Everything parsed; figure out what files and settings to use.
case $enable_symvers in
diff --git a/libstdc++-v3/config/abi/pre/gnu-versioned-namespace.ver b/libstdc++-v3/config/abi/pre/gnu-versioned-namespace.ver
index b37199e..06ccaa8 100644
--- a/libstdc++-v3/config/abi/pre/gnu-versioned-namespace.ver
+++ b/libstdc++-v3/config/abi/pre/gnu-versioned-namespace.ver
@@ -76,20 +76,9 @@ GLIBCXX_8.0 {
# locale
_ZNSt3__89has_facetINS_*;
- # hash
- _ZNSt8__detail3__812__prime_listE;
- _ZNSt3tr18__detail3__812__prime_listE;
-
# thread/mutex/condition_variable/future
__once_proxy;
- # std::__detail::_List_node_base
- _ZNSt8__detail3__815_List_node_base7_M_hook*;
- _ZNSt8__detail3__815_List_node_base9_M_unhookEv;
- _ZNSt8__detail3__815_List_node_base10_M_reverseEv;
- _ZNSt8__detail3__815_List_node_base11_M_transfer*;
- _ZNSt8__detail3__815_List_node_base4swapER*;
-
# std::__convert_to_v
_ZNSt3__814__convert_to_v*;
diff --git a/libstdc++-v3/config/abi/pre/gnu.ver b/libstdc++-v3/config/abi/pre/gnu.ver
index 9b80a31..7f30377 100644
--- a/libstdc++-v3/config/abi/pre/gnu.ver
+++ b/libstdc++-v3/config/abi/pre/gnu.ver
@@ -1736,7 +1736,7 @@ GLIBCXX_3.4.21 {
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE12_M*;
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE13*;
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE14_M_replace_aux*;
- _ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE1[568-9]*;
+ _ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE1[68-9]*;
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE2at*;
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE3end*;
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE4back*;
@@ -2444,6 +2444,10 @@ GLIBCXX_3.4.30 {
} GLIBCXX_3.4.29;
+GLIBCXX_3.4.31 {
+ _ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE15_M_replace_cold*;
+} GLIBCXX_3.4.30;
+
# Symbols in the support library (libsupc++) have their own tag.
CXXABI_1.3 {
diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure
index 1772eef..07916d1 100755
--- a/libstdc++-v3/configure
+++ b/libstdc++-v3/configure
@@ -69034,7 +69034,7 @@ $as_echo "$as_me: WARNING: === Symbol versioning will be disabled." >&2;}
fi
# For libtool versioning info, format is CURRENT:REVISION:AGE
-libtool_VERSION=6:30:0
+libtool_VERSION=6:31:0
# Everything parsed; figure out what files and settings to use.
case $enable_symvers in
diff --git a/libstdc++-v3/doc/doxygen/user.cfg.in b/libstdc++-v3/doc/doxygen/user.cfg.in
index 57270bd..834ad9e 100644
--- a/libstdc++-v3/doc/doxygen/user.cfg.in
+++ b/libstdc++-v3/doc/doxygen/user.cfg.in
@@ -2407,7 +2407,6 @@ PREDEFINED = __cplusplus=202002L \
_GLIBCXX_HAVE_IS_CONSTANT_EVALUATED \
_GLIBCXX_HAVE_BUILTIN_LAUNDER \
"_GLIBCXX_DOXYGEN_ONLY(X)=X " \
- __allocator_base=std::__new_allocator \
__exception_ptr=__unspecified__ \
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
diff --git a/libstdc++-v3/doc/html/manual/abi.html b/libstdc++-v3/doc/html/manual/abi.html
index 82b03fd..1079886 100644
--- a/libstdc++-v3/doc/html/manual/abi.html
+++ b/libstdc++-v3/doc/html/manual/abi.html
@@ -128,7 +128,7 @@ compatible.
GLIBCPP_3.2 for symbols that were introduced in the GCC 3.2.0
release.) If a particular release is not listed, it has the same
version labels as the preceding release.
- </p><div class="itemizedlist"><ul class="itemizedlist" style="list-style-type: disc; "><li class="listitem"><p>GCC 3.0.0: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.1: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.2: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.3: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.4: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.1.0: GLIBCPP_3.1, CXXABI_1</p></li><li class="listitem"><p>GCC 3.1.1: GLIBCPP_3.1, CXXABI_1</p></li><li class="listitem"><p>GCC 3.2.0: GLIBCPP_3.2, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.2.1: GLIBCPP_3.2.1, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.2.2: GLIBCPP_3.2.2, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.2.3: GLIBCPP_3.2.2, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.3.0: GLIBCPP_3.2.2, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.3.1: GLIBCPP_3.2.3, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.3.2: GLIBCPP_3.2.3, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.3.3: GLIBCPP_3.2.3, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.4.0: GLIBCXX_3.4, CXXABI_1.3</p></li><li class="listitem"><p>GCC 3.4.1: GLIBCXX_3.4.1, CXXABI_1.3</p></li><li class="listitem"><p>GCC 3.4.2: GLIBCXX_3.4.2</p></li><li class="listitem"><p>GCC 3.4.3: GLIBCXX_3.4.3</p></li><li class="listitem"><p>GCC 4.0.0: GLIBCXX_3.4.4, CXXABI_1.3.1</p></li><li class="listitem"><p>GCC 4.0.1: GLIBCXX_3.4.5</p></li><li class="listitem"><p>GCC 4.0.2: GLIBCXX_3.4.6</p></li><li class="listitem"><p>GCC 4.0.3: GLIBCXX_3.4.7</p></li><li class="listitem"><p>GCC 4.1.1: GLIBCXX_3.4.8</p></li><li class="listitem"><p>GCC 4.2.0: GLIBCXX_3.4.9</p></li><li class="listitem"><p>GCC 4.3.0: GLIBCXX_3.4.10, CXXABI_1.3.2</p></li><li class="listitem"><p>GCC 4.4.0: GLIBCXX_3.4.11, CXXABI_1.3.3</p></li><li class="listitem"><p>GCC 4.4.1: GLIBCXX_3.4.12, CXXABI_1.3.3</p></li><li class="listitem"><p>GCC 4.4.2: GLIBCXX_3.4.13, 
CXXABI_1.3.3</p></li><li class="listitem"><p>GCC 4.5.0: GLIBCXX_3.4.14, CXXABI_1.3.4</p></li><li class="listitem"><p>GCC 4.6.0: GLIBCXX_3.4.15, CXXABI_1.3.5</p></li><li class="listitem"><p>GCC 4.6.1: GLIBCXX_3.4.16, CXXABI_1.3.5</p></li><li class="listitem"><p>GCC 4.7.0: GLIBCXX_3.4.17, CXXABI_1.3.6</p></li><li class="listitem"><p>GCC 4.8.0: GLIBCXX_3.4.18, CXXABI_1.3.7</p></li><li class="listitem"><p>GCC 4.8.3: GLIBCXX_3.4.19, CXXABI_1.3.7</p></li><li class="listitem"><p>GCC 4.9.0: GLIBCXX_3.4.20, CXXABI_1.3.8</p></li><li class="listitem"><p>GCC 5.1.0: GLIBCXX_3.4.21, CXXABI_1.3.9</p></li><li class="listitem"><p>GCC 6.1.0: GLIBCXX_3.4.22, CXXABI_1.3.10</p></li><li class="listitem"><p>GCC 7.1.0: GLIBCXX_3.4.23, CXXABI_1.3.11</p></li><li class="listitem"><p>GCC 7.2.0: GLIBCXX_3.4.24, CXXABI_1.3.11</p></li><li class="listitem"><p>GCC 8.1.0: GLIBCXX_3.4.25, CXXABI_1.3.11</p></li><li class="listitem"><p>GCC 9.1.0: GLIBCXX_3.4.26, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 9.2.0: GLIBCXX_3.4.27, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 9.3.0: GLIBCXX_3.4.28, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 10.1.0: GLIBCXX_3.4.28, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 11.1.0: GLIBCXX_3.4.29, CXXABI_1.3.13</p></li></ul></div></li><li class="listitem"><p>Incremental bumping of a compiler pre-defined macro,
+ </p><div class="itemizedlist"><ul class="itemizedlist" style="list-style-type: disc; "><li class="listitem"><p>GCC 3.0.0: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.1: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.2: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.3: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.0.4: (Error, not versioned)</p></li><li class="listitem"><p>GCC 3.1.0: GLIBCPP_3.1, CXXABI_1</p></li><li class="listitem"><p>GCC 3.1.1: GLIBCPP_3.1, CXXABI_1</p></li><li class="listitem"><p>GCC 3.2.0: GLIBCPP_3.2, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.2.1: GLIBCPP_3.2.1, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.2.2: GLIBCPP_3.2.2, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.2.3: GLIBCPP_3.2.2, CXXABI_1.2</p></li><li class="listitem"><p>GCC 3.3.0: GLIBCPP_3.2.2, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.3.1: GLIBCPP_3.2.3, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.3.2: GLIBCPP_3.2.3, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.3.3: GLIBCPP_3.2.3, CXXABI_1.2.1</p></li><li class="listitem"><p>GCC 3.4.0: GLIBCXX_3.4, CXXABI_1.3</p></li><li class="listitem"><p>GCC 3.4.1: GLIBCXX_3.4.1, CXXABI_1.3</p></li><li class="listitem"><p>GCC 3.4.2: GLIBCXX_3.4.2</p></li><li class="listitem"><p>GCC 3.4.3: GLIBCXX_3.4.3</p></li><li class="listitem"><p>GCC 4.0.0: GLIBCXX_3.4.4, CXXABI_1.3.1</p></li><li class="listitem"><p>GCC 4.0.1: GLIBCXX_3.4.5</p></li><li class="listitem"><p>GCC 4.0.2: GLIBCXX_3.4.6</p></li><li class="listitem"><p>GCC 4.0.3: GLIBCXX_3.4.7</p></li><li class="listitem"><p>GCC 4.1.1: GLIBCXX_3.4.8</p></li><li class="listitem"><p>GCC 4.2.0: GLIBCXX_3.4.9</p></li><li class="listitem"><p>GCC 4.3.0: GLIBCXX_3.4.10, CXXABI_1.3.2</p></li><li class="listitem"><p>GCC 4.4.0: GLIBCXX_3.4.11, CXXABI_1.3.3</p></li><li class="listitem"><p>GCC 4.4.1: GLIBCXX_3.4.12, CXXABI_1.3.3</p></li><li class="listitem"><p>GCC 4.4.2: GLIBCXX_3.4.13, 
CXXABI_1.3.3</p></li><li class="listitem"><p>GCC 4.5.0: GLIBCXX_3.4.14, CXXABI_1.3.4</p></li><li class="listitem"><p>GCC 4.6.0: GLIBCXX_3.4.15, CXXABI_1.3.5</p></li><li class="listitem"><p>GCC 4.6.1: GLIBCXX_3.4.16, CXXABI_1.3.5</p></li><li class="listitem"><p>GCC 4.7.0: GLIBCXX_3.4.17, CXXABI_1.3.6</p></li><li class="listitem"><p>GCC 4.8.0: GLIBCXX_3.4.18, CXXABI_1.3.7</p></li><li class="listitem"><p>GCC 4.8.3: GLIBCXX_3.4.19, CXXABI_1.3.7</p></li><li class="listitem"><p>GCC 4.9.0: GLIBCXX_3.4.20, CXXABI_1.3.8</p></li><li class="listitem"><p>GCC 5.1.0: GLIBCXX_3.4.21, CXXABI_1.3.9</p></li><li class="listitem"><p>GCC 6.1.0: GLIBCXX_3.4.22, CXXABI_1.3.10</p></li><li class="listitem"><p>GCC 7.1.0: GLIBCXX_3.4.23, CXXABI_1.3.11</p></li><li class="listitem"><p>GCC 7.2.0: GLIBCXX_3.4.24, CXXABI_1.3.11</p></li><li class="listitem"><p>GCC 8.1.0: GLIBCXX_3.4.25, CXXABI_1.3.11</p></li><li class="listitem"><p>GCC 9.1.0: GLIBCXX_3.4.26, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 9.2.0: GLIBCXX_3.4.27, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 9.3.0: GLIBCXX_3.4.28, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 10.1.0: GLIBCXX_3.4.28, CXXABI_1.3.12</p></li><li class="listitem"><p>GCC 11.1.0: GLIBCXX_3.4.29, CXXABI_1.3.13</p></li><li class="listitem"><p>GCC 12.1.0: GLIBCXX_3.4.30, CXXABI_1.3.13</p></li><li class="listitem"><p>GCC 13.1.0: GLIBCXX_3.4.31, CXXABI_1.3.13</p></li></ul></div></li><li class="listitem"><p>Incremental bumping of a compiler pre-defined macro,
__GXX_ABI_VERSION. This macro is defined as the version of the
compiler v3 ABI, with g++ 3.0 being version 100. This macro will
be automatically defined whenever g++ is used (the curious can
diff --git a/libstdc++-v3/doc/html/manual/bugs.html b/libstdc++-v3/doc/html/manual/bugs.html
index 38594a9..384fe8d 100644
--- a/libstdc++-v3/doc/html/manual/bugs.html
+++ b/libstdc++-v3/doc/html/manual/bugs.html
@@ -357,6 +357,9 @@
<span class="emphasis"><em>More algorithms that throw away information</em></span>
</span></dt><dd><p>The traditional HP / SGI return type and value is blessed
by the resolution of the DR.
+ </p></dd><dt><a id="manual.bugs.dr1203"></a><span class="term"><a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#1203" target="_top">1203</a>:
+ <span class="emphasis"><em>More useful rvalue stream insertion</em></span>
+ </span></dt><dd><p>Return the stream as its original type, not the base class.
</p></dd><dt><a id="manual.bugs.dr1339"></a><span class="term"><a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#1339" target="_top">1339</a>:
<span class="emphasis"><em>uninitialized_fill_n should return the end of its range</em></span>
</span></dt><dd><p>Return the end of the filled range.
diff --git a/libstdc++-v3/doc/html/manual/debug_mode_using.html b/libstdc++-v3/doc/html/manual/debug_mode_using.html
index 4deb498..e26d06c 100644
--- a/libstdc++-v3/doc/html/manual/debug_mode_using.html
+++ b/libstdc++-v3/doc/html/manual/debug_mode_using.html
@@ -9,7 +9,12 @@
units.</p><p>By default, error messages are formatted to fit on lines of about
78 characters. The environment variable
<code class="code">GLIBCXX_DEBUG_MESSAGE_LENGTH</code> can be used to request a
- different length.</p></div><div class="section"><div class="titlepage"><div><div><h3 class="title"><a id="debug_mode.using.specific"></a>Using a Specific Debug Container</h3></div></div></div><p>When it is not feasible to recompile your entire application, or
+ different length.</p><p>Note that libstdc++ is able to produce backtraces on error.
+ It requires that you configure libstdc++ build with
+ <code class="option">--enable-libstdcxx-backtrace=yes</code>.
+ Use <code class="code">-D_GLIBCXX_DEBUG_BACKTRACE</code> to activate it.
+ You'll then have to link with libstdc++_libbacktrace static library
+ (<code class="option">-lstdc++_libbacktrace</code>) to build your application.</p></div><div class="section"><div class="titlepage"><div><div><h3 class="title"><a id="debug_mode.using.specific"></a>Using a Specific Debug Container</h3></div></div></div><p>When it is not feasible to recompile your entire application, or
only specific containers need checking, debugging containers are
available as GNU extensions. These debugging containers are
functionally equivalent to the standard drop-in containers used in
diff --git a/libstdc++-v3/doc/html/manual/using_macros.html b/libstdc++-v3/doc/html/manual/using_macros.html
index edbbd03..9823046 100644
--- a/libstdc++-v3/doc/html/manual/using_macros.html
+++ b/libstdc++-v3/doc/html/manual/using_macros.html
@@ -95,6 +95,11 @@
the <a class="link" href="debug_mode.html" title="Chapter 17. Debug Mode">debug mode</a>, makes
the debug mode extremely picky by making the use of libstdc++
extensions and libstdc++-specific behavior into errors.
+ </p></dd><dt><span class="term"><code class="code">_GLIBCXX_DEBUG_BACKTRACE</code></span></dt><dd><p>
+ Undefined by default. Considered only if libstdc++ has been configured with
+ <code class="option">--enable-libstdcxx-backtrace=yes</code> and if <code class="code">_GLIBCXX_DEBUG</code>
+ is defined. When defined display backtraces on
+ <a class="link" href="debug_mode.html" title="Chapter 17. Debug Mode">debug mode</a> assertions.
</p></dd><dt><span class="term"><code class="code">_GLIBCXX_PARALLEL</code></span></dt><dd><p>Undefined by default. When defined, compiles user code
using the <a class="link" href="parallel_mode.html" title="Chapter 18. Parallel Mode">parallel
mode</a>.
diff --git a/libstdc++-v3/doc/xml/manual/abi.xml b/libstdc++-v3/doc/xml/manual/abi.xml
index c2c0c02..0153395 100644
--- a/libstdc++-v3/doc/xml/manual/abi.xml
+++ b/libstdc++-v3/doc/xml/manual/abi.xml
@@ -348,6 +348,8 @@ compatible.
<listitem><para>GCC 9.3.0: GLIBCXX_3.4.28, CXXABI_1.3.12</para></listitem>
<listitem><para>GCC 10.1.0: GLIBCXX_3.4.28, CXXABI_1.3.12</para></listitem>
<listitem><para>GCC 11.1.0: GLIBCXX_3.4.29, CXXABI_1.3.13</para></listitem>
+ <listitem><para>GCC 12.1.0: GLIBCXX_3.4.30, CXXABI_1.3.13</para></listitem>
+ <listitem><para>GCC 13.1.0: GLIBCXX_3.4.31, CXXABI_1.3.13</para></listitem>
</itemizedlist>
</listitem>
diff --git a/libstdc++-v3/doc/xml/manual/intro.xml b/libstdc++-v3/doc/xml/manual/intro.xml
index 290e5d3..d341c3e 100644
--- a/libstdc++-v3/doc/xml/manual/intro.xml
+++ b/libstdc++-v3/doc/xml/manual/intro.xml
@@ -852,6 +852,12 @@ requirements of the license of GCC.
by the resolution of the DR.
</para></listitem></varlistentry>
+ <varlistentry xml:id="manual.bugs.dr1203"><term><link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="&DR;#1203">1203</link>:
+ <emphasis>More useful rvalue stream insertion</emphasis>
+ </term>
+ <listitem><para>Return the stream as its original type, not the base class.
+ </para></listitem></varlistentry>
+
<varlistentry xml:id="manual.bugs.dr1339"><term><link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="&DR;#1339">1339</link>:
<emphasis>uninitialized_fill_n should return the end of its range</emphasis>
</term>
diff --git a/libstdc++-v3/include/backward/auto_ptr.h b/libstdc++-v3/include/backward/auto_ptr.h
index 184ab40..093db52 100644
--- a/libstdc++-v3/include/backward/auto_ptr.h
+++ b/libstdc++-v3/include/backward/auto_ptr.h
@@ -300,6 +300,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
} _GLIBCXX11_DEPRECATED;
#if __cplusplus >= 201103L
+#if _GLIBCXX_HOSTED
template<_Lock_policy _Lp>
template<typename _Tp>
inline
@@ -325,13 +326,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
inline
shared_ptr<_Tp>::shared_ptr(std::auto_ptr<_Tp1>&& __r)
: __shared_ptr<_Tp>(std::move(__r)) { }
+#endif // HOSTED
template<typename _Tp, typename _Dp>
template<typename _Up, typename>
inline
unique_ptr<_Tp, _Dp>::unique_ptr(auto_ptr<_Up>&& __u) noexcept
: _M_t(__u.release(), deleter_type()) { }
-#endif
+#endif // C++11
#pragma GCC diagnostic pop
diff --git a/libstdc++-v3/include/bits/alloc_traits.h b/libstdc++-v3/include/bits/alloc_traits.h
index f9ca37f..507e8f1 100644
--- a/libstdc++-v3/include/bits/alloc_traits.h
+++ b/libstdc++-v3/include/bits/alloc_traits.h
@@ -33,9 +33,11 @@
#include <bits/stl_construct.h>
#include <bits/memoryfwd.h>
#if __cplusplus >= 201103L
-# include <bits/allocator.h>
# include <bits/ptr_traits.h>
# include <ext/numeric_traits.h>
+# if _GLIBCXX_HOSTED
+# include <bits/allocator.h>
+# endif
#endif
namespace std _GLIBCXX_VISIBILITY(default)
@@ -402,6 +404,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{ return _S_select(__rhs, 0); }
};
+#if _GLIBCXX_HOSTED
+
#if __cplusplus > 201703L
# define __cpp_lib_constexpr_dynamic_alloc 201907L
#endif
@@ -660,6 +664,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
select_on_container_copy_construction(const allocator_type& __rhs)
{ return __rhs; }
};
+#endif
/// @cond undocumented
#if __cplusplus < 201703L
@@ -774,11 +779,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typename _Alloc::value_type const&>::type
{ };
+#if _GLIBCXX_HOSTED
// std::allocator<_Tp> just requires CopyConstructible
template<typename _Tp>
struct __is_copy_insertable<allocator<_Tp>>
: is_copy_constructible<_Tp>
{ };
+#endif
// true if _Alloc::value_type is MoveInsertable into containers using _Alloc
// (might be wrong if _Alloc::construct exists but is not constrained,
@@ -788,11 +795,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
: __is_alloc_insertable_impl<_Alloc, typename _Alloc::value_type>::type
{ };
+#if _GLIBCXX_HOSTED
// std::allocator<_Tp> just requires MoveConstructible
template<typename _Tp>
struct __is_move_insertable<allocator<_Tp>>
: is_move_constructible<_Tp>
{ };
+#endif
// Trait to detect Allocator-like types.
template<typename _Alloc, typename = void>
@@ -824,6 +833,54 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
/// @cond undocumented
+ // To implement Option 3 of DR 431.
+ template<typename _Alloc, bool = __is_empty(_Alloc)>
+ struct __alloc_swap
+ { static void _S_do_it(_Alloc&, _Alloc&) _GLIBCXX_NOEXCEPT { } };
+
+ template<typename _Alloc>
+ struct __alloc_swap<_Alloc, false>
+ {
+ static void
+ _S_do_it(_Alloc& __one, _Alloc& __two) _GLIBCXX_NOEXCEPT
+ {
+ // Precondition: swappable allocators.
+ if (__one != __two)
+ swap(__one, __two);
+ }
+ };
+
+#if __cplusplus >= 201103L
+ template<typename _Tp, bool
+ = __or_<is_copy_constructible<typename _Tp::value_type>,
+ is_nothrow_move_constructible<typename _Tp::value_type>>::value>
+ struct __shrink_to_fit_aux
+ { static bool _S_do_it(_Tp&) noexcept { return false; } };
+
+ template<typename _Tp>
+ struct __shrink_to_fit_aux<_Tp, true>
+ {
+ _GLIBCXX20_CONSTEXPR
+ static bool
+ _S_do_it(_Tp& __c) noexcept
+ {
+#if __cpp_exceptions
+ try
+ {
+ _Tp(__make_move_if_noexcept_iterator(__c.begin()),
+ __make_move_if_noexcept_iterator(__c.end()),
+ __c.get_allocator()).swap(__c);
+ return true;
+ }
+ catch(...)
+ { return false; }
+#else
+ return false;
+#endif
+ }
+ };
+#endif
+
/**
* Destroy a range of objects using the supplied allocator. For
* non-default allocators we do not optimize away invocation of
@@ -845,6 +902,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
}
+#if _GLIBCXX_HOSTED
template<typename _ForwardIterator, typename _Tp>
_GLIBCXX20_CONSTEXPR
inline void
@@ -853,6 +911,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
_Destroy(__first, __last);
}
+#endif
/// @endcond
_GLIBCXX_END_NAMESPACE_VERSION
diff --git a/libstdc++-v3/include/bits/allocator.h b/libstdc++-v3/include/bits/allocator.h
index aec0b37..54f5acf 100644
--- a/libstdc++-v3/include/bits/allocator.h
+++ b/libstdc++-v3/include/bits/allocator.h
@@ -265,6 +265,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef _Tp value_type;
template<typename _Up> allocator(const allocator<_Up>&) { }
};
+ /// @endcond
/// @} group allocator
@@ -278,72 +279,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Undefine.
#undef __allocator_base
- // To implement Option 3 of DR 431.
- template<typename _Alloc, bool = __is_empty(_Alloc)>
- struct __alloc_swap
- { static void _S_do_it(_Alloc&, _Alloc&) _GLIBCXX_NOEXCEPT { } };
-
- template<typename _Alloc>
- struct __alloc_swap<_Alloc, false>
- {
- static void
- _S_do_it(_Alloc& __one, _Alloc& __two) _GLIBCXX_NOEXCEPT
- {
- // Precondition: swappable allocators.
- if (__one != __two)
- swap(__one, __two);
- }
- };
-
- // Optimize for stateless allocators.
- template<typename _Alloc, bool = __is_empty(_Alloc)>
- struct __alloc_neq
- {
- static bool
- _S_do_it(const _Alloc&, const _Alloc&)
- { return false; }
- };
-
- template<typename _Alloc>
- struct __alloc_neq<_Alloc, false>
- {
- static bool
- _S_do_it(const _Alloc& __one, const _Alloc& __two)
- { return __one != __two; }
- };
-
-#if __cplusplus >= 201103L
- template<typename _Tp, bool
- = __or_<is_copy_constructible<typename _Tp::value_type>,
- is_nothrow_move_constructible<typename _Tp::value_type>>::value>
- struct __shrink_to_fit_aux
- { static bool _S_do_it(_Tp&) noexcept { return false; } };
-
- template<typename _Tp>
- struct __shrink_to_fit_aux<_Tp, true>
- {
- _GLIBCXX20_CONSTEXPR
- static bool
- _S_do_it(_Tp& __c) noexcept
- {
-#if __cpp_exceptions
- try
- {
- _Tp(__make_move_if_noexcept_iterator(__c.begin()),
- __make_move_if_noexcept_iterator(__c.end()),
- __c.get_allocator()).swap(__c);
- return true;
- }
- catch(...)
- { return false; }
-#else
- return false;
-#endif
- }
- };
-#endif
- /// @endcond
-
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
diff --git a/libstdc++-v3/include/bits/basic_string.h b/libstdc++-v3/include/bits/basic_string.h
index 0df64ea..cd24419 100644
--- a/libstdc++-v3/include/bits/basic_string.h
+++ b/libstdc++-v3/include/bits/basic_string.h
@@ -2504,6 +2504,10 @@ _GLIBCXX_BEGIN_NAMESPACE_CXX11
_M_replace_aux(size_type __pos1, size_type __n1, size_type __n2,
_CharT __c);
+ __attribute__((__noinline__, __noclone__, __cold__)) void
+ _M_replace_cold(pointer __p, size_type __len1, const _CharT* __s,
+ const size_type __len2, const size_type __how_much);
+
_GLIBCXX20_CONSTEXPR
basic_string&
_M_replace(size_type __pos, size_type __len1, const _CharT* __s,
diff --git a/libstdc++-v3/include/bits/basic_string.tcc b/libstdc++-v3/include/bits/basic_string.tcc
index 4563c61..710c2df 100644
--- a/libstdc++-v3/include/bits/basic_string.tcc
+++ b/libstdc++-v3/include/bits/basic_string.tcc
@@ -471,6 +471,37 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
template<typename _CharT, typename _Traits, typename _Alloc>
+ __attribute__((__noinline__, __noclone__, __cold__)) void
+ basic_string<_CharT, _Traits, _Alloc>::
+ _M_replace_cold(pointer __p, size_type __len1, const _CharT* __s,
+ const size_type __len2, const size_type __how_much)
+ {
+ // Work in-place.
+ if (__len2 && __len2 <= __len1)
+ this->_S_move(__p, __s, __len2);
+ if (__how_much && __len1 != __len2)
+ this->_S_move(__p + __len2, __p + __len1, __how_much);
+ if (__len2 > __len1)
+ {
+ if (__s + __len2 <= __p + __len1)
+ this->_S_move(__p, __s, __len2);
+ else if (__s >= __p + __len1)
+ {
+ // Hint to middle end that __p and __s overlap
+ // (PR 98465).
+ const size_type __poff = (__s - __p) + (__len2 - __len1);
+ this->_S_copy(__p, __p + __poff, __len2);
+ }
+ else
+ {
+ const size_type __nleft = (__p + __len1) - __s;
+ this->_S_move(__p, __s, __nleft);
+ this->_S_copy(__p + __nleft, __p + __len2, __len2 - __nleft);
+ }
+ }
+ }
+
+ template<typename _CharT, typename _Traits, typename _Alloc>
_GLIBCXX20_CONSTEXPR
basic_string<_CharT, _Traits, _Alloc>&
basic_string<_CharT, _Traits, _Alloc>::
@@ -500,7 +531,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
else
#endif
- if (_M_disjunct(__s))
+ if (__builtin_expect(_M_disjunct(__s), true))
{
if (__how_much && __len1 != __len2)
this->_S_move(__p + __len2, __p + __len1, __how_much);
@@ -508,32 +539,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
this->_S_copy(__p, __s, __len2);
}
else
- {
- // Work in-place.
- if (__len2 && __len2 <= __len1)
- this->_S_move(__p, __s, __len2);
- if (__how_much && __len1 != __len2)
- this->_S_move(__p + __len2, __p + __len1, __how_much);
- if (__len2 > __len1)
- {
- if (__s + __len2 <= __p + __len1)
- this->_S_move(__p, __s, __len2);
- else if (__s >= __p + __len1)
- {
- // Hint to middle end that __p and __s overlap
- // (PR 98465).
- const size_type __poff = (__s - __p) + (__len2 - __len1);
- this->_S_copy(__p, __p + __poff, __len2);
- }
- else
- {
- const size_type __nleft = (__p + __len1) - __s;
- this->_S_move(__p, __s, __nleft);
- this->_S_copy(__p + __nleft, __p + __len2,
- __len2 - __nleft);
- }
- }
- }
+ _M_replace_cold(__p, __len1, __s, __len2, __how_much);
}
else
this->_M_mutate(__pos, __len1, __s, __len2);
@@ -1000,6 +1006,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// to ensure the definition in libstdc++.so is unique (PR 86138).
extern template basic_string<char>::size_type
basic_string<char>::_Rep::_S_empty_rep_storage[];
+# elif _GLIBCXX_EXTERN_TEMPLATE > 0
+ // Export _M_replace_cold even for C++20.
+ extern template void
+ basic_string<char>::_M_replace_cold(char *, size_type, const char*,
+ const size_type, const size_type);
# endif
extern template
@@ -1021,6 +1032,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
# elif ! _GLIBCXX_USE_CXX11_ABI
extern template basic_string<wchar_t>::size_type
basic_string<wchar_t>::_Rep::_S_empty_rep_storage[];
+# elif _GLIBCXX_EXTERN_TEMPLATE > 0
+ // Export _M_replace_cold even for C++20.
+ extern template void
+ basic_string<wchar_t>::_M_replace_cold(wchar_t*, size_type, const wchar_t*,
+ const size_type, const size_type);
# endif
extern template
diff --git a/libstdc++-v3/include/bits/memoryfwd.h b/libstdc++-v3/include/bits/memoryfwd.h
index 751329c..ae164fa 100644
--- a/libstdc++-v3/include/bits/memoryfwd.h
+++ b/libstdc++-v3/include/bits/memoryfwd.h
@@ -60,11 +60,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
* @{
*/
+#if _GLIBCXX_HOSTED
template<typename>
class allocator;
template<>
class allocator<void>;
+#endif
#if __cplusplus >= 201103L
/// Declare uses_allocator so it can be specialized in `<queue>` etc.
diff --git a/libstdc++-v3/include/bits/ranges_algo.h b/libstdc++-v3/include/bits/ranges_algo.h
index 2a11636..228e10b 100644
--- a/libstdc++-v3/include/bits/ranges_algo.h
+++ b/libstdc++-v3/include/bits/ranges_algo.h
@@ -506,43 +506,7 @@ namespace ranges
inline constexpr __find_end_fn find_end{};
- struct __adjacent_find_fn
- {
- template<forward_iterator _Iter, sentinel_for<_Iter> _Sent,
- typename _Proj = identity,
- indirect_binary_predicate<projected<_Iter, _Proj>,
- projected<_Iter, _Proj>> _Pred
- = ranges::equal_to>
- constexpr _Iter
- operator()(_Iter __first, _Sent __last,
- _Pred __pred = {}, _Proj __proj = {}) const
- {
- if (__first == __last)
- return __first;
- auto __next = __first;
- for (; ++__next != __last; __first = __next)
- {
- if (std::__invoke(__pred,
- std::__invoke(__proj, *__first),
- std::__invoke(__proj, *__next)))
- return __first;
- }
- return __next;
- }
-
- template<forward_range _Range, typename _Proj = identity,
- indirect_binary_predicate<
- projected<iterator_t<_Range>, _Proj>,
- projected<iterator_t<_Range>, _Proj>> _Pred = ranges::equal_to>
- constexpr borrowed_iterator_t<_Range>
- operator()(_Range&& __r, _Pred __pred = {}, _Proj __proj = {}) const
- {
- return (*this)(ranges::begin(__r), ranges::end(__r),
- std::move(__pred), std::move(__proj));
- }
- };
-
- inline constexpr __adjacent_find_fn adjacent_find{};
+ // adjacent_find is defined in <bits/ranges_util.h>.
struct __is_permutation_fn
{
diff --git a/libstdc++-v3/include/bits/ranges_util.h b/libstdc++-v3/include/bits/ranges_util.h
index bb56dee..85ddea6 100644
--- a/libstdc++-v3/include/bits/ranges_util.h
+++ b/libstdc++-v3/include/bits/ranges_util.h
@@ -704,6 +704,44 @@ namespace ranges
inline constexpr __min_fn min{};
+ struct __adjacent_find_fn
+ {
+ template<forward_iterator _Iter, sentinel_for<_Iter> _Sent,
+ typename _Proj = identity,
+ indirect_binary_predicate<projected<_Iter, _Proj>,
+ projected<_Iter, _Proj>> _Pred
+ = ranges::equal_to>
+ constexpr _Iter
+ operator()(_Iter __first, _Sent __last,
+ _Pred __pred = {}, _Proj __proj = {}) const
+ {
+ if (__first == __last)
+ return __first;
+ auto __next = __first;
+ for (; ++__next != __last; __first = __next)
+ {
+ if (std::__invoke(__pred,
+ std::__invoke(__proj, *__first),
+ std::__invoke(__proj, *__next)))
+ return __first;
+ }
+ return __next;
+ }
+
+ template<forward_range _Range, typename _Proj = identity,
+ indirect_binary_predicate<
+ projected<iterator_t<_Range>, _Proj>,
+ projected<iterator_t<_Range>, _Proj>> _Pred = ranges::equal_to>
+ constexpr borrowed_iterator_t<_Range>
+ operator()(_Range&& __r, _Pred __pred = {}, _Proj __proj = {}) const
+ {
+ return (*this)(ranges::begin(__r), ranges::end(__r),
+ std::move(__pred), std::move(__proj));
+ }
+ };
+
+ inline constexpr __adjacent_find_fn adjacent_find{};
+
} // namespace ranges
using ranges::get;
diff --git a/libstdc++-v3/include/bits/shared_ptr_atomic.h b/libstdc++-v3/include/bits/shared_ptr_atomic.h
index d4bd712..55d193d 100644
--- a/libstdc++-v3/include/bits/shared_ptr_atomic.h
+++ b/libstdc++-v3/include/bits/shared_ptr_atomic.h
@@ -32,6 +32,32 @@
#include <bits/atomic_base.h>
+// Annotations for the custom locking in atomic<shared_ptr<T>>.
+#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
+#include <sanitizer/tsan_interface.h>
+#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
+ __tsan_mutex_destroy(X, __tsan_mutex_not_static)
+#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
+ __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
+#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
+ __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
+#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
+ __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
+#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
+#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
+#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
+#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
+#else
+#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
+#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
+#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
+#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
+#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
+#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
+#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
+#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
+#endif
+
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
@@ -377,6 +403,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
~_Atomic_count()
{
auto __val = _M_val.load(memory_order_relaxed);
+ _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
__glibcxx_assert(!(__val & _S_lock_bit));
if (auto __pi = reinterpret_cast<pointer>(__val))
{
@@ -406,16 +433,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__current = _M_val.load(memory_order_relaxed);
}
+ _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
+
while (!_M_val.compare_exchange_strong(__current,
__current | _S_lock_bit,
__o,
memory_order_relaxed))
{
+ _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val);
#if __cpp_lib_atomic_wait
__detail::__thread_relax();
#endif
__current = __current & ~_S_lock_bit;
+ _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
}
+ _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val);
return reinterpret_cast<pointer>(__current);
}
@@ -423,7 +455,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
unlock(memory_order __o) const noexcept
{
+ _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
_M_val.fetch_sub(1, __o);
+ _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
}
// Swaps the values of *this and __c, and unlocks *this.
@@ -434,7 +468,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__o != memory_order_seq_cst)
__o = memory_order_release;
auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
+ _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
__x = _M_val.exchange(__x, __o);
+ _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
__c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
}
@@ -443,20 +479,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_M_wait_unlock(memory_order __o) const noexcept
{
+ _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
+ _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
_M_val.wait(__v & ~_S_lock_bit, __o);
}
void
notify_one() noexcept
{
+ _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
_M_val.notify_one();
+ _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
}
void
notify_all() noexcept
{
+ _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
_M_val.notify_all();
+ _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
}
#endif
diff --git a/libstdc++-v3/include/bits/stl_algo.h b/libstdc++-v3/include/bits/stl_algo.h
index 57fa1c1..e63fe66 100644
--- a/libstdc++-v3/include/bits/stl_algo.h
+++ b/libstdc++-v3/include/bits/stl_algo.h
@@ -57,16 +57,19 @@
#define _STL_ALGO_H 1
#include <bits/algorithmfwd.h>
+#include <bits/stl_algobase.h>
#include <bits/stl_heap.h>
-#include <bits/stl_tempbuf.h> // for _Temporary_buffer
#include <bits/predefined_ops.h>
#if __cplusplus >= 201103L
#include <bits/uniform_int_dist.h>
#endif
-#if _GLIBCXX_HOSTED && (__cplusplus <= 201103L || _GLIBCXX_USE_DEPRECATED)
-#include <cstdlib> // for rand
+#if _GLIBCXX_HOSTED
+# include <bits/stl_tempbuf.h> // for _Temporary_buffer
+# if (__cplusplus <= 201103L || _GLIBCXX_USE_DEPRECATED)
+# include <cstdlib> // for rand
+# endif
#endif
// See concept_check.h for the __glibcxx_*_requires macros.
@@ -1491,6 +1494,7 @@ _GLIBCXX_END_INLINE_ABI_NAMESPACE(_V2)
}
}
+#if _GLIBCXX_HOSTED
// partition
/// This is a helper function...
@@ -1616,6 +1620,7 @@ _GLIBCXX_END_INLINE_ABI_NAMESPACE(_V2)
return std::__stable_partition(__first, __last,
__gnu_cxx::__ops::__pred_iter(__pred));
}
+#endif // HOSTED
/// @cond undocumented
@@ -2526,7 +2531,6 @@ _GLIBCXX_END_INLINE_ABI_NAMESPACE(_V2)
_ValueType;
typedef typename iterator_traits<_BidirectionalIterator>::difference_type
_DistanceType;
- typedef _Temporary_buffer<_BidirectionalIterator, _ValueType> _TmpBuf;
if (__first == __middle || __middle == __last)
return;
@@ -2534,6 +2538,8 @@ _GLIBCXX_END_INLINE_ABI_NAMESPACE(_V2)
const _DistanceType __len1 = std::distance(__first, __middle);
const _DistanceType __len2 = std::distance(__middle, __last);
+#if _GLIBCXX_HOSTED
+ typedef _Temporary_buffer<_BidirectionalIterator, _ValueType> _TmpBuf;
// __merge_adaptive will use a buffer for the smaller of
// [first,middle) and [middle,last).
_TmpBuf __buf(__first, std::min(__len1, __len2));
@@ -2548,6 +2554,10 @@ _GLIBCXX_END_INLINE_ABI_NAMESPACE(_V2)
std::__merge_adaptive_resize
(__first, __middle, __last, __len1, __len2, __buf.begin(),
_DistanceType(__buf.size()), __comp);
+#else
+ std::__merge_without_buffer
+ (__first, __middle, __last, __len1, __len2, __comp);
+#endif
}
/**
@@ -4584,7 +4594,7 @@ _GLIBCXX_BEGIN_NAMESPACE_ALGO
std::iter_swap(__i, __j);
}
}
-#endif
+#endif // HOSTED
/**
* @brief Shuffle the elements of a sequence using a random number
@@ -5016,11 +5026,12 @@ _GLIBCXX_BEGIN_NAMESPACE_ALGO
_ValueType;
typedef typename iterator_traits<_RandomAccessIterator>::difference_type
_DistanceType;
- typedef _Temporary_buffer<_RandomAccessIterator, _ValueType> _TmpBuf;
if (__first == __last)
return;
+#if _GLIBCXX_HOSTED
+ typedef _Temporary_buffer<_RandomAccessIterator, _ValueType> _TmpBuf;
// __stable_sort_adaptive sorts the range in two halves,
// so the buffer only needs to fit half the range at once.
_TmpBuf __buf(__first, (__last - __first + 1) / 2);
@@ -5034,6 +5045,9 @@ _GLIBCXX_BEGIN_NAMESPACE_ALGO
else
std::__stable_sort_adaptive_resize(__first, __last, __buf.begin(),
_DistanceType(__buf.size()), __comp);
+#else
+ std::__inplace_stable_sort(__first, __last, __comp);
+#endif
}
/**
diff --git a/libstdc++-v3/include/bits/stl_algobase.h b/libstdc++-v3/include/bits/stl_algobase.h
index 84a1f9e..ae898ed 100644
--- a/libstdc++-v3/include/bits/stl_algobase.h
+++ b/libstdc++-v3/include/bits/stl_algobase.h
@@ -442,6 +442,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
_GLIBCXX_END_NAMESPACE_CONTAINER
+#if _GLIBCXX_HOSTED
// Helpers for streambuf iterators (either istream or ostream).
// NB: avoid including <iosfwd>, relatively large.
template<typename _CharT>
@@ -479,6 +480,7 @@ _GLIBCXX_END_NAMESPACE_CONTAINER
istreambuf_iterator<_CharT, char_traits<_CharT> >,
istreambuf_iterator<_CharT, char_traits<_CharT> >,
_GLIBCXX_STD_C::_Deque_iterator<_CharT, _CharT&, _CharT*>);
+#endif // HOSTED
template<bool _IsMove, typename _II, typename _OI>
_GLIBCXX20_CONSTEXPR
@@ -574,6 +576,7 @@ _GLIBCXX_END_NAMESPACE_CONTAINER
return __result;
}
+#if _GLIBCXX_HOSTED
template<typename _CharT, typename _Size>
typename __gnu_cxx::__enable_if<
__is_char<_CharT>::__value, _CharT*>::__type
@@ -587,6 +590,7 @@ _GLIBCXX_END_NAMESPACE_CONTAINER
__copy_n_a(istreambuf_iterator<_CharT, char_traits<_CharT> >, _Size,
_GLIBCXX_STD_C::_Deque_iterator<_CharT, _CharT&, _CharT*>,
bool);
+#endif
/**
* @brief Copies the range [first,last) into result.
diff --git a/libstdc++-v3/include/bits/stl_list.h b/libstdc++-v3/include/bits/stl_list.h
index b8bd461..a73ca60 100644
--- a/libstdc++-v3/include/bits/stl_list.h
+++ b/libstdc++-v3/include/bits/stl_list.h
@@ -2026,10 +2026,9 @@ _GLIBCXX_BEGIN_NAMESPACE_CXX11
// To implement the splice (and merge) bits of N1599.
void
- _M_check_equal_allocators(list& __x) _GLIBCXX_NOEXCEPT
+ _M_check_equal_allocators(const list& __x) _GLIBCXX_NOEXCEPT
{
- if (std::__alloc_neq<typename _Base::_Node_alloc_type>::
- _S_do_it(_M_get_Node_allocator(), __x._M_get_Node_allocator()))
+ if (_M_get_Node_allocator() != __x._M_get_Node_allocator())
__builtin_abort();
}
diff --git a/libstdc++-v3/include/bits/stl_tempbuf.h b/libstdc++-v3/include/bits/stl_tempbuf.h
index db7cdb1..b13aa3b 100644
--- a/libstdc++-v3/include/bits/stl_tempbuf.h
+++ b/libstdc++-v3/include/bits/stl_tempbuf.h
@@ -56,8 +56,11 @@
#ifndef _STL_TEMPBUF_H
#define _STL_TEMPBUF_H 1
-#include <bits/stl_algobase.h>
+#include <new>
+#include <bits/exception_defines.h>
#include <bits/stl_construct.h>
+#include <bits/stl_pair.h>
+#include <ext/numeric_traits.h>
namespace std _GLIBCXX_VISIBILITY(default)
{
diff --git a/libstdc++-v3/include/bits/stl_uninitialized.h b/libstdc++-v3/include/bits/stl_uninitialized.h
index 7ed69f5..0b32074 100644
--- a/libstdc++-v3/include/bits/stl_uninitialized.h
+++ b/libstdc++-v3/include/bits/stl_uninitialized.h
@@ -359,6 +359,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+#if _GLIBCXX_HOSTED
template<typename _InputIterator, typename _ForwardIterator, typename _Tp>
_GLIBCXX20_CONSTEXPR
inline _ForwardIterator
@@ -371,6 +372,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
return std::uninitialized_copy(__first, __last, __result);
}
+#endif
template<typename _InputIterator, typename _ForwardIterator,
typename _Allocator>
@@ -418,6 +420,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+#if _GLIBCXX_HOSTED
template<typename _ForwardIterator, typename _Tp, typename _Tp2>
_GLIBCXX20_CONSTEXPR
inline void
@@ -430,6 +433,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
std::uninitialized_fill(__first, __last, __x);
}
+#endif
template<typename _ForwardIterator, typename _Size, typename _Tp,
typename _Allocator>
@@ -453,6 +457,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+#if _GLIBCXX_HOSTED
template<typename _ForwardIterator, typename _Size, typename _Tp,
typename _Tp2>
_GLIBCXX20_CONSTEXPR
@@ -466,7 +471,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
return std::uninitialized_fill_n(__first, __n, __x);
}
-
+#endif
// Extensions: __uninitialized_copy_move, __uninitialized_move_copy,
// __uninitialized_fill_move, __uninitialized_move_fill.
@@ -725,13 +730,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+#if _GLIBCXX_HOSTED
template<typename _ForwardIterator, typename _Tp>
inline void
__uninitialized_default_a(_ForwardIterator __first,
_ForwardIterator __last,
allocator<_Tp>&)
{ std::__uninitialized_default(__first, __last); }
-
+#endif
// __uninitialized_default_n_a
// Fills [first, first + n) with value_types constructed by the allocator
@@ -756,6 +762,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+#if _GLIBCXX_HOSTED
// __uninitialized_default_n_a specialization for std::allocator,
// which ignores the allocator and value-initializes the elements.
template<typename _ForwardIterator, typename _Size, typename _Tp>
@@ -764,6 +771,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__uninitialized_default_n_a(_ForwardIterator __first, _Size __n,
allocator<_Tp>&)
{ return std::__uninitialized_default_n(__first, __n); }
+#endif
template<bool _TrivialValueType>
struct __uninitialized_default_novalue_1
@@ -1094,6 +1102,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __cur;
}
+#if _GLIBCXX_HOSTED
template <typename _Tp, typename _Up>
_GLIBCXX20_CONSTEXPR
inline __enable_if_t<std::__is_bitwise_relocatable<_Tp>::value, _Tp*>
@@ -1118,7 +1127,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
return __result + __count;
}
-
+#endif
template <typename _InputIterator, typename _ForwardIterator,
typename _Allocator>
@@ -1136,7 +1145,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
/// @endcond
-#endif
+#endif // C++11
/// @} group memory
diff --git a/libstdc++-v3/include/bits/stream_iterator.h b/libstdc++-v3/include/bits/stream_iterator.h
index 86c5845..0a1362a 100644
--- a/libstdc++-v3/include/bits/stream_iterator.h
+++ b/libstdc++-v3/include/bits/stream_iterator.h
@@ -32,6 +32,9 @@
#pragma GCC system_header
+#include <iosfwd>
+#include <bits/move.h>
+#include <bits/stl_iterator_base_types.h>
#include <debug/debug.h>
namespace std _GLIBCXX_VISIBILITY(default)
diff --git a/libstdc++-v3/include/bits/streambuf_iterator.h b/libstdc++-v3/include/bits/streambuf_iterator.h
index 72344c6..c26ac24 100644
--- a/libstdc++-v3/include/bits/streambuf_iterator.h
+++ b/libstdc++-v3/include/bits/streambuf_iterator.h
@@ -33,6 +33,7 @@
#pragma GCC system_header
#include <streambuf>
+#include <bits/stl_iterator_base_types.h>
#include <debug/debug.h>
namespace std _GLIBCXX_VISIBILITY(default)
diff --git a/libstdc++-v3/include/bits/unique_ptr.h b/libstdc++-v3/include/bits/unique_ptr.h
index e1ad772..1086f40 100644
--- a/libstdc++-v3/include/bits/unique_ptr.h
+++ b/libstdc++-v3/include/bits/unique_ptr.h
@@ -36,9 +36,11 @@
#include <tuple>
#include <bits/stl_function.h>
#include <bits/functional_hash.h>
-#if __cplusplus > 201703L
+#if __cplusplus >= 202002L
# include <compare>
-# include <ostream>
+# if _GLIBCXX_HOSTED
+# include <ostream>
+# endif
#endif
#if __cplusplus > 202002L && __cpp_constexpr_dynamic_alloc
@@ -1031,7 +1033,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
public __uniq_ptr_hash<unique_ptr<_Tp, _Dp>>
{ };
-#if __cplusplus >= 201402L
+#if __cplusplus >= 201402L && _GLIBCXX_HOSTED
#define __cpp_lib_make_unique 201304L
/// @cond undocumented
@@ -1131,9 +1133,9 @@ namespace __detail
make_unique_for_overwrite(_Args&&...) = delete;
#endif // C++20
-#endif // C++14
+#endif // C++14 && HOSTED
-#if __cplusplus > 201703L && __cpp_concepts
+#if __cplusplus > 201703L && __cpp_concepts && _GLIBCXX_HOSTED
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 2948. unique_ptr does not define operator<< for stream output
/// Stream output operator for unique_ptr
@@ -1148,7 +1150,7 @@ namespace __detail
__os << __p.get();
return __os;
}
-#endif // C++20
+#endif // C++20 && HOSTED
/// @} group pointer_abstractions
diff --git a/libstdc++-v3/include/c_global/cstdlib b/libstdc++-v3/include/c_global/cstdlib
index 8a832af..0f7362e 100644
--- a/libstdc++-v3/include/c_global/cstdlib
+++ b/libstdc++-v3/include/c_global/cstdlib
@@ -51,19 +51,23 @@
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
+#define NULL __null
namespace std
{
- extern "C" void abort(void) throw () _GLIBCXX_NORETURN;
- extern "C" int atexit(void (*)(void)) throw ();
- extern "C" void exit(int) throw () _GLIBCXX_NORETURN;
+ extern "C" void abort(void) _GLIBCXX_NOTHROW _GLIBCXX_NORETURN;
+ extern "C" int atexit(void (*)(void)) _GLIBCXX_NOTHROW;
+ extern "C" void exit(int) _GLIBCXX_NOTHROW _GLIBCXX_NORETURN;
#if __cplusplus >= 201103L
# ifdef _GLIBCXX_HAVE_AT_QUICK_EXIT
- extern "C" int at_quick_exit(void (*)(void)) throw ();
+ extern "C" int at_quick_exit(void (*)(void)) _GLIBCXX_NOTHROW;
# endif
# ifdef _GLIBCXX_HAVE_QUICK_EXIT
- extern "C" void quick_exit(int) throw() _GLIBCXX_NORETURN;
+ extern "C" void quick_exit(int) _GLIBCXX_NOTHROW _GLIBCXX_NORETURN;
# endif
+#if _GLIBCXX_USE_C99_STDLIB
+ extern "C" void _Exit(int) _GLIBCXX_NOTHROW _GLIBCXX_NORETURN;
+#endif
#endif
} // namespace std
@@ -174,7 +178,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#ifndef __CORRECT_ISO_CPP_STDLIB_H_PROTO
inline ldiv_t
- div(long __i, long __j) { return ldiv(__i, __j); }
+ div(long __i, long __j) _GLIBCXX_NOTHROW { return ldiv(__i, __j); }
#endif
@@ -200,7 +204,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
using ::lldiv_t;
#endif
#if _GLIBCXX_USE_C99_CHECK || _GLIBCXX_USE_C99_DYNAMIC
- extern "C" void (_Exit)(int) throw () _GLIBCXX_NORETURN;
+ extern "C" void (_Exit)(int) _GLIBCXX_NOTHROW _GLIBCXX_NORETURN;
#endif
#if !_GLIBCXX_USE_C99_DYNAMIC
using ::_Exit;
@@ -217,11 +221,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
#if _GLIBCXX_USE_C99_LONG_LONG_CHECK || _GLIBCXX_USE_C99_LONG_LONG_DYNAMIC
- extern "C" long long int (atoll)(const char *) throw ();
+ extern "C" long long int (atoll)(const char *) _GLIBCXX_NOTHROW;
extern "C" long long int
- (strtoll)(const char * __restrict, char ** __restrict, int) throw ();
+ (strtoll)(const char * __restrict, char ** __restrict, int) _GLIBCXX_NOTHROW;
extern "C" unsigned long long int
- (strtoull)(const char * __restrict, char ** __restrict, int) throw ();
+ (strtoull)(const char * __restrict, char ** __restrict, int) _GLIBCXX_NOTHROW;
#endif
#if !_GLIBCXX_USE_C99_LONG_LONG_DYNAMIC
using ::atoll;
diff --git a/libstdc++-v3/include/debug/assertions.h b/libstdc++-v3/include/debug/assertions.h
index 57c0ab2..c3b5ad0 100644
--- a/libstdc++-v3/include/debug/assertions.h
+++ b/libstdc++-v3/include/debug/assertions.h
@@ -31,14 +31,6 @@
#include <bits/c++config.h>
-#ifndef _GLIBCXX_DEBUG
-
-# define _GLIBCXX_DEBUG_ASSERT(_Condition)
-# define _GLIBCXX_DEBUG_PEDASSERT(_Condition)
-# define _GLIBCXX_DEBUG_ONLY(_Statement)
-
-#endif
-
#ifndef _GLIBCXX_ASSERTIONS
# define __glibcxx_requires_non_empty_range(_First,_Last)
# define __glibcxx_requires_nonempty()
@@ -55,7 +47,8 @@
__glibcxx_assert(!this->empty())
#endif
-#ifdef _GLIBCXX_DEBUG
+#if defined _GLIBCXX_DEBUG && _GLIBCXX_HOSTED
+
# define _GLIBCXX_DEBUG_ASSERT(_Condition) __glibcxx_assert(_Condition)
# ifdef _GLIBCXX_DEBUG_PEDANTIC
@@ -65,6 +58,11 @@
# endif
# define _GLIBCXX_DEBUG_ONLY(_Statement) _Statement
+
+#else
+# define _GLIBCXX_DEBUG_ASSERT(_Condition)
+# define _GLIBCXX_DEBUG_PEDASSERT(_Condition)
+# define _GLIBCXX_DEBUG_ONLY(_Statement)
#endif
#endif // _GLIBCXX_DEBUG_ASSERTIONS
diff --git a/libstdc++-v3/include/debug/debug.h b/libstdc++-v3/include/debug/debug.h
index f423376..78546d7 100644
--- a/libstdc++-v3/include/debug/debug.h
+++ b/libstdc++-v3/include/debug/debug.h
@@ -61,7 +61,7 @@ namespace __gnu_debug
struct _Safe_iterator;
}
-#ifndef _GLIBCXX_DEBUG
+#if ! defined _GLIBCXX_DEBUG || ! _GLIBCXX_HOSTED
# define __glibcxx_requires_cond(_Cond,_Msg)
# define __glibcxx_requires_valid_range(_First,_Last)
diff --git a/libstdc++-v3/include/debug/formatter.h b/libstdc++-v3/include/debug/formatter.h
index b4b7238..f120163 100644
--- a/libstdc++-v3/include/debug/formatter.h
+++ b/libstdc++-v3/include/debug/formatter.h
@@ -609,8 +609,7 @@ namespace __gnu_debug
, _M_function(__function)
#if _GLIBCXX_HAVE_STACKTRACE
# ifdef _GLIBCXX_DEBUG_BACKTRACE
- , _M_backtrace_state(
- __glibcxx_backtrace_create_state(nullptr, 0, nullptr, nullptr))
+ , _M_backtrace_state(__glibcxx_backtrace_create_state(0, 0, 0, 0))
, _M_backtrace_full(&__glibcxx_backtrace_full)
# else
, _M_backtrace_state()
diff --git a/libstdc++-v3/include/debug/safe_iterator.h b/libstdc++-v3/include/debug/safe_iterator.h
index 33f7a86..117dc93 100644
--- a/libstdc++-v3/include/debug/safe_iterator.h
+++ b/libstdc++-v3/include/debug/safe_iterator.h
@@ -40,7 +40,7 @@
#endif
#define _GLIBCXX_DEBUG_VERIFY_OPERANDS(_Lhs, _Rhs, _BadMsgId, _DiffMsgId) \
- _GLIBCXX_DEBUG_VERIFY(!_Lhs._M_singular() && !_Rhs._M_singular() \
+ _GLIBCXX_DEBUG_VERIFY((!_Lhs._M_singular() && !_Rhs._M_singular()) \
|| (_Lhs._M_value_initialized() \
&& _Rhs._M_value_initialized()), \
_M_message(_BadMsgId) \
diff --git a/libstdc++-v3/include/ext/alloc_traits.h b/libstdc++-v3/include/ext/alloc_traits.h
index 1d7d959..c9547c7 100644
--- a/libstdc++-v3/include/ext/alloc_traits.h
+++ b/libstdc++-v3/include/ext/alloc_traits.h
@@ -32,9 +32,6 @@
#pragma GCC system_header
# include <bits/alloc_traits.h>
-#if __cplusplus < 201103L
-# include <bits/allocator.h> // for __alloc_swap
-#endif
namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
diff --git a/libstdc++-v3/include/std/bit b/libstdc++-v3/include/std/bit
index ef19d64..2fd8018 100644
--- a/libstdc++-v3/include/std/bit
+++ b/libstdc++-v3/include/std/bit
@@ -35,7 +35,7 @@
#include <type_traits>
-#if _GLIBCXX_HOSTED
+#if _GLIBCXX_HOSTED || __has_include(<ext/numeric_traits.h>)
# include <ext/numeric_traits.h>
#else
# include <limits>
diff --git a/libstdc++-v3/include/std/functional b/libstdc++-v3/include/std/functional
index 685a3e1..c4f7588 100644
--- a/libstdc++-v3/include/std/functional
+++ b/libstdc++-v3/include/std/functional
@@ -56,18 +56,22 @@
#include <bits/functional_hash.h>
#include <bits/invoke.h>
#include <bits/refwrap.h> // std::reference_wrapper and _Mem_fn_traits
-#include <bits/std_function.h> // std::function
-#if __cplusplus > 201402L
-# include <unordered_map>
-# include <vector>
-# include <array>
-# include <bits/stl_algo.h>
+#if _GLIBCXX_HOSTED
+# include <bits/std_function.h> // std::function
+#endif
+#if __cplusplus >= 201703L
+# if _GLIBCXX_HOSTED
+# include <unordered_map>
+# include <vector>
+# include <array>
+# endif
+# include <bits/stl_algo.h> // std::search
#endif
#if __cplusplus > 201703L
# include <bits/ranges_cmp.h>
# include <compare>
#endif
-#if __cplusplus > 202002L
+#if __cplusplus > 202002L && _GLIBCXX_HOSTED
# include <bits/move_only_function.h>
#endif
@@ -238,7 +242,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
/**
* @brief Trait that identifies a bind expression.
- *
+ *
* Determines if the given type `_Tp` is a function object that
* should be treated as a subexpression when evaluating calls to
* function objects returned by `std::bind`.
@@ -1117,6 +1121,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
tuple<_ForwardIterator1, _ForwardIterator1, _BinaryPredicate> _M_m;
};
+#if _GLIBCXX_HOSTED
template<typename _Key, typename _Tp, typename _Hash, typename _Pred>
struct __boyer_moore_map_base
{
@@ -1359,6 +1364,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
return std::make_pair(__last, __last);
}
+#endif // HOSTED
#endif // C++17
#endif // C++14
diff --git a/libstdc++-v3/include/std/future b/libstdc++-v3/include/std/future
index ba1f28c..a1b2d7f 100644
--- a/libstdc++-v3/include/std/future
+++ b/libstdc++-v3/include/std/future
@@ -559,6 +559,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static _Setter<_Res, __exception_ptr_tag>
__setter(exception_ptr& __ex, promise<_Res>* __prom) noexcept
{
+ __glibcxx_assert(__ex != nullptr); // LWG 2276
return _Setter<_Res, __exception_ptr_tag>{ __prom, &__ex };
}
diff --git a/libstdc++-v3/include/std/iterator b/libstdc++-v3/include/std/iterator
index 7f8fc50..fb2a47c 100644
--- a/libstdc++-v3/include/std/iterator
+++ b/libstdc++-v3/include/std/iterator
@@ -61,9 +61,10 @@
#include <bits/stl_iterator_base_types.h>
#include <bits/stl_iterator_base_funcs.h>
#include <bits/stl_iterator.h>
-#include <iosfwd>
-#include <bits/stream_iterator.h>
-#include <bits/streambuf_iterator.h>
+#if _GLIBCXX_HOSTED
+# include <bits/stream_iterator.h>
+# include <bits/streambuf_iterator.h>
+#endif
#include <bits/range_access.h>
#if __cplusplus >= 201402L && ! defined _GLIBCXX_DEBUG // PR libstdc++/70303
diff --git a/libstdc++-v3/include/std/memory b/libstdc++-v3/include/std/memory
index 481fa42..3eff121 100644
--- a/libstdc++-v3/include/std/memory
+++ b/libstdc++-v3/include/std/memory
@@ -60,8 +60,10 @@
* Smart pointers, etc.
*/
-#include <bits/stl_algobase.h>
-#include <bits/allocator.h>
+#include <bits/memoryfwd.h>
+#if _GLIBCXX_HOSTED
+# include <bits/allocator.h>
+#endif
#include <bits/stl_construct.h>
#include <bits/stl_uninitialized.h>
#include <bits/stl_tempbuf.h>
@@ -74,8 +76,10 @@
# include <bits/alloc_traits.h>
# include <debug/debug.h>
# include <bits/unique_ptr.h>
+# if _GLIBCXX_HOSTED
# include <bits/shared_ptr.h>
# include <bits/shared_ptr_atomic.h>
+# endif
#endif
#if __cplusplus < 201103L || _GLIBCXX_USE_DEPRECATED
@@ -87,7 +91,7 @@
# include <bits/uses_allocator_args.h>
#endif
-#if __cplusplus >= 201103L && __cplusplus <= 202002L
+#if __cplusplus >= 201103L && __cplusplus <= 202002L && _GLIBCXX_HOSTED
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
@@ -134,7 +138,7 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // C++11 to C++20
-#if __cplusplus >= 201703L
+#if __cplusplus >= 201703L && _GLIBCXX_HOSTED
// Parallel STL algorithms
# if _PSTL_EXECUTION_POLICIES_DEFINED
// If <execution> has already been included, pull in implementations
diff --git a/libstdc++-v3/include/std/ranges b/libstdc++-v3/include/std/ranges
index 20eb4e8..c2eacde 100644
--- a/libstdc++-v3/include/std/ranges
+++ b/libstdc++-v3/include/std/ranges
@@ -701,6 +701,7 @@ namespace views
inline constexpr _Iota iota{};
} // namespace views
+#if _GLIBCXX_HOSTED
namespace __detail
{
template<typename _Val, typename _CharT, typename _Traits>
@@ -804,6 +805,7 @@ namespace views
template<typename _Tp>
inline constexpr _Istream<_Tp> istream;
}
+#endif // HOSTED
// C++20 24.7 [range.adaptors] Range adaptors
@@ -2234,9 +2236,11 @@ namespace views::__adaptor
template<typename _Range>
inline constexpr bool __is_basic_string_view = false;
+#if _GLIBCXX_HOSTED
template<typename _CharT, typename _Traits>
inline constexpr bool __is_basic_string_view<basic_string_view<_CharT, _Traits>>
= true;
+#endif
template<typename _Range>
inline constexpr bool __is_subrange = false;
@@ -2746,7 +2750,7 @@ namespace views::__adaptor
}
if constexpr (_S_ref_is_glvalue)
- _M_inner = _Inner_iter();
+ _M_inner.reset();
}
static constexpr auto
@@ -2769,7 +2773,7 @@ namespace views::__adaptor
using _Inner_iter = join_view::_Inner_iter<_Const>;
_Outer_iter _M_outer = _Outer_iter();
- _Inner_iter _M_inner = _Inner_iter();
+ optional<_Inner_iter> _M_inner;
_Parent* _M_parent = nullptr;
public:
@@ -2780,9 +2784,7 @@ namespace views::__adaptor
= common_type_t<range_difference_t<_Base>,
range_difference_t<range_reference_t<_Base>>>;
- _Iterator() requires (default_initializable<_Outer_iter>
- && default_initializable<_Inner_iter>)
- = default;
+ _Iterator() requires default_initializable<_Outer_iter> = default;
constexpr
_Iterator(_Parent* __parent, _Outer_iter __outer)
@@ -2801,7 +2803,7 @@ namespace views::__adaptor
constexpr decltype(auto)
operator*() const
- { return *_M_inner; }
+ { return **_M_inner; }
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 3500. join_view::iterator::operator->() is bogus
@@ -2809,7 +2811,7 @@ namespace views::__adaptor
operator->() const
requires __detail::__has_arrow<_Inner_iter>
&& copyable<_Inner_iter>
- { return _M_inner; }
+ { return *_M_inner; }
constexpr _Iterator&
operator++()
@@ -2820,7 +2822,7 @@ namespace views::__adaptor
else
return *_M_parent->_M_inner;
}();
- if (++_M_inner == ranges::end(__inner_range))
+ if (++*_M_inner == ranges::end(__inner_range))
{
++_M_outer;
_M_satisfy();
@@ -2850,9 +2852,9 @@ namespace views::__adaptor
{
if (_M_outer == ranges::end(_M_parent->_M_base))
_M_inner = ranges::end(*--_M_outer);
- while (_M_inner == ranges::begin(*_M_outer))
- _M_inner = ranges::end(*--_M_outer);
- --_M_inner;
+ while (*_M_inner == ranges::begin(*_M_outer))
+ *_M_inner = ranges::end(*--_M_outer);
+ --*_M_inner;
return *this;
}
@@ -2879,14 +2881,14 @@ namespace views::__adaptor
friend constexpr decltype(auto)
iter_move(const _Iterator& __i)
- noexcept(noexcept(ranges::iter_move(__i._M_inner)))
- { return ranges::iter_move(__i._M_inner); }
+ noexcept(noexcept(ranges::iter_move(*__i._M_inner)))
+ { return ranges::iter_move(*__i._M_inner); }
friend constexpr void
iter_swap(const _Iterator& __x, const _Iterator& __y)
- noexcept(noexcept(ranges::iter_swap(__x._M_inner, __y._M_inner)))
+ noexcept(noexcept(ranges::iter_swap(*__x._M_inner, *__y._M_inner)))
requires indirectly_swappable<_Inner_iter>
- { return ranges::iter_swap(__x._M_inner, __y._M_inner); }
+ { return ranges::iter_swap(*__x._M_inner, *__y._M_inner); }
friend _Iterator<!_Const>;
template<bool> friend struct _Sentinel;
@@ -5778,6 +5780,1099 @@ namespace views::__adaptor
inline constexpr auto pairwise_transform = adjacent_transform<2>;
}
+
+ namespace __detail
+ {
+ template<typename _Tp>
+ constexpr _Tp __div_ceil(_Tp __num, _Tp __denom)
+ {
+ _Tp __r = __num / __denom;
+ if (__num % __denom)
+ ++__r;
+ return __r;
+ }
+ }
+
+ template<view _Vp>
+ requires input_range<_Vp>
+ class chunk_view : public view_interface<chunk_view<_Vp>>
+ {
+ _Vp _M_base;
+ range_difference_t<_Vp> _M_n;
+ range_difference_t<_Vp> _M_remainder = 0;
+ __detail::__non_propagating_cache<iterator_t<_Vp>> _M_current;
+
+ class _OuterIter;
+ class _InnerIter;
+
+ public:
+ constexpr explicit
+ chunk_view(_Vp __base, range_difference_t<_Vp> __n)
+ : _M_base(std::move(__base)), _M_n(__n)
+ { __glibcxx_assert(__n > 0); }
+
+ constexpr _Vp
+ base() const & requires copy_constructible<_Vp>
+ { return _M_base; }
+
+ constexpr _Vp
+ base() &&
+ { return std::move(_M_base); }
+
+ constexpr _OuterIter
+ begin()
+ {
+ _M_current = ranges::begin(_M_base);
+ _M_remainder = _M_n;
+ return _OuterIter(*this);
+ }
+
+ constexpr default_sentinel_t
+ end() const noexcept
+ { return default_sentinel; }
+
+ constexpr auto
+ size() requires sized_range<_Vp>
+ {
+ return __detail::__to_unsigned_like(__detail::__div_ceil
+ (ranges::distance(_M_base), _M_n));
+ }
+
+ constexpr auto
+ size() const requires sized_range<const _Vp>
+ {
+ return __detail::__to_unsigned_like(__detail::__div_ceil
+ (ranges::distance(_M_base), _M_n));
+ }
+ };
+
+ template<typename _Range>
+ chunk_view(_Range&&, range_difference_t<_Range>) -> chunk_view<views::all_t<_Range>>;
+
+ template<view _Vp>
+ requires input_range<_Vp>
+ class chunk_view<_Vp>::_OuterIter
+ {
+ chunk_view* _M_parent;
+
+ constexpr explicit
+ _OuterIter(chunk_view& __parent) noexcept
+ : _M_parent(std::__addressof(__parent))
+ { }
+
+ friend chunk_view;
+
+ public:
+ using iterator_concept = input_iterator_tag;
+ using difference_type = range_difference_t<_Vp>;
+
+ struct value_type;
+
+ _OuterIter(_OuterIter&&) = default;
+ _OuterIter& operator=(_OuterIter&&) = default;
+
+ constexpr value_type
+ operator*() const
+ {
+ __glibcxx_assert(*this != default_sentinel);
+ return value_type(*_M_parent);
+ }
+
+ constexpr _OuterIter&
+ operator++()
+ {
+ __glibcxx_assert(*this != default_sentinel);
+ ranges::advance(*_M_parent->_M_current, _M_parent->_M_remainder,
+ ranges::end(_M_parent->_M_base));
+ _M_parent->_M_remainder = _M_parent->_M_n;
+ return *this;
+ }
+
+ constexpr void
+ operator++(int)
+ { ++*this; }
+
+ friend constexpr bool
+ operator==(const _OuterIter& __x, default_sentinel_t)
+ {
+ return *__x._M_parent->_M_current == ranges::end(__x._M_parent->_M_base)
+ && __x._M_parent->_M_remainder != 0;
+ }
+
+ friend constexpr difference_type
+ operator-(default_sentinel_t, const _OuterIter& __x)
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ {
+ const auto __dist = ranges::end(__x._M_parent->_M_base) - *__x._M_parent->_M_current;
+
+ if (__dist < __x._M_parent->_M_remainder)
+ return __dist == 0 ? 0 : 1;
+
+ return 1 + __detail::__div_ceil(__dist - __x._M_parent->_M_remainder,
+ __x._M_parent->_M_n);
+ }
+
+ friend constexpr difference_type
+ operator-(const _OuterIter& __x, default_sentinel_t __y)
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ { return -(__y - __x); }
+ };
+
+ template<view _Vp>
+ requires input_range<_Vp>
+ struct chunk_view<_Vp>::_OuterIter::value_type : view_interface<value_type>
+ {
+ private:
+ chunk_view* _M_parent;
+
+ constexpr explicit
+ value_type(chunk_view& __parent) noexcept
+ : _M_parent(std::__addressof(__parent))
+ { }
+
+ friend _OuterIter;
+
+ public:
+ constexpr _InnerIter
+ begin() const noexcept
+ { return _InnerIter(*_M_parent); }
+
+ constexpr default_sentinel_t
+ end() const noexcept
+ { return default_sentinel; }
+
+ constexpr auto
+ size() const
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ {
+ return __detail::__to_unsigned_like
+ (ranges::min(_M_parent->_M_remainder,
+ ranges::end(_M_parent->_M_base) - *_M_parent->_M_current));
+ }
+ };
+
+ template<view _Vp>
+ requires input_range<_Vp>
+ class chunk_view<_Vp>::_InnerIter
+ {
+ chunk_view* _M_parent;
+
+ constexpr explicit
+ _InnerIter(chunk_view& __parent) noexcept
+ : _M_parent(std::__addressof(__parent))
+ { }
+
+ friend _OuterIter::value_type;
+
+ public:
+ using iterator_concept = input_iterator_tag;
+ using difference_type = range_difference_t<_Vp>;
+ using value_type = range_value_t<_Vp>;
+
+ _InnerIter(_InnerIter&&) = default;
+ _InnerIter& operator=(_InnerIter&&) = default;
+
+ constexpr const iterator_t<_Vp>&
+ base() const &
+ { return *_M_parent->_M_current; }
+
+ constexpr range_reference_t<_Vp>
+ operator*() const
+ {
+ __glibcxx_assert(*this != default_sentinel);
+ return **_M_parent->_M_current;
+ }
+
+ constexpr _InnerIter&
+ operator++()
+ {
+ __glibcxx_assert(*this != default_sentinel);
+ ++*_M_parent->_M_current;
+ if (*_M_parent->_M_current == ranges::end(_M_parent->_M_base))
+ _M_parent->_M_remainder = 0;
+ else
+ --_M_parent->_M_remainder;
+ return *this;
+ }
+
+ constexpr void
+ operator++(int)
+ { ++*this; }
+
+ friend constexpr bool
+ operator==(const _InnerIter& __x, default_sentinel_t) noexcept
+ { return __x._M_parent->_M_remainder == 0; }
+
+ friend constexpr difference_type
+ operator-(default_sentinel_t, const _InnerIter& __x)
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ {
+ return ranges::min(__x._M_parent->_M_remainder,
+ ranges::end(__x._M_parent->_M_base) - *__x._M_parent->_M_current);
+ }
+
+ friend constexpr difference_type
+ operator-(const _InnerIter& __x, default_sentinel_t __y)
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ { return -(__y - __x); }
+ };
+
+ template<view _Vp>
+ requires forward_range<_Vp>
+ class chunk_view<_Vp> : public view_interface<chunk_view<_Vp>>
+ {
+ _Vp _M_base;
+ range_difference_t<_Vp> _M_n;
+ template<bool> class _Iterator;
+
+ public:
+ constexpr explicit
+ chunk_view(_Vp __base, range_difference_t<_Vp> __n)
+ : _M_base(std::move(__base)), _M_n(__n)
+ { __glibcxx_assert(__n > 0); }
+
+ constexpr _Vp
+ base() const & requires copy_constructible<_Vp>
+ { return _M_base; }
+
+ constexpr _Vp
+ base() &&
+ { return std::move(_M_base); }
+
+ constexpr auto
+ begin() requires (!__detail::__simple_view<_Vp>)
+ { return _Iterator<false>(this, ranges::begin(_M_base)); }
+
+ constexpr auto
+ begin() const requires forward_range<const _Vp>
+ { return _Iterator<true>(this, ranges::begin(_M_base)); }
+
+ constexpr auto
+ end() requires (!__detail::__simple_view<_Vp>)
+ {
+ if constexpr (common_range<_Vp> && sized_range<_Vp>)
+ {
+ auto __missing = (_M_n - ranges::distance(_M_base) % _M_n) % _M_n;
+ return _Iterator<false>(this, ranges::end(_M_base), __missing);
+ }
+ else if constexpr (common_range<_Vp> && !bidirectional_range<_Vp>)
+ return _Iterator<false>(this, ranges::end(_M_base));
+ else
+ return default_sentinel;
+ }
+
+ constexpr auto
+ end() const requires forward_range<const _Vp>
+ {
+ if constexpr (common_range<const _Vp> && sized_range<const _Vp>)
+ {
+ auto __missing = (_M_n - ranges::distance(_M_base) % _M_n) % _M_n;
+ return _Iterator<true>(this, ranges::end(_M_base), __missing);
+ }
+ else if constexpr (common_range<const _Vp> && !bidirectional_range<const _Vp>)
+ return _Iterator<true>(this, ranges::end(_M_base));
+ else
+ return default_sentinel;
+ }
+
+ constexpr auto
+ size() requires sized_range<_Vp>
+ {
+ return __detail::__to_unsigned_like(__detail::__div_ceil
+ (ranges::distance(_M_base), _M_n));
+ }
+
+ constexpr auto
+ size() const requires sized_range<const _Vp>
+ {
+ return __detail::__to_unsigned_like(__detail::__div_ceil
+ (ranges::distance(_M_base), _M_n));
+ }
+ };
+
+ template<typename _Vp>
+ inline constexpr bool enable_borrowed_range<chunk_view<_Vp>>
+ = forward_range<_Vp> && enable_borrowed_range<_Vp>;
+
+ template<view _Vp>
+ requires forward_range<_Vp>
+ template<bool _Const>
+ class chunk_view<_Vp>::_Iterator
+ {
+ using _Parent = __detail::__maybe_const_t<_Const, chunk_view>;
+ using _Base = __detail::__maybe_const_t<_Const, _Vp>;
+
+ iterator_t<_Base> _M_current = iterator_t<_Base>();
+ sentinel_t<_Base> _M_end = sentinel_t<_Base>();
+ range_difference_t<_Base> _M_n = 0;
+ range_difference_t<_Base> _M_missing = 0;
+
+ constexpr
+ _Iterator(_Parent* __parent, iterator_t<_Base> __current,
+ range_difference_t<_Base> __missing = 0)
+ : _M_current(__current), _M_end(ranges::end(__parent->_M_base)),
+ _M_n(__parent->_M_n), _M_missing(__missing)
+ { }
+
+ static auto
+ _S_iter_cat()
+ {
+ if constexpr (random_access_range<_Base>)
+ return random_access_iterator_tag{};
+ else if constexpr (bidirectional_range<_Base>)
+ return bidirectional_iterator_tag{};
+ else
+ return forward_iterator_tag{};
+ }
+
+ friend chunk_view;
+
+ public:
+ using iterator_category = input_iterator_tag;
+ using iterator_concept = decltype(_S_iter_cat());
+ using value_type = decltype(views::take(subrange(_M_current, _M_end), _M_n));
+ using difference_type = range_difference_t<_Base>;
+
+ _Iterator() = default;
+
+ constexpr _Iterator(_Iterator<!_Const> __i)
+ requires _Const
+ && convertible_to<iterator_t<_Vp>, iterator_t<_Base>>
+ && convertible_to<sentinel_t<_Vp>, sentinel_t<_Base>>
+ : _M_current(std::move(__i._M_current)), _M_end(std::move(__i._M_end)),
+ _M_n(__i._M_n), _M_missing(__i._M_missing)
+ { }
+
+ constexpr iterator_t<_Base>
+ base() const
+ { return _M_current; }
+
+ constexpr value_type
+ operator*() const
+ {
+ __glibcxx_assert(_M_current != _M_end);
+ return views::take(subrange(_M_current, _M_end), _M_n);
+ }
+
+ constexpr _Iterator&
+ operator++()
+ {
+ __glibcxx_assert(_M_current != _M_end);
+ _M_missing = ranges::advance(_M_current, _M_n, _M_end);
+ return *this;
+ }
+
+ constexpr _Iterator
+ operator++(int)
+ {
+ auto __tmp = *this;
+ ++*this;
+ return __tmp;
+ }
+
+ constexpr _Iterator&
+ operator--() requires bidirectional_range<_Base>
+ {
+ ranges::advance(_M_current, _M_missing - _M_n);
+ _M_missing = 0;
+ return *this;
+ }
+
+ constexpr _Iterator
+ operator--(int) requires bidirectional_range<_Base>
+ {
+ auto __tmp = *this;
+ --*this;
+ return __tmp;
+ }
+
+ // Advance by __x chunks.  Forward: assert there are enough elements for
+ // __x - 1 whole chunks, then take a bounded step recording any shortfall.
+ // Backward: undo whole chunks plus the recorded shortfall, which resets
+ // _M_missing to zero (we are again chunk-aligned).
+ constexpr _Iterator&
+ operator+=(difference_type __x)
+ requires random_access_range<_Base>
+ {
+ if (__x > 0)
+ {
+ __glibcxx_assert(ranges::distance(_M_current, _M_end) > _M_n * (__x - 1));
+ _M_missing = ranges::advance(_M_current, _M_n * __x, _M_end);
+ }
+ else if (__x < 0)
+ {
+ ranges::advance(_M_current, _M_n * __x + _M_missing);
+ _M_missing = 0;
+ }
+ return *this;
+ }
+
+ // Defined in terms of operator+= with the negated count.
+ constexpr _Iterator&
+ operator-=(difference_type __x)
+ requires random_access_range<_Base>
+ { return *this += -__x; }
+
+ // Random access: the chunk __n chunks away from this one.
+ constexpr value_type
+ operator[](difference_type __n) const
+ requires random_access_range<_Base>
+ { return *(*this + __n); }
+
+ // Two chunk iterators are equal when they start at the same position.
+ friend constexpr bool
+ operator==(const _Iterator& __x, const _Iterator& __y)
+ { return __x._M_current == __y._M_current; }
+
+ // At the end when the cached underlying end has been reached.
+ friend constexpr bool
+ operator==(const _Iterator& __x, default_sentinel_t)
+ { return __x._M_current == __x._M_end; }
+
+ // True when __x refers to an earlier position in the underlying range,
+ // per [range.chunk.iter] (x.current_ < y.current_).
+ // Fixed: this previously compared with '>', which inverted the meaning
+ // of < and > (and of <= / >=, which are defined via this operator).
+ friend constexpr bool
+ operator<(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return __x._M_current < __y._M_current; }
+
+ // Defined in terms of operator<.
+ friend constexpr bool
+ operator>(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return __y < __x; }
+
+ // Defined in terms of operator<.
+ friend constexpr bool
+ operator<=(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return !(__y < __x); }
+
+ // Defined in terms of operator<.
+ friend constexpr bool
+ operator>=(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return !(__x < __y); }
+
+ // Three-way comparison, available when the underlying iterator has it.
+ friend constexpr auto
+ operator<=>(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ && three_way_comparable<iterator_t<_Base>>
+ { return __x._M_current <=> __y._M_current; }
+
+ // iter + n, implemented via operator+= on a copy.
+ friend constexpr _Iterator
+ operator+(const _Iterator& __i, difference_type __n)
+ requires random_access_range<_Base>
+ {
+ auto __r = __i;
+ __r += __n;
+ return __r;
+ }
+
+ // n + iter, symmetric overload.
+ friend constexpr _Iterator
+ operator+(difference_type __n, const _Iterator& __i)
+ requires random_access_range<_Base>
+ {
+ auto __r = __i;
+ __r += __n;
+ return __r;
+ }
+
+ // iter - n, implemented via operator-= on a copy.
+ friend constexpr _Iterator
+ operator-(const _Iterator& __i, difference_type __n)
+ requires random_access_range<_Base>
+ {
+ auto __r = __i;
+ __r -= __n;
+ return __r;
+ }
+
+ // Number of chunks between two iterators: element distance corrected by
+ // each side's recorded shortfall, divided by the chunk size.
+ friend constexpr difference_type
+ operator-(const _Iterator& __x, const _Iterator& __y)
+ requires sized_sentinel_for<iterator_t<_Base>, iterator_t<_Base>>
+ {
+ return (__x._M_current - __y._M_current
+ + __x._M_missing - __y._M_missing) / __x._M_n;
+ }
+
+ // Chunks remaining until the end: ceiling of remaining elements / n,
+ // since a final partial chunk still counts as a chunk.
+ friend constexpr difference_type
+ operator-(default_sentinel_t __y, const _Iterator& __x)
+ requires sized_sentinel_for<sentinel_t<_Base>, iterator_t<_Base>>
+ { return __detail::__div_ceil(__x._M_end - __x._M_current, __x._M_n); }
+
+ // Negation of the sentinel - iterator overload above.
+ friend constexpr difference_type
+ operator-(const _Iterator& __x, default_sentinel_t __y)
+ requires sized_sentinel_for<sentinel_t<_Base>, iterator_t<_Base>>
+ { return -(__y - __x); }
+ };
+
+ namespace views
+ {
+ namespace __detail
+ {
+ // Satisfied when chunk_view can be constructed from _Range and count _Dp.
+ template<typename _Range, typename _Dp>
+ concept __can_chunk_view
+ = requires { chunk_view(std::declval<_Range>(), std::declval<_Dp>()); };
+ }
+
+ // Range adaptor object implementing views::chunk (P2442R1): r | chunk(n)
+ // or chunk(r, n) yields chunk_view(r, n).
+ struct _Chunk : __adaptor::_RangeAdaptor<_Chunk>
+ {
+ template<viewable_range _Range, typename _Dp = range_difference_t<_Range>>
+ requires __detail::__can_chunk_view<_Range, _Dp>
+ constexpr auto
+ operator() [[nodiscard]] (_Range&& __r, type_identity_t<_Dp> __n) const
+ { return chunk_view(std::forward<_Range>(__r), __n); }
+
+ using __adaptor::_RangeAdaptor<_Chunk>::operator();
+ static constexpr int _S_arity = 2;
+ static constexpr bool _S_has_simple_extra_args = true;
+ };
+
+ inline constexpr _Chunk chunk;
+ }
+
+ namespace __detail
+ {
+ // Classification of how slide_view must cache positions:
+ // random-access + sized ranges need no caching at all;
+ // otherwise bidirectional common ranges cache the end position;
+ // all remaining (forward-only) ranges cache the first window's end.
+ template<typename _Vp>
+ concept __slide_caches_nothing = random_access_range<_Vp> && sized_range<_Vp>;
+
+ template<typename _Vp>
+ concept __slide_caches_last
+ = !__slide_caches_nothing<_Vp> && bidirectional_range<_Vp> && common_range<_Vp>;
+
+ template<typename _Vp>
+ concept __slide_caches_first
+ = !__slide_caches_nothing<_Vp> && !__slide_caches_last<_Vp>;
+ }
+
+ // slide_view (P2442R1): a view of overlapping windows of size n over an
+ // underlying forward range.  Which of the two _CachedPosition members is
+ // actually present depends on the __slide_caches_* classification above.
+ template<forward_range _Vp>
+ requires view<_Vp>
+ class slide_view : public view_interface<slide_view<_Vp>>
+ {
+ _Vp _M_base;
+ range_difference_t<_Vp> _M_n;
+ [[no_unique_address]]
+ __detail::__maybe_present_t<__detail::__slide_caches_first<_Vp>,
+ __detail::_CachedPosition<_Vp>> _M_cached_begin;
+ [[no_unique_address]]
+ __detail::__maybe_present_t<__detail::__slide_caches_last<_Vp>,
+ __detail::_CachedPosition<_Vp>> _M_cached_end;
+
+ template<bool> class _Iterator;
+ class _Sentinel;
+
+ public:
+ // Window size must be positive.
+ constexpr explicit
+ slide_view(_Vp __base, range_difference_t<_Vp> __n)
+ : _M_base(std::move(__base)), _M_n(__n)
+ { __glibcxx_assert(__n > 0); }
+
+ // For forward-only ranges the iterator also tracks the last element of
+ // the current window; computing it for the first window is O(n), so the
+ // result is cached for subsequent begin() calls.
+ constexpr auto
+ begin() requires (!(__detail::__simple_view<_Vp>
+ && __detail::__slide_caches_nothing<const _Vp>))
+ {
+ if constexpr (__detail::__slide_caches_first<_Vp>)
+ {
+ iterator_t<_Vp> __it;
+ if (_M_cached_begin._M_has_value())
+ __it = _M_cached_begin._M_get(_M_base)
+ else
+ {
+ __it = ranges::next(ranges::begin(_M_base), _M_n - 1, ranges::end(_M_base));
+ _M_cached_begin._M_set(_M_base, __it);
+ }
+ return _Iterator<false>(ranges::begin(_M_base), std::move(__it), _M_n);
+ }
+ else
+ return _Iterator<false>(ranges::begin(_M_base), _M_n);
+ }
+
+ constexpr auto
+ begin() const requires __detail::__slide_caches_nothing<const _Vp>
+ { return _Iterator<true>(ranges::begin(_M_base), _M_n); }
+
+ // end(): computed directly for sized random-access ranges; cached (the
+ // position n-1 before the end) for bidirectional common ranges; otherwise
+ // either a common-range iterator pair or a sentinel.
+ constexpr auto
+ end() requires (!(__detail::__simple_view<_Vp>
+ && __detail::__slide_caches_nothing<const _Vp>))
+ {
+ if constexpr (__detail::__slide_caches_nothing<_Vp>)
+ return _Iterator<false>(ranges::begin(_M_base) + range_difference_t<_Vp>(size()),
+ _M_n);
+ else if constexpr (__detail::__slide_caches_last<_Vp>)
+ {
+ iterator_t<_Vp> __it;
+ if (_M_cached_end._M_has_value())
+ __it = _M_cached_end._M_get(_M_base);
+ else
+ {
+ __it = ranges::prev(ranges::end(_M_base), _M_n - 1, ranges::begin(_M_base));
+ _M_cached_end._M_set(_M_base, __it);
+ }
+ return _Iterator<false>(std::move(__it), _M_n);
+ }
+ else if constexpr (common_range<_Vp>)
+ return _Iterator<false>(ranges::end(_M_base), ranges::end(_M_base), _M_n);
+ else
+ return _Sentinel(ranges::end(_M_base));
+ }
+
+ constexpr auto
+ end() const requires __detail::__slide_caches_nothing<const _Vp>
+ { return begin() + range_difference_t<const _Vp>(size()); }
+
+ // Number of windows: distance - n + 1, clamped at zero.
+ constexpr auto
+ size() requires sized_range<_Vp>
+ {
+ auto __sz = ranges::distance(_M_base) - _M_n + 1;
+ if (__sz < 0)
+ __sz = 0;
+ return __detail::__to_unsigned_like(__sz);
+ }
+
+ constexpr auto
+ size() const requires sized_range<const _Vp>
+ {
+ auto __sz = ranges::distance(_M_base) - _M_n + 1;
+ if (__sz < 0)
+ __sz = 0;
+ return __detail::__to_unsigned_like(__sz);
+ }
+ };
+
+ // Deduction guide: wrap the argument range in views::all.
+ template<typename _Range>
+ slide_view(_Range&&, range_difference_t<_Range>) -> slide_view<views::all_t<_Range>>;
+
+ // slide_view borrows iff the underlying view does.
+ template<typename _Vp>
+ inline constexpr bool enable_borrowed_range<slide_view<_Vp>>
+ = enable_borrowed_range<_Vp>;
+
+ // Iterator over the windows of a slide_view.  For forward-only underlying
+ // ranges (__slide_caches_first) it additionally carries _M_last_elt, an
+ // iterator to the last element of the current window, which all mutating
+ // operations keep in lock-step with _M_current.
+ template<forward_range _Vp>
+ requires view<_Vp>
+ template<bool _Const>
+ class slide_view<_Vp>::_Iterator
+ {
+ using _Base = __detail::__maybe_const_t<_Const, _Vp>;
+ static constexpr bool _S_last_elt_present
+ = __detail::__slide_caches_first<_Base>;
+
+ iterator_t<_Base> _M_current = iterator_t<_Base>();
+ [[no_unique_address]]
+ __detail::__maybe_present_t<_S_last_elt_present, iterator_t<_Base>>
+ _M_last_elt = decltype(_M_last_elt)();
+ range_difference_t<_Base> _M_n = 0;
+
+ // Constructor used when no last-element tracking is needed.
+ constexpr
+ _Iterator(iterator_t<_Base> __current, range_difference_t<_Base> __n)
+ requires (!_S_last_elt_present)
+ : _M_current(__current), _M_n(__n)
+ { }
+
+ // Constructor used when the last element of the window is tracked.
+ constexpr
+ _Iterator(iterator_t<_Base> __current, iterator_t<_Base> __last_elt,
+ range_difference_t<_Base> __n)
+ requires _S_last_elt_present
+ : _M_current(__current), _M_last_elt(__last_elt), _M_n(__n)
+ { }
+
+ // Strongest tag supported by the underlying range; used for
+ // iterator_concept (iterator_category is always input_iterator_tag
+ // because operator* returns a prvalue view).
+ static auto
+ _S_iter_concept()
+ {
+ if constexpr (random_access_range<_Base>)
+ return random_access_iterator_tag{};
+ else if constexpr (bidirectional_range<_Base>)
+ return bidirectional_iterator_tag{};
+ else
+ return forward_iterator_tag{};
+ }
+
+ friend slide_view;
+ friend slide_view::_Sentinel;
+
+ public:
+ using iterator_category = input_iterator_tag;
+ using iterator_concept = decltype(_S_iter_concept());
+ using value_type = decltype(views::counted(_M_current, _M_n));
+ using difference_type = range_difference_t<_Base>;
+
+ _Iterator() = default;
+
+ // Converting constructor: non-const to const iterator.  Only current
+ // and n are carried over (const variant never tracks _M_last_elt).
+ constexpr
+ _Iterator(_Iterator<!_Const> __i)
+ requires _Const && convertible_to<iterator_t<_Vp>, iterator_t<_Base>>
+ : _M_current(std::move(__i._M_current)), _M_n(__i._M_n)
+ { }
+
+ // The current window: n elements starting at the current position.
+ constexpr auto
+ operator*() const
+ { return views::counted(_M_current, _M_n); }
+
+ constexpr _Iterator&
+ operator++()
+ {
+ ++_M_current;
+ if constexpr (_S_last_elt_present)
+ ++_M_last_elt;
+ return *this;
+ }
+
+ constexpr _Iterator
+ operator++(int)
+ {
+ auto __tmp = *this;
+ ++*this;
+ return __tmp;
+ }
+
+ constexpr _Iterator&
+ operator--() requires bidirectional_range<_Base>
+ {
+ --_M_current;
+ if constexpr (_S_last_elt_present)
+ --_M_last_elt;
+ return *this;
+ }
+
+ constexpr _Iterator
+ operator--(int) requires bidirectional_range<_Base>
+ {
+ auto __tmp = *this;
+ --*this;
+ return __tmp;
+ }
+
+ constexpr _Iterator&
+ operator+=(difference_type __x)
+ requires random_access_range<_Base>
+ {
+ _M_current += __x;
+ if constexpr (_S_last_elt_present)
+ _M_last_elt += __x;
+ return *this;
+ }
+
+ constexpr _Iterator&
+ operator-=(difference_type __x)
+ requires random_access_range<_Base>
+ {
+ _M_current -= __x;
+ if constexpr (_S_last_elt_present)
+ _M_last_elt -= __x;
+ return *this;
+ }
+
+ constexpr auto
+ operator[](difference_type __n) const
+ requires random_access_range<_Base>
+ { return views::counted(_M_current + __n, _M_n); }
+
+ // Equality compares whichever iterator pair the configuration tracks;
+ // the two stay a fixed distance apart, so either comparison agrees.
+ friend constexpr bool
+ operator==(const _Iterator& __x, const _Iterator& __y)
+ {
+ if constexpr (_S_last_elt_present)
+ return __x._M_last_elt == __y._M_last_elt;
+ else
+ return __x._M_current == __y._M_current;
+ }
+
+ friend constexpr bool
+ operator<(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return __x._M_current < __y._M_current; }
+
+ friend constexpr bool
+ operator>(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return __y < __x; }
+
+ friend constexpr bool
+ operator<=(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return !(__y < __x); }
+
+ friend constexpr bool
+ operator>=(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ { return !(__x < __y); }
+
+ friend constexpr auto
+ operator<=>(const _Iterator& __x, const _Iterator& __y)
+ requires random_access_range<_Base>
+ && three_way_comparable<iterator_t<_Base>>
+ { return __x._M_current <=> __y._M_current; }
+
+ friend constexpr _Iterator
+ operator+(const _Iterator& __i, difference_type __n)
+ requires random_access_range<_Base>
+ {
+ auto __r = __i;
+ __r += __n;
+ return __r;
+ }
+
+ friend constexpr _Iterator
+ operator+(difference_type __n, const _Iterator& __i)
+ requires random_access_range<_Base>
+ {
+ auto __r = __i;
+ __r += __n;
+ return __r;
+ }
+
+ friend constexpr _Iterator
+ operator-(const _Iterator& __i, difference_type __n)
+ requires random_access_range<_Base>
+ {
+ auto __r = __i;
+ __r -= __n;
+ return __r;
+ }
+
+ friend constexpr difference_type
+ operator-(const _Iterator& __x, const _Iterator& __y)
+ requires sized_sentinel_for<iterator_t<_Base>, iterator_t<_Base>>
+ {
+ if constexpr (_S_last_elt_present)
+ return __x._M_last_elt - __y._M_last_elt;
+ else
+ return __x._M_current - __y._M_current;
+ }
+ };
+
+ // Sentinel for slide_view over forward-only, non-common ranges: the view
+ // is exhausted once the current window's last element reaches the
+ // underlying range's end, hence comparisons use _M_last_elt.
+ template<forward_range _Vp>
+ requires view<_Vp>
+ class slide_view<_Vp>::_Sentinel
+ {
+ sentinel_t<_Vp> _M_end = sentinel_t<_Vp>();
+
+ constexpr explicit
+ _Sentinel(sentinel_t<_Vp> __end)
+ : _M_end(__end)
+ { }
+
+ friend slide_view;
+
+ public:
+ _Sentinel() = default;
+
+ friend constexpr bool
+ operator==(const _Iterator<false>& __x, const _Sentinel& __y)
+ { return __x._M_last_elt == __y._M_end; }
+
+ friend constexpr range_difference_t<_Vp>
+ operator-(const _Iterator<false>& __x, const _Sentinel& __y)
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ { return __x._M_last_elt - __y._M_end; }
+
+ friend constexpr range_difference_t<_Vp>
+ operator-(const _Sentinel& __y, const _Iterator<false>& __x)
+ requires sized_sentinel_for<sentinel_t<_Vp>, iterator_t<_Vp>>
+ { return __y._M_end -__x._M_last_elt; }
+ };
+
+ namespace views
+ {
+ namespace __detail
+ {
+ // Satisfied when slide_view can be constructed from _Range and count _Dp.
+ template<typename _Range, typename _Dp>
+ concept __can_slide_view
+ = requires { slide_view(std::declval<_Range>(), std::declval<_Dp>()); };
+ }
+
+ // Range adaptor object implementing views::slide (P2442R1): r | slide(n)
+ // or slide(r, n) yields slide_view(r, n).
+ struct _Slide : __adaptor::_RangeAdaptor<_Slide>
+ {
+ template<viewable_range _Range, typename _Dp = range_difference_t<_Range>>
+ requires __detail::__can_slide_view<_Range, _Dp>
+ constexpr auto
+ operator() [[nodiscard]] (_Range&& __r, type_identity_t<_Dp> __n) const
+ { return slide_view(std::forward<_Range>(__r), __n); }
+
+ using __adaptor::_RangeAdaptor<_Slide>::operator();
+ static constexpr int _S_arity = 2;
+ static constexpr bool _S_has_simple_extra_args = true;
+ };
+
+ inline constexpr _Slide slide;
+ }
+
+ // chunk_by_view (P2443R1): splits the underlying range into maximal
+ // subranges over which the binary predicate holds for each adjacent pair.
+ template<forward_range _Vp,
+ indirect_binary_predicate<iterator_t<_Vp>, iterator_t<_Vp>> _Pred>
+ requires view<_Vp> && is_object_v<_Pred>
+ class chunk_by_view : public view_interface<chunk_by_view<_Vp, _Pred>>
+ {
+ _Vp _M_base = _Vp();
+ __detail::__box<_Pred> _M_pred = _Pred();
+ __detail::_CachedPosition<_Vp> _M_cached_begin;
+
+ // Past-the-end of the chunk starting at __current: the position just
+ // after the first adjacent pair for which the predicate fails.
+ constexpr iterator_t<_Vp>
+ _M_find_next(iterator_t<_Vp> __current)
+ {
+ __glibcxx_assert(_M_pred.has_value());
+ auto __pred = [this]<typename _Tp>(_Tp&& __x, _Tp&& __y) {
+ return !bool((*_M_pred)(std::forward<_Tp>(__x), std::forward<_Tp>(__y)));
+ };
+ auto __it = ranges::adjacent_find(__current, ranges::end(_M_base), __pred);
+ return ranges::next(__it, 1, ranges::end(_M_base));
+ }
+
+ // Start of the chunk ending at __current: found by running the same
+ // search backwards over reversed iterators, with arguments swapped.
+ constexpr iterator_t<_Vp>
+ _M_find_prev(iterator_t<_Vp> __current) requires bidirectional_range<_Vp>
+ {
+ __glibcxx_assert(_M_pred.has_value());
+ auto __pred = [this]<typename _Tp>(_Tp&& __x, _Tp&& __y) {
+ return !bool((*_M_pred)(std::forward<_Tp>(__y), std::forward<_Tp>(__x)));
+ };
+ auto __rbegin = std::make_reverse_iterator(__current);
+ auto __rend = std::make_reverse_iterator(ranges::begin(_M_base));
+ __glibcxx_assert(__rbegin != __rend);
+ auto __it = ranges::adjacent_find(__rbegin, __rend, __pred).base();
+ return ranges::prev(__it, 1, ranges::begin(_M_base));
+ }
+
+ class _Iterator;
+
+ public:
+ chunk_by_view() requires (default_initializable<_Vp>
+ && default_initializable<_Pred>)
+ = default;
+
+ constexpr explicit
+ chunk_by_view(_Vp __base, _Pred __pred)
+ : _M_base(std::move(__base)), _M_pred(std::move(__pred))
+ { }
+
+ constexpr _Vp
+ base() const & requires copy_constructible<_Vp>
+ { return _M_base; }
+
+ constexpr _Vp
+ base() &&
+ { return std::move(_M_base); }
+
+ constexpr const _Pred&
+ pred() const
+ { return *_M_pred; }
+
+ // Finding the first chunk's end is O(n), so it is computed once and
+ // cached for subsequent begin() calls.
+ constexpr _Iterator
+ begin()
+ {
+ __glibcxx_assert(_M_pred.has_value());
+ iterator_t<_Vp> __it;
+ if (_M_cached_begin._M_has_value())
+ __it = _M_cached_begin._M_get(_M_base);
+ else
+ {
+ __it = _M_find_next(ranges::begin(_M_base));
+ _M_cached_begin._M_set(_M_base, __it);
+ }
+ return _Iterator(*this, ranges::begin(_M_base), __it);
+ }
+
+ constexpr auto
+ end()
+ {
+ if constexpr (common_range<_Vp>)
+ return _Iterator(*this, ranges::end(_M_base), ranges::end(_M_base));
+ else
+ return default_sentinel;
+ }
+ };
+
+ // Deduction guide: wrap the argument range in views::all.
+ template<typename _Range, typename _Pred>
+ chunk_by_view(_Range&&, _Pred) -> chunk_by_view<views::all_t<_Range>, _Pred>;
+
+ // Iterator over the chunks of a chunk_by_view.  Tracks the current
+ // chunk as the half-open range [_M_current, _M_next); the parent pointer
+ // is needed to recompute chunk boundaries on increment/decrement.
+ template<forward_range _Vp,
+ indirect_binary_predicate<iterator_t<_Vp>, iterator_t<_Vp>> _Pred>
+ requires view<_Vp> && is_object_v<_Pred>
+ class chunk_by_view<_Vp, _Pred>::_Iterator
+ {
+ chunk_by_view* _M_parent = nullptr;
+ iterator_t<_Vp> _M_current = iterator_t<_Vp>();
+ iterator_t<_Vp> _M_next = iterator_t<_Vp>();
+
+ constexpr
+ _Iterator(chunk_by_view& __parent, iterator_t<_Vp> __current, iterator_t<_Vp> __next)
+ : _M_parent(std::__addressof(__parent)), _M_current(__current), _M_next(__next)
+ { }
+
+ // At most bidirectional: _M_find_prev requires a bidirectional base.
+ static auto
+ _S_iter_concept()
+ {
+ if constexpr (bidirectional_range<_Vp>)
+ return bidirectional_iterator_tag{};
+ else
+ return forward_iterator_tag{};
+ }
+
+ friend chunk_by_view;
+
+ public:
+ using value_type = subrange<iterator_t<_Vp>>;
+ using difference_type = range_difference_t<_Vp>;
+ using iterator_category = input_iterator_tag;
+ using iterator_concept = decltype(_S_iter_concept());
+
+ _Iterator() = default;
+
+ // The current chunk.  Precondition: not past the end (current != next).
+ constexpr value_type
+ operator*() const
+ {
+ __glibcxx_assert(_M_current != _M_next);
+ return ranges::subrange(_M_current, _M_next);
+ }
+
+ // Advance to the next chunk and recompute its end via the parent.
+ constexpr _Iterator&
+ operator++()
+ {
+ __glibcxx_assert(_M_current != _M_next);
+ _M_current = _M_next;
+ _M_next = _M_parent->_M_find_next(_M_current);
+ return *this;
+ }
+
+ constexpr _Iterator
+ operator++(int)
+ {
+ auto __tmp = *this;
+ ++*this;
+ return __tmp;
+ }
+
+ // Step back: the previous chunk ends where this one starts.
+ constexpr _Iterator&
+ operator--() requires bidirectional_range<_Vp>
+ {
+ _M_next = _M_current;
+ _M_current = _M_parent->_M_find_prev(_M_next);
+ return *this;
+ }
+
+ constexpr _Iterator
+ operator--(int) requires bidirectional_range<_Vp>
+ {
+ auto __tmp = *this;
+ --*this;
+ return __tmp;
+ }
+
+ friend constexpr bool
+ operator==(const _Iterator& __x, const _Iterator& __y)
+ { return __x._M_current == __y._M_current; }
+
+ // Past the end when the current chunk is empty (current == next).
+ friend constexpr bool
+ operator==(const _Iterator& __x, default_sentinel_t)
+ { return __x._M_current == __x._M_next; }
+ };
+
+ namespace views
+ {
+ namespace __detail
+ {
+ // Satisfied when chunk_by_view can be constructed from _Range and _Pred.
+ template<typename _Range, typename _Pred>
+ concept __can_chunk_by_view
+ = requires { chunk_by_view(std::declval<_Range>(), std::declval<_Pred>()); };
+ }
+
+ // Range adaptor object implementing views::chunk_by (P2443R1):
+ // r | chunk_by(pred) or chunk_by(r, pred) yields chunk_by_view(r, pred).
+ struct _ChunkBy : __adaptor::_RangeAdaptor<_ChunkBy>
+ {
+ template<viewable_range _Range, typename _Pred>
+ requires __detail::__can_chunk_by_view<_Range, _Pred>
+ constexpr auto
+ operator() [[nodiscard]] (_Range&& __r, _Pred&& __pred) const
+ { return chunk_by_view(std::forward<_Range>(__r), std::forward<_Pred>(__pred)); }
+
+ using __adaptor::_RangeAdaptor<_ChunkBy>::operator();
+ static constexpr int _S_arity = 2;
+ static constexpr bool _S_has_simple_extra_args = true;
+ };
+
+ inline constexpr _ChunkBy chunk_by;
+ }
#endif // C++23
} // namespace ranges
diff --git a/libstdc++-v3/include/tr1/random.h b/libstdc++-v3/include/tr1/random.h
index 535f142..6061649 100644
--- a/libstdc++-v3/include/tr1/random.h
+++ b/libstdc++-v3/include/tr1/random.h
@@ -81,9 +81,8 @@ namespace tr1
template<typename _Engine, typename _Distribution>
struct _Adaptor
{
- typedef typename remove_reference<_Engine>::type _BEngine;
- typedef typename _BEngine::result_type _Engine_result_type;
- typedef typename _Distribution::input_type result_type;
+ typedef typename _Engine::result_type _Engine_result_type;
+ typedef typename _Distribution::input_type result_type;
public:
_Adaptor(const _Engine& __g)
@@ -146,72 +145,8 @@ namespace tr1
return __return_value;
}
- private:
_Engine _M_g;
};
-
- // Specialization for _Engine*.
- template<typename _Engine, typename _Distribution>
- struct _Adaptor<_Engine*, _Distribution>
- {
- typedef typename _Engine::result_type _Engine_result_type;
- typedef typename _Distribution::input_type result_type;
-
- public:
- _Adaptor(_Engine* __g)
- : _M_g(__g) { }
-
- result_type
- min() const
- {
- result_type __return_value;
- if (is_integral<_Engine_result_type>::value
- && is_integral<result_type>::value)
- __return_value = _M_g->min();
- else
- __return_value = result_type(0);
- return __return_value;
- }
-
- result_type
- max() const
- {
- result_type __return_value;
- if (is_integral<_Engine_result_type>::value
- && is_integral<result_type>::value)
- __return_value = _M_g->max();
- else if (!is_integral<result_type>::value)
- __return_value = result_type(1);
- else
- __return_value = std::numeric_limits<result_type>::max() - 1;
- return __return_value;
- }
-
- result_type
- operator()()
- {
- result_type __return_value;
- if (is_integral<_Engine_result_type>::value
- && is_integral<result_type>::value)
- __return_value = (*_M_g)();
- else if (!is_integral<_Engine_result_type>::value
- && !is_integral<result_type>::value)
- __return_value = result_type((*_M_g)() - _M_g->min())
- / result_type(_M_g->max() - _M_g->min());
- else if (is_integral<_Engine_result_type>::value
- && !is_integral<result_type>::value)
- __return_value = result_type((*_M_g)() - _M_g->min())
- / result_type(_M_g->max() - _M_g->min() + result_type(1));
- else
- __return_value = ((((*_M_g)() - _M_g->min())
- / (_M_g->max() - _M_g->min()))
- * std::numeric_limits<result_type>::max());
- return __return_value;
- }
-
- private:
- _Engine* _M_g;
- };
} // namespace __detail
/**
@@ -223,17 +158,45 @@ namespace tr1
template<typename _Engine, typename _Dist>
class variate_generator
{
- // Concept requirements.
- __glibcxx_class_requires(_Engine, _CopyConstructibleConcept)
- // __glibcxx_class_requires(_Engine, _EngineConcept)
- // __glibcxx_class_requires(_Dist, _EngineConcept)
+ template<typename _Eng>
+ struct _Value
+ {
+ typedef _Eng type;
+
+ static const _Eng&
+ _S_ref(const _Eng& __e) { return __e; }
+ };
+
+ template<typename _Eng>
+ struct _Value<_Eng*>
+ {
+ typedef _Eng type;
+
+ __attribute__((__nonnull__))
+ static const _Eng&
+ _S_ref(const _Eng* __e) { return *__e; }
+ };
+
+ template<typename _Eng>
+ struct _Value<_Eng&>
+ {
+ typedef _Eng type;
+
+ static const _Eng&
+ _S_ref(const _Eng& __e) { return __e; }
+ };
public:
typedef _Engine engine_type;
- typedef __detail::_Adaptor<_Engine, _Dist> engine_value_type;
+ typedef typename _Value<_Engine>::type engine_value_type;
typedef _Dist distribution_type;
typedef typename _Dist::result_type result_type;
+ // Concept requirements.
+ __glibcxx_class_requires(engine_value_type, _CopyConstructibleConcept)
+ // __glibcxx_class_requires(_Engine, _EngineConcept)
+ // __glibcxx_class_requires(_Dist, _EngineConcept)
+
// tr1:5.1.1 table 5.1 requirement
typedef typename __gnu_cxx::__enable_if<
is_arithmetic<result_type>::value, result_type>::__type _IsValidType;
@@ -246,7 +209,7 @@ namespace tr1
* the @p _Engine or @p _Dist objects.
*/
variate_generator(engine_type __eng, distribution_type __dist)
- : _M_engine(__eng), _M_dist(__dist) { }
+ : _M_engine(_Value<_Engine>::_S_ref(__eng)), _M_dist(__dist) { }
/**
* Gets the next generated value on the distribution.
@@ -269,7 +232,7 @@ namespace tr1
*/
engine_value_type&
engine()
- { return _M_engine; }
+ { return _M_engine._M_g; }
/**
* Gets a const reference to the underlying uniform random number
@@ -277,7 +240,7 @@ namespace tr1
*/
const engine_value_type&
engine() const
- { return _M_engine; }
+ { return _M_engine._M_g; }
/**
* Gets a reference to the underlying random distribution.
@@ -308,7 +271,7 @@ namespace tr1
{ return this->distribution().max(); }
private:
- engine_value_type _M_engine;
+ __detail::_Adaptor<engine_value_type, _Dist> _M_engine;
distribution_type _M_dist;
};
diff --git a/libstdc++-v3/python/libstdcxx/v6/printers.py b/libstdc++-v3/python/libstdcxx/v6/printers.py
index d70c8d5..bd4289c 100644
--- a/libstdc++-v3/python/libstdcxx/v6/printers.py
+++ b/libstdc++-v3/python/libstdcxx/v6/printers.py
@@ -969,6 +969,57 @@ class StdStringPrinter:
def display_hint (self):
return 'string'
+# Reads the put-area (and in-end) pointers straight from the basic_streambuf
+# members; returns the (pbase, pptr, egptr) triple used by _M_high_mark logic.
+def access_streambuf_ptrs(streambuf):
+ "Access the streambuf put area pointers"
+ pbase = streambuf['_M_out_beg']
+ pptr = streambuf['_M_out_cur']
+ egptr = streambuf['_M_in_end']
+ return pbase, pptr, egptr
+
+class StdStringBufPrinter:
+ "Print a std::basic_stringbuf"
+
+ def __init__(self, _, val):
+ self.val = val
+
+ def to_string(self):
+ (pbase, pptr, egptr) = access_streambuf_ptrs(self.val)
+ # Logic from basic_stringbuf::_M_high_mark()
+ # If there is a put area, print up to the high-water mark (whichever of
+ # pptr/egptr is further); otherwise fall back to the stored string.
+ if pptr:
+ if not egptr or pptr > egptr:
+ return pbase.string(length = pptr - pbase)
+ else:
+ return pbase.string(length = egptr - pbase)
+ return self.val['_M_string']
+
+ def display_hint(self):
+ return 'string'
+
+class StdStringStreamPrinter:
+ "Print a std::basic_stringstream"
+
+ def __init__(self, typename, val):
+ self.val = val
+ self.typename = typename
+
+ # Check if the stream was redirected:
+ # This is essentially: val['_M_streambuf'] == val['_M_stringbuf'].address
+ # However, GDB can't resolve the virtual inheritance, so we do that manually
+ # (cast to the basic_ios base and call rdbuf() in the inferior instead).
+ basetype = [f.type for f in val.type.fields() if f.is_base_class][0]
+ gdb.set_convenience_variable('__stream', val.cast(basetype).address)
+ self.streambuf = gdb.parse_and_eval('$__stream->rdbuf()')
+ self.was_redirected = self.streambuf != val['_M_stringbuf'].address
+
+ def to_string(self):
+ # A redirected stream no longer reflects its own stringbuf's contents,
+ # so report the active streambuf instead of printing stale text.
+ if self.was_redirected:
+ return "%s redirected to %s" % (self.typename, self.streambuf.dereference())
+ return self.val['_M_stringbuf']
+
+ def display_hint(self):
+ if self.was_redirected:
+ return None
+ return 'string'
+
class Tr1HashtableIterator(Iterator):
def __init__ (self, hashtable):
self.buckets = hashtable['_M_buckets']
@@ -2232,6 +2283,11 @@ def build_libstdcxx_dictionary ():
libstdcxx_printer.add_version('std::', 'initializer_list',
StdInitializerListPrinter)
libstdcxx_printer.add_version('std::', 'atomic', StdAtomicPrinter)
+ libstdcxx_printer.add_version('std::', 'basic_stringbuf', StdStringBufPrinter)
+ libstdcxx_printer.add_version('std::__cxx11::', 'basic_stringbuf', StdStringBufPrinter)
+ for sstream in ('istringstream', 'ostringstream', 'stringstream'):
+ libstdcxx_printer.add_version('std::', 'basic_' + sstream, StdStringStreamPrinter)
+ libstdcxx_printer.add_version('std::__cxx11::', 'basic_' + sstream, StdStringStreamPrinter)
# std::regex components
libstdcxx_printer.add_version('std::__detail::', '_State',
diff --git a/libstdc++-v3/testsuite/17_intro/names.cc b/libstdc++-v3/testsuite/17_intro/names.cc
index 86fb8f8..82e201c 100644
--- a/libstdc++-v3/testsuite/17_intro/names.cc
+++ b/libstdc++-v3/testsuite/17_intro/names.cc
@@ -20,6 +20,8 @@
// Define macros for some common variables names that we must not use for
// naming variables, parameters etc. in the library.
+// N.B. we cannot use '#pragma GCC poison A' because that also prevents using
+// these names even as macro arguments, e.g. #define FOO(A) BAR(A)
#define A (
#define B (
#define C (
diff --git a/libstdc++-v3/testsuite/19_diagnostics/error_code/cons/lwg3629.cc b/libstdc++-v3/testsuite/19_diagnostics/error_code/cons/lwg3629.cc
index b1e0b7f..70fa5e8 100644
--- a/libstdc++-v3/testsuite/19_diagnostics/error_code/cons/lwg3629.cc
+++ b/libstdc++-v3/testsuite/19_diagnostics/error_code/cons/lwg3629.cc
@@ -39,10 +39,10 @@ template<> struct std::is_error_code_enum<user::E3> : std::true_type { };
// ::make_error_code(E1) should not be found by name lookup.
std::error_code e1( user::E1{} ); // { dg-error "here" }
-// std::make_error_code(errc) should not be found by name lookup.
+// std::make_error_code(future_errc) should not be found by name lookup.
std::error_code e2( user::E2{} ); // { dg-error "here" }
-// std::make_error_code(future_errc) should not be found by name lookup.
+// std::make_error_code(errc) should not be found by name lookup.
std::error_code e3( user::E3{} ); // { dg-error "here" }
// { dg-error "use of deleted function" "" { target *-*-* } 0 }
diff --git a/libstdc++-v3/testsuite/19_diagnostics/error_condition/cons/lwg3629.cc b/libstdc++-v3/testsuite/19_diagnostics/error_condition/cons/lwg3629.cc
index e34b53d..562a99a 100644
--- a/libstdc++-v3/testsuite/19_diagnostics/error_condition/cons/lwg3629.cc
+++ b/libstdc++-v3/testsuite/19_diagnostics/error_condition/cons/lwg3629.cc
@@ -39,10 +39,10 @@ template<> struct std::is_error_condition_enum<user::E3> : std::true_type { };
// ::make_error_condition(E1) should not be found by name lookup.
std::error_condition e1( user::E1{} ); // { dg-error "here" }
-// std::make_error_condition(errc) should not be found by name lookup.
+// std::make_error_condition(future_errc) should not be found by name lookup.
std::error_condition e2( user::E2{} ); // { dg-error "here" }
-// std::make_error_condition(future_errc) should not be found by name lookup.
+// std::make_error_condition(errc) should not be found by name lookup.
std::error_condition e3( user::E3{} ); // { dg-error "here" }
// { dg-error "use of deleted function" "" { target *-*-* } 0 }
diff --git a/libstdc++-v3/testsuite/20_util/is_complete_or_unbounded/memoization_neg.cc b/libstdc++-v3/testsuite/20_util/is_complete_or_unbounded/memoization_neg.cc
index fc0b70b..bc66c13 100644
--- a/libstdc++-v3/testsuite/20_util/is_complete_or_unbounded/memoization_neg.cc
+++ b/libstdc++-v3/testsuite/20_util/is_complete_or_unbounded/memoization_neg.cc
@@ -1,6 +1,6 @@
// { dg-do compile { target c++11 } }
// { dg-prune-output "must be a complete" }
-// { dg-prune-output "'value' is not a member of 'std::is_move_cons" }
+// { dg-prune-output "'value' is not a member of 'std::(__8::)?is_move_cons" }
// { dg-prune-output "invalid use of incomplete type" }
// Copyright (C) 2019-2022 Free Software Foundation, Inc.
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/compare_exchange_padding.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/compare_exchange_padding.cc
index 1b1a12d..e9f8a4b 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/compare_exchange_padding.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/compare_exchange_padding.cc
@@ -20,14 +20,15 @@ int
main ()
{
S s;
- fill_struct(s);
- s.c = 'a';
- s.s = 42;
-
S ss{ s };
+ fill_struct(ss);
+ ss.c = 'a';
+ ss.s = 42;
+
std::atomic_ref<S> as{ s };
+ as.store(ss);
auto ts = as.load();
- VERIFY( !compare_struct(ss, ts) ); // padding cleared on construction
+ VERIFY( !compare_struct(ss, ts) ); // padding cleared on store
as.exchange(ss);
auto es = as.load();
VERIFY( compare_struct(ts, es) ); // padding cleared on exchange
diff --git a/libstdc++-v3/testsuite/30_threads/promise/members/set_exception_neg.cc b/libstdc++-v3/testsuite/30_threads/promise/members/set_exception_neg.cc
new file mode 100644
index 0000000..1666093
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/promise/members/set_exception_neg.cc
@@ -0,0 +1,18 @@
+// { dg-options "-D_GLIBCXX_ASSERTIONS" }
+// { dg-do run { xfail *-*-* } }
+// { dg-additional-options "-pthread" { target pthread } }
+// { dg-require-effective-target c++11 }
+// { dg-require-gthreads "" }
+
+// LWG 2276. Missing requirement on std::promise::set_exception
+
+#include <future>
+
+int main()
+{
+ std::promise<void> prom;
+ auto f = prom.get_future();
+ std::exception_ptr p;
+ // Passing a null exception_ptr violates the LWG 2276 precondition; with
+ // -D_GLIBCXX_ASSERTIONS the run is expected to fail (dg-do run xfail).
+ prom.set_exception(p); // Preconditions: p is not null
+ f.get();
+}
diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/debug.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/debug.cc
index 98bbc18..3c61955 100644
--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/debug.cc
+++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/debug.cc
@@ -29,6 +29,7 @@
#include <list>
#include <map>
#include <set>
+#include <sstream>
#include <vector>
#include <ext/slist>
@@ -110,6 +111,20 @@ main()
__gnu_cxx::slist<int>::iterator slliter = sll.begin();
// { dg-final { note-test slliter {47} } }
+ std::stringstream sstream;
+ sstream << "abc";
+// { dg-final { note-test sstream "\"abc\"" } }
+ std::stringstream ssin("input", std::ios::in);
+// { dg-final { note-test ssin "\"input\"" } }
+ std::istringstream ssin2("input");
+// { dg-final { note-test ssin2 "\"input\"" } }
+ std::ostringstream ssout;
+ ssout << "out";
+// { dg-final { note-test ssout "\"out\"" } }
+ std::stringstream redirected("xxx");
+ static_cast<std::basic_ios<std::stringstream::char_type>&>(redirected).rdbuf(sstream.rdbuf());
+// { dg-final { regexp-test redirected {std::.*stringstream redirected to .*} } }
+
std::cout << "\n";
return 0; // Mark SPOT
}
diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple.cc
index 1f85775..1609ae2 100644
--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple.cc
+++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple.cc
@@ -30,6 +30,7 @@
#include <list>
#include <map>
#include <set>
+#include <sstream>
#include <vector>
#include <ext/slist>
@@ -169,6 +170,20 @@ main()
__gnu_cxx::slist<int>::iterator slliter0;
// { dg-final { note-test slliter0 {non-dereferenceable iterator for __gnu_cxx::slist} } }
+ std::stringstream sstream;
+ sstream << "abc";
+// { dg-final { note-test sstream "\"abc\"" } }
+ std::stringstream ssin("input", std::ios::in);
+// { dg-final { note-test ssin "\"input\"" } }
+ std::istringstream ssin2("input");
+// { dg-final { note-test ssin2 "\"input\"" } }
+ std::ostringstream ssout;
+ ssout << "out";
+// { dg-final { note-test ssout "\"out\"" } }
+ std::stringstream redirected("xxx");
+ static_cast<std::basic_ios<std::stringstream::char_type>&>(redirected).rdbuf(sstream.rdbuf());
+// { dg-final { regexp-test redirected {std::.*stringstream redirected to .*} } }
+
std::cout << "\n";
return 0; // Mark SPOT
}
diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple11.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple11.cc
index 6f21675..a4b82e3 100644
--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple11.cc
+++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/simple11.cc
@@ -30,6 +30,7 @@
#include <list>
#include <map>
#include <set>
+#include <sstream>
#include <vector>
#include <ext/slist>
@@ -162,6 +163,20 @@ main()
__gnu_cxx::slist<int>::iterator slliter0;
// { dg-final { note-test slliter0 {non-dereferenceable iterator for __gnu_cxx::slist} } }
+ std::stringstream sstream;
+ sstream << "abc";
+// { dg-final { note-test sstream "\"abc\"" } }
+ std::stringstream ssin("input", std::ios::in);
+// { dg-final { note-test ssin "\"input\"" } }
+ std::istringstream ssin2("input");
+// { dg-final { note-test ssin2 "\"input\"" } }
+ std::ostringstream ssout;
+ ssout << "out";
+// { dg-final { note-test ssout "\"out\"" } }
+ std::stringstream redirected("xxx");
+ static_cast<std::basic_ios<std::stringstream::char_type>&>(redirected).rdbuf(sstream.rdbuf());
+// { dg-final { regexp-test redirected {std::.*stringstream redirected to .*} } }
+
std::cout << "\n";
return 0; // Mark SPOT
}
diff --git a/libstdc++-v3/testsuite/std/ranges/adaptors/chunk/1.cc b/libstdc++-v3/testsuite/std/ranges/adaptors/chunk/1.cc
new file mode 100644
index 0000000..125c88e
--- /dev/null
+++ b/libstdc++-v3/testsuite/std/ranges/adaptors/chunk/1.cc
@@ -0,0 +1,80 @@
+// { dg-options "-std=gnu++23" }
+// { dg-do run { target c++23 } }
+
+#include <ranges>
+#include <algorithm>
+#include <vector>
+#include <testsuite_hooks.h>
+#include <testsuite_iterators.h>
+
+namespace ranges = std::ranges;
+namespace views = std::views;
+
+constexpr bool
+test01()
+{
+ int x[] = {1, 2, 3, 4, 5};
+
+ auto v2 = x | views::chunk(2);
+ const auto i0 = v2.begin(), i1 = v2.begin() + 1;
+ VERIFY( i0 + 1 - 1 == i0 );
+ VERIFY( i0 < i1 );
+ VERIFY( i1 < v2.end() );
+ VERIFY( i1 - i0 == 1 );
+ VERIFY( i0 - i1 == -1 );
+ VERIFY( v2.end() - i1 == 2 );
+ VERIFY( i1 - v2.end() == -2 );
+ auto i2 = v2.begin();
+ i2 += 2;
+ i2 -= -1;
+ VERIFY( i2 == v2.end() );
+ VERIFY( ranges::size(v2) == 3 );
+ VERIFY( ranges::equal(v2, (std::initializer_list<int>[]){{1, 2}, {3, 4}, {5}},
+ ranges::equal) );
+
+ auto v1 = x | views::chunk(1);
+ VERIFY( ranges::size(v1) == ranges::size(x) );
+ for (auto [r, n] : views::zip(v1, x))
+ {
+ VERIFY( ranges::size(r) == 1 );
+ VERIFY( *r.begin() == n );
+ }
+
+ auto v5 = x | views::chunk(5);
+ VERIFY( ranges::size(v5) == 1 );
+ VERIFY( ranges::equal(v5[0], (int[]){1, 2, 3, 4, 5}) );
+
+ auto v10 = x | views::chunk(10);
+ VERIFY( ranges::size(v10) == 1 );
+ VERIFY( ranges::equal(v10[0], (int[]){1, 2, 3, 4, 5}) );
+
+ return true;
+}
+
+template<class wrapper>
+void
+test02()
+{
+ int x[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ wrapper rx(x);
+ auto v = rx | views::chunk(3);
+ auto i = ranges::begin(v);
+ VERIFY( ranges::equal(*i, (int[]){1, 2, 3}) );
+ ++i;
+ VERIFY( ranges::equal(*i, (int[]){4, 5, 6}) );
+ ++i;
+ VERIFY( ranges::equal(*i, (int[]){7, 8}) );
+ i++;
+ VERIFY( i == ranges::end(v) );
+
+ for (int i = 1; i <= 10; ++i)
+ VERIFY( ranges::equal(wrapper(x) | views::chunk(i) | views::join, x) );
+}
+
+int
+main()
+{
+ static_assert(test01());
+ test02<__gnu_test::test_input_range<int>>();
+ test02<__gnu_test::test_forward_range<int>>();
+}
diff --git a/libstdc++-v3/testsuite/std/ranges/adaptors/chunk_by/1.cc b/libstdc++-v3/testsuite/std/ranges/adaptors/chunk_by/1.cc
new file mode 100644
index 0000000..d57b127
--- /dev/null
+++ b/libstdc++-v3/testsuite/std/ranges/adaptors/chunk_by/1.cc
@@ -0,0 +1,58 @@
+// { dg-options "-std=gnu++23" }
+// { dg-do run { target c++23 } }
+
+#include <ranges>
+#include <algorithm>
+#include <vector>
+#include <testsuite_hooks.h>
+#include <testsuite_iterators.h>
+
+namespace ranges = std::ranges;
+namespace views = std::views;
+
+constexpr bool
+test01()
+{
+ int x[] = {1, 2, 2, 3, 0, 4, 5, 2};
+ auto v = x | views::chunk_by(ranges::less_equal{});
+ static_assert(ranges::bidirectional_range<decltype(v)>
+ && ranges::common_range<decltype(v)>);
+ VERIFY( ranges::equal(v, (std::initializer_list<int>[]){{1, 2, 2, 3}, {0, 4, 5}, {2}},
+ ranges::equal) );
+ VERIFY( ranges::equal(v | views::reverse,
+ (std::initializer_list<int>[]){{2}, {0, 4, 5}, {1, 2, 2, 3}},
+ ranges::equal) );
+ VERIFY( ranges::equal(v | views::join, x) );
+ auto i = v.begin();
+ auto j = i;
+ j++;
+ VERIFY( i == i && i != v.end() );
+ VERIFY( j == j && j != v.end() );
+ VERIFY( j != i );
+ j--;
+ VERIFY( j == i );
+
+ return true;
+}
+
+void
+test02()
+{
+ int x[] = {1, 2, 3};
+ __gnu_test::test_forward_range<int> rx(x);
+ auto v = rx | views::chunk_by(ranges::equal_to{});
+ static_assert(!ranges::bidirectional_range<decltype(v)>
+ && !ranges::common_range<decltype(v)>);
+ VERIFY( ranges::equal(v, x | views::transform(views::single), ranges::equal) );
+ auto i = v.begin();
+ VERIFY( i != v.end() );
+ ranges::advance(i, 3);
+ VERIFY( i == v.end() );
+}
+
+int
+main()
+{
+ static_assert(test01());
+ test02();
+}
diff --git a/libstdc++-v3/testsuite/std/ranges/adaptors/join.cc b/libstdc++-v3/testsuite/std/ranges/adaptors/join.cc
index 8986f71..afc11d4 100644
--- a/libstdc++-v3/testsuite/std/ranges/adaptors/join.cc
+++ b/libstdc++-v3/testsuite/std/ranges/adaptors/join.cc
@@ -21,6 +21,7 @@
#include <algorithm>
#include <array>
#include <ranges>
+#include <sstream>
#include <string>
#include <string_view>
#include <vector>
@@ -205,6 +206,33 @@ test12()
}();
}
+void
+test13()
+{
+ // PR libstdc++/106320
+ auto l = std::views::transform([](auto x) {
+ return x | std::views::transform([i=0](auto y) {
+ return y;
+ });
+ });
+ std::vector<std::vector<int>> v{{5, 6, 7}};
+ v | l | std::views::join;
+}
+
+void
+test14()
+{
+ // LWG 3569: join_view fails to support ranges of ranges with
+ // non-default_initializable iterators
+ auto ss = std::istringstream{"1 2 3"};
+ auto v = views::single(views::istream<int>(ss));
+ using inner = ranges::range_reference_t<decltype(v)>;
+ static_assert(ranges::input_range<inner>
+ && !ranges::forward_range<inner>
+ && !std::default_initializable<ranges::iterator_t<inner>>);
+ VERIFY( ranges::equal(v | views::join, (int[]){1, 2, 3}) );
+}
+
int
main()
{
@@ -220,4 +248,6 @@ main()
test10();
test11();
test12();
+ test13();
+ test14();
}
diff --git a/libstdc++-v3/testsuite/std/ranges/adaptors/slide/1.cc b/libstdc++-v3/testsuite/std/ranges/adaptors/slide/1.cc
new file mode 100644
index 0000000..9856042
--- /dev/null
+++ b/libstdc++-v3/testsuite/std/ranges/adaptors/slide/1.cc
@@ -0,0 +1,105 @@
+// { dg-options "-std=gnu++23" }
+// { dg-do run { target c++23 } }
+
+#include <ranges>
+#include <algorithm>
+#include <utility>
+#include <testsuite_hooks.h>
+#include <testsuite_iterators.h>
+
+namespace ranges = std::ranges;
+namespace views = std::views;
+
+constexpr bool
+test01()
+{
+ auto v1 = std::array{1, 2} | views::slide(1);
+ const auto i0 = v1.begin(), i1 = v1.begin() + 1;
+ VERIFY( i0 + 1 - 1 == i0 );
+ VERIFY( i0 < i1 );
+ VERIFY( i1 < v1.end() );
+ VERIFY( i1 - i0 == 1 );
+ VERIFY( i0 - i1 == -1 );
+ VERIFY( v1.end() - i1 == 1 );
+ VERIFY( i1 - v1.end() == -1 );
+ VERIFY( ranges::equal(std::move(v1) | views::join, (int[]){1, 2}) );
+
+ int x[] = {1, 2, 3, 4};
+ auto v2 = x | views::slide(2);
+ auto i2 = v2.begin();
+ i2 += 2;
+ i2 -= -1;
+ VERIFY( i2 == v2.end() );
+ VERIFY( ranges::size(v2) == 3 );
+ VERIFY( ranges::size(std::as_const(v2)) == 3 );
+ VERIFY( ranges::equal(v2, (std::initializer_list<int>[]){{1, 2}, {2, 3}, {3, 4}},
+ ranges::equal) );
+
+ int y[] = {1, 2, 3, 4, 5};
+ const auto v3 = y | views::slide(3);
+ VERIFY( ranges::size(v3) == 3 );
+ for (unsigned i = 0; i < ranges::size(v3); i++)
+ {
+ VERIFY( &v3[i][0] == &y[i] + 0 );
+ VERIFY( &v3[i][1] == &y[i] + 1 );
+ VERIFY( &v3[i][2] == &y[i] + 2 );
+ }
+
+ const auto v5 = y | views::slide(5);
+ VERIFY( ranges::size(v5) == 1 );
+ VERIFY( ranges::equal(v5 | views::join, y) );
+
+ const auto v6 = y | views::slide(6);
+ VERIFY( ranges::empty(v6) );
+
+ return true;
+}
+
+constexpr bool
+test02()
+{
+ using __gnu_test::test_input_range;
+ using __gnu_test::test_forward_range;
+ using __gnu_test::test_random_access_range;
+
+ using ty1 = ranges::slide_view<views::all_t<test_forward_range<int>>>;
+ static_assert(ranges::forward_range<ty1>);
+ static_assert(!ranges::bidirectional_range<ty1>);
+ static_assert(!ranges::sized_range<ty1>);
+
+ using ty2 = ranges::slide_view<views::all_t<test_random_access_range<int>>>;
+ static_assert(ranges::random_access_range<ty2>);
+ static_assert(ranges::sized_range<ty2>);
+
+ return true;
+}
+
+constexpr bool
+test03()
+{
+ auto v = views::iota(0, 4) | views::filter([](auto) { return true; }) | views::slide(2);
+ using ty = decltype(v);
+ static_assert(ranges::forward_range<ty>);
+ static_assert(ranges::common_range<ty>);
+ static_assert(!ranges::sized_range<ty>);
+ VERIFY( v.begin() == v.begin() );
+ VERIFY( v.begin() != v.end() );
+ VERIFY( ranges::next(v.begin(), 3) == v.end() );
+ auto it = v.begin();
+ ++it;
+ it++;
+ VERIFY( ranges::next(it) == v.end() );
+ it--;
+ --it;
+ VERIFY( it == v.begin() );
+
+ return true;
+}
+
+int
+main()
+{
+ static_assert(test01());
+ static_assert(test02());
+ static_assert(test03());
+}
diff --git a/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/37986.cc b/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/37986.cc
index 5eeccf0..a130947 100644
--- a/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/37986.cc
+++ b/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/37986.cc
@@ -21,7 +21,7 @@
#include <tr1/random>
-// libtsdc++/37986
+// libstdc++/37986
void test01()
{
std::tr1::mt19937 mt;
diff --git a/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/requirements/typedefs.cc b/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/requirements/typedefs.cc
index 0bdb610..a71c8ddf 100644
--- a/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/requirements/typedefs.cc
+++ b/libstdc++-v3/testsuite/tr1/5_numerical_facilities/random/variate_generator/requirements/typedefs.cc
@@ -23,19 +23,56 @@
#include <tr1/random>
+template<typename T, typename U> struct require_same; // not defined
+template<typename T> struct require_same<T, T> { };
+
+typedef std::tr1::linear_congruential<unsigned long, 16807, 0, 2147483647> E;
+typedef std::tr1::uniform_int<int> D;
+
void
test01()
{
- using namespace std::tr1;
+ typedef std::tr1::variate_generator<E, D> test_type;
+
+ typedef test_type::engine_type engine_type;
+ typedef test_type::engine_value_type engine_value_type;
+ typedef test_type::distribution_type distribution_type;
+ typedef test_type::result_type result_type;
+
+ require_same<engine_type, E> check_e;
+ require_same<engine_value_type, E> check_ev;
+ require_same<distribution_type, D> check_d;
+ require_same<result_type, typename D::result_type> check_r;
+}
+
+void
+test02()
+{
+ typedef std::tr1::variate_generator<E&, D> test_type;
- typedef variate_generator
- <
- linear_congruential<unsigned long, 16807 , 0 , 2147483647>,
- uniform_int<int>
- > test_type;
+ typedef test_type::engine_type engine_type;
+ typedef test_type::engine_value_type engine_value_type;
+ typedef test_type::distribution_type distribution_type;
+ typedef test_type::result_type result_type;
+
+ require_same<engine_type, E&> check_e;
+ require_same<engine_value_type, E> check_ev;
+ require_same<distribution_type, D> check_d;
+ require_same<result_type, typename D::result_type> check_r;
+}
+
+void
+test03()
+{
+ typedef std::tr1::variate_generator<E*, D> test_type;
typedef test_type::engine_type engine_type;
typedef test_type::engine_value_type engine_value_type;
typedef test_type::distribution_type distribution_type;
typedef test_type::result_type result_type;
+
+ require_same<engine_type, E*> check_e;
+ require_same<engine_value_type, E> check_ev;
+ require_same<distribution_type, D> check_d;
+ require_same<result_type, typename D::result_type> check_r;
}
diff --git a/libstdc++-v3/testsuite/util/testsuite_abi.cc b/libstdc++-v3/testsuite/util/testsuite_abi.cc
index 5c83835..09bd00e 100644
--- a/libstdc++-v3/testsuite/util/testsuite_abi.cc
+++ b/libstdc++-v3/testsuite/util/testsuite_abi.cc
@@ -211,6 +211,7 @@ check_version(symbol& test, bool added)
known_versions.push_back("GLIBCXX_3.4.28");
known_versions.push_back("GLIBCXX_3.4.29");
known_versions.push_back("GLIBCXX_3.4.30");
+ known_versions.push_back("GLIBCXX_3.4.31");
known_versions.push_back("GLIBCXX_LDBL_3.4.29");
known_versions.push_back("GLIBCXX_IEEE128_3.4.29");
known_versions.push_back("GLIBCXX_IEEE128_3.4.30");
@@ -247,7 +248,7 @@ check_version(symbol& test, bool added)
test.version_status = symbol::incompatible;
// Check that added symbols are added in the latest pre-release version.
- bool latestp = (test.version_name == "GLIBCXX_3.4.30"
+ bool latestp = (test.version_name == "GLIBCXX_3.4.31"
// XXX remove next line when baselines have been regenerated.
|| test.version_name == "GLIBCXX_IEEE128_3.4.30"
|| test.version_name == "CXXABI_1.3.13"