Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog | 178
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/ada/ChangeLog | 199
-rw-r--r--  gcc/ada/gcc-interface/Makefile.in | 8
-rw-r--r--  gcc/cobol/ChangeLog | 8
-rw-r--r--  gcc/cobol/cbldiag.h | 2
-rw-r--r--  gcc/cobol/genapi.cc | 17
-rw-r--r--  gcc/cobol/genutil.cc | 5
-rw-r--r--  gcc/cobol/scan.l | 21
-rw-r--r--  gcc/cobol/scan_ante.h | 5
-rw-r--r--  gcc/cobol/symbols.cc | 10
-rw-r--r--  gcc/common/config/aarch64/cpuinfo.h | 25
-rw-r--r--  gcc/common/config/avr/avr-common.cc | 1
-rw-r--r--  gcc/config/aarch64/aarch64-option-extensions.def | 12
-rw-r--r--  gcc/config/aarch64/aarch64-sme.md | 12
-rw-r--r--  gcc/config/aarch64/aarch64.cc | 29
-rw-r--r--  gcc/config/aarch64/tuning_models/generic_armv9_a.h | 2
-rw-r--r--  gcc/config/avr/avr-passes.cc | 139
-rw-r--r--  gcc/config/avr/avr-passes.def | 8
-rw-r--r--  gcc/config/avr/avr-protos.h | 1
-rw-r--r--  gcc/config/avr/avr.cc | 7
-rw-r--r--  gcc/config/avr/avr.opt | 4
-rw-r--r--  gcc/config/avr/avr.opt.urls | 3
-rw-r--r--  gcc/config/i386/i386-features.cc | 2
-rw-r--r--  gcc/config/i386/i386-options.cc | 35
-rw-r--r--  gcc/config/i386/i386.cc | 4
-rwxr-xr-x  gcc/config/riscv/arch-canonicalize | 2
-rw-r--r--  gcc/config/riscv/riscv-vector-costs.cc | 16
-rw-r--r--  gcc/config/rs6000/rs6000.cc | 25
-rw-r--r--  gcc/config/rs6000/rs6000.md | 2
-rw-r--r--  gcc/cp/ChangeLog | 29
-rw-r--r--  gcc/cp/constexpr.cc | 10
-rw-r--r--  gcc/cp/cp-tree.h | 14
-rw-r--r--  gcc/cp/lambda.cc | 12
-rw-r--r--  gcc/cp/module.cc | 43
-rw-r--r--  gcc/cp/parser.cc | 135
-rw-r--r--  gcc/cp/pt.cc | 3
-rw-r--r--  gcc/cp/semantics.cc | 27
-rw-r--r--  gcc/doc/install.texi | 2
-rw-r--r--  gcc/doc/invoke.texi | 6
-rw-r--r--  gcc/fortran/ChangeLog | 20
-rw-r--r--  gcc/fortran/trans-array.cc | 116
-rw-r--r--  gcc/fortran/trans-decl.cc | 2
-rw-r--r--  gcc/fortran/trans-expr.cc | 35
-rw-r--r--  gcc/gimple-fold.cc | 4
-rw-r--r--  gcc/gimple-fold.h | 8
-rw-r--r--  gcc/gimple-ssa-store-merging.cc | 6
-rw-r--r--  gcc/m2/ChangeLog | 15
-rw-r--r--  gcc/m2/gm2-compiler/M2GenGCC.mod | 51
-rw-r--r--  gcc/m2/mc-boot/GFormatStrings.cc | 4
-rw-r--r--  gcc/m2/mc-boot/GM2EXCEPTION.cc | 6
-rw-r--r--  gcc/m2/mc-boot/GSFIO.cc | 20
-rw-r--r--  gcc/m2/mc-boot/GSFIO.h | 7
-rw-r--r--  gcc/m2/mc-boot/Gdecl.cc | 71
-rw-r--r--  gcc/m2/mc-boot/GmcFileName.h | 2
-rw-r--r--  gcc/m2/mc/decl.mod | 47
-rw-r--r--  gcc/machmode.h | 3
-rw-r--r--  gcc/stor-layout.cc | 2
-rw-r--r--  gcc/testsuite/ChangeLog | 128
-rw-r--r--  gcc/testsuite/g++.dg/abi/regparm1.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/constexpr-array30.C | 22
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block1.C | 82
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block2.C | 49
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block3.C | 41
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block4.C | 41
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block5.C | 70
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block6.C | 108
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block7.C | 12
-rw-r--r--  gcc/testsuite/g++.dg/cpp26/consteval-block8.C | 38
-rw-r--r--  gcc/testsuite/g++.dg/modules/merge-19.h | 21
-rw-r--r--  gcc/testsuite/g++.dg/modules/merge-19_a.H | 5
-rw-r--r--  gcc/testsuite/g++.dg/modules/merge-19_b.C | 16
-rw-r--r--  gcc/testsuite/g++.dg/modules/pr108080.H | 5
-rw-r--r--  gcc/testsuite/g++.target/aarch64/mv-cpu-features.C | 82
-rw-r--r--  gcc/testsuite/gcc.dg/pr121322.c | 14
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr121264.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/ifunc-resolver-0.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/ifunc-resolver-1.c | 13
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/ifunc-resolver-2.c | 14
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/ifunc-resolver-3.c | 15
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/ifunc-resolver-4.c | 16
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/ifunc-resolver.in | 48
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_11.c | 5
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme/pr121028.c | 46
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x4.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x4.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x4.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x4.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x4.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x4.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/i386/20020224-1.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/i386/apx-1.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/i386/attributes-error.c | 42
-rw-r--r--  gcc/testsuite/gcc.target/i386/attributes-ignore.c | 8
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr103785.c | 5
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr15184-2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr36533.c | 24
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr59099.c | 9
-rw-r--r--  gcc/testsuite/gcc.target/i386/sibcall-8.c | 14
-rw-r--r--  gcc/testsuite/gcc.target/i386/sw-1.c | 5
-rw-r--r--  gcc/testsuite/gcc.target/i386/uintr-2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/i386/uintr-5.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i64.c | 5
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i16.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i32.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i64.c | 6
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i8.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i16.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i32.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i64.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i8.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i16.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i32.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i64.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i8.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary.h | 6
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h | 196
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i16.c | 17
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i32.c | 17
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i64.c | 17
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i8.c | 17
-rw-r--r--  gcc/testsuite/gfortran.dg/class_elemental_1.f90 | 35
-rw-r--r--  gcc/testsuite/gm2/errors/fail/badindrtype.mod | 16
-rw-r--r--  gcc/testsuite/gm2/errors/fail/badindrtype2.mod | 16
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 28
-rw-r--r--  gcc/tree-ssa-alias.cc | 4
-rw-r--r--  gcc/tree-ssa-loop-ivopts.cc | 8
-rw-r--r--  gcc/tree-ssa-sccvn.cc | 4
-rw-r--r--  gcc/tree-vect-loop-manip.cc | 2
-rw-r--r--  gcc/tree-vect-loop.cc | 12
-rw-r--r--  gcc/tree-vect-slp.cc | 8
-rw-r--r--  gcc/tree-vect-stmts.cc | 280
-rw-r--r--  gcc/tree-vectorizer.h | 61
143 files changed, 2825 insertions, 492 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 23efda5..ca1b5d9 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,181 @@
+2025-07-31 Georg-Johann Lay <avr@gjlay.de>
+
+ * config/avr/avr.opt.urls (-mfuse-move2): Add url.
+
+2025-07-31 Georg-Johann Lay <avr@gjlay.de>
+
+ * config/avr/avr.cc (avr_output_addr_vec) <labl>: Asm out its .type.
+
+2025-07-31 Georg-Johann Lay <avr@gjlay.de>
+
+ PR rtl-optimization/121340
+ * config/avr/avr.opt (-mfuse-move2): New option.
+ * config/avr/avr-passes.def (avr_pass_2moves): Insert after combine.
+ * config/avr/avr-passes.cc (make_avr_pass_2moves): New function.
+ (pass_data avr_pass_data_2moves): New static variable.
+ (avr_pass_2moves): New rtl_opt_pass.
+ * config/avr/avr-protos.h (make_avr_pass_2moves): New proto.
+ * common/config/avr/avr-common.cc
+ (default_options avr_option_optimization_table) <-mfuse-move2>:
+ Set for -O1 and higher.
+ * doc/invoke.texi (AVR Options) <-mfuse-move2>: Document.
+
+2025-07-31 Tamar Christina <tamar.christina@arm.com>
+
+ PR tree-optimization/120805
+ * tree-vect-loop-manip.cc (vect_gen_vector_loop_niters): Skip setting
+ bounds on epilogues.
+
+2025-07-31 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ * common/config/aarch64/cpuinfo.h: Remove unused features, add FEAT_CSSC
+ and FEAT_MOPS.
+ * config/aarch64/aarch64-option-extensions.def: Remove FMV support
+ for RPRES, use PMULL rather than AES, add FMV support for CSSC and MOPS.
+
+2025-07-31 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ * config/aarch64/tuning_models/generic_armv9_a.h
+ (generic_armv9_a_addrcost_table): Use zero cost for himode.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-stmts.cc (get_group_load_store_type): Properly
+ compare the scalar type of the gather/scatter offset to
+ the offset vector component type.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * gimple-fold.h (fold_stmt_inplace): Add valueization hook
+ argument, defaulted to no_follow_ssa_edges.
+ * gimple-fold.cc (fold_stmt_inplace): Adjust.
+
+2025-07-31 Artemiy Granat <a.granat@ispras.ru>
+
+ * config/i386/i386-options.cc (ix86_handle_cconv_attribute):
+ Fix typo.
+
+2025-07-31 Artemiy Granat <a.granat@ispras.ru>
+
+ * config/i386/i386-options.cc (ix86_handle_cconv_attribute):
+ Handle simultaneous use of regparm and thiscall attributes in
+ case when regparm is set before thiscall.
+
+2025-07-31 Artemiy Granat <a.granat@ispras.ru>
+
+ * config/i386/i386-options.cc (ix86_handle_cconv_attribute):
+ Fix comments which state that combination of stdcall and fastcall
+ attributes is valid but redundant.
+
+2025-07-31 Artemiy Granat <a.granat@ispras.ru>
+
+ * config/i386/i386-options.cc (ix86_handle_cconv_attribute):
+ Move 64-bit mode check before regparm handling.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/121320
+ * tree-ssa-sccvn.cc (ao_ref_init_from_vn_reference): Convert
+ op->off to poly_offset_int before multiplying by
+ BITS_PER_UNIT.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/121323
+ * tree-ssa-alias.cc (ao_ref_init_from_ptr_and_range): Check
+ the pointer offset fits in a HWI when represented in bits.
+
+2025-07-31 Yury Khrustalev <yury.khrustalev@arm.com>
+
+ * config/aarch64/aarch64.cc (build_ifunc_arg_type):
+ Add new fields _hwcap3 and _hwcap4.
+
+2025-07-31 Kishan Parmar <kishan@linux.ibm.com>
+
+ PR target/118890
+ * config/rs6000/rs6000.cc (can_be_rotated_to_negative_lis): Avoid left
+ shift of negative value and guard shift count.
+ (can_be_built_by_li_and_rldic): Likewise.
+ (rs6000_emit_set_long_const): Likewise.
+ * config/rs6000/rs6000.md (splitter for plus into two 16-bit parts): Fix
+ UB from overflow in addition.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * config/aarch64/aarch64.cc (aarch64_detect_vector_stmt_subtype):
+ Check for node before dereferencing.
+ (aarch64_vector_costs::add_stmt_cost): Likewise.
+
+2025-07-31 Spencer Abson <spencer.abson@arm.com>
+
+ PR target/121028
+ * config/aarch64/aarch64-sme.md (aarch64_smstart_sm): Use the .inst
+ directive if !TARGET_SME.
+ (aarch64_smstop_sm): Likewise.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * tree-vectorizer.h (_stmt_vec_info::memory_access_type): Remove.
+ (STMT_VINFO_MEMORY_ACCESS_TYPE): Likewise.
+ (vect_mem_access_type): Likewise.
+ * tree-vect-stmts.cc (vectorizable_store): Do not set
+ STMT_VINFO_MEMORY_ACCESS_TYPE. Fix SLP_TREE_MEMORY_ACCESS_TYPE
+ usage.
+ * tree-vect-loop.cc (update_epilogue_loop_vinfo): Remove
+ checking of memory access type.
+ * config/riscv/riscv-vector-costs.cc (costs::compute_local_live_ranges):
+ Use SLP_TREE_MEMORY_ACCESS_TYPE.
+ (costs::need_additional_vector_vars_p): Likewise.
+ (segment_loadstore_group_size): Get SLP node as argument,
+ use SLP_TREE_MEMORY_ACCESS_TYPE.
+ (costs::adjust_stmt_cost): Pass down SLP node.
+ * config/aarch64/aarch64.cc (aarch64_ld234_st234_vectors): Use
+ SLP_TREE_MEMORY_ACCESS_TYPE instead of vect_mem_access_type.
+ (aarch64_detect_vector_stmt_subtype): Likewise.
+ (aarch64_vector_costs::count_ops): Likewise.
+ (aarch64_vector_costs::add_stmt_cost): Likewise.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-loop.cc (vect_transform_loop): Do not verify DRs
+ have not been modified for epilogue loops.
+ (update_epilogue_loop_vinfo): Do not copy modified DRs to
+ the originals.
+
+2025-07-31 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/121264
+ * machmode.h (get_best_mode): Change type of first 2 arguments
+ from int to HOST_WIDE_INT.
+ * stor-layout.cc (get_best_mode): Likewise.
+
+2025-07-31 Jakub Jelinek <jakub@redhat.com>
+
+ * gimple-ssa-store-merging.cc (find_bswap_or_nop): Fix comment typos,
+ hanlde -> handle.
+ * config/i386/i386.cc (ix86_gimple_fold_builtin, ix86_rtx_costs):
+ Likewise.
+ * config/i386/i386-features.cc (remove_partial_avx_dependency):
+ Likewise.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-stmts.cc (check_scan_store): Remove redundant
+ slp_node check. Disallow epilogue vectorization.
+
+2025-07-31 Richard Biener <rguenther@suse.de>
+
+ * tree-vectorizer.h (vector_costs::costing_for_scalar): New
+ accessor.
+ (add_stmt_cost): For scalar costing force vectype to NULL.
+ Verify we do not pass in a SLP node.
+
+2025-07-31 Kito Cheng <kito.cheng@sifive.com>
+
+ PR target/121312
+ * config/riscv/arch-canonicalize: Add H extension to the
+ canonical order.
+
2025-07-30 Andrew Pinski <quic_apinski@quicinc.com>
PR rtl-optimization/121308
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index c551755..d0a9040 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20250731
+20250801
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 821e3c0..85464e3 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,202 @@
+2025-07-31 Eric Botcazou <ebotcazou@gcc.gnu.org>
+
+ Revert:
+ 2025-07-31 Nicolas Boulenguez <nicolas@debian.org>
+
+ PR ada/114065
+ * Makefile.rtl (GNATRTL_NONTASKING_OBJS): Add g-c_time$(objext) and
+ s-c_time$(objext).
+ (Aarch64/Android): Do not use s-osinte__android.adb.
+ (SPARC/Solaris): Do not use s-osprim__solaris.adb.
+ (x86/Solaris): Likewise.
+ (LynxOS178): Do not use s-parame__posix2008.ads.
+ (RTEMS): Likewise.
+ (x32/Linux): Likewise, as well as s-linux__x32.ads. Replace
+ s-osprim__x32.adb with s-osprim__posix.adb.
+ (LIBGNAT_OBJS): Remove cal.o.
+ * cal.c: Delete.
+ * doc/gnat_rm/the_gnat_library.rst (GNAT.C_Time): New entry.
+ (GNAT.Calendar): Do not mention the obsolete conversion functions.
+ * impunit.adb (Non_Imp_File_Names_95): Add g-c_time.
+ * libgnarl/a-exetim__posix.adb: Add with clause for System.C_Time
+ (Clock): Use type and functions from System.C_Time.
+ * libgnarl/s-linux.ads: Remove with clause for System.Parameters.
+ Remove declarations of C time types.
+ * libgnarl/s-linux__alpha.ads: Likewise.
+ * libgnarl/s-linux__android-aarch64.ads: Likewise.
+ * libgnarl/s-linux__android-arm.ads: Likewise.
+ * libgnarl/s-linux__hppa.ads: Likewise.
+ * libgnarl/s-linux__loongarch.ads: Likewise.
+ * libgnarl/s-linux__mips.ads: Likewise.
+ * libgnarl/s-linux__riscv.ads: Likewise.
+ * libgnarl/s-linux__sparc.ads: Likewise.
+ * libgnarl/s-osinte__aix.ads: Likewise.
+ * libgnarl/s-osinte__android.ads: Likewise.
+ * libgnarl/s-osinte__cheribsd.ads: Likewise.
+ * libgnarl/s-osinte__darwin.ads: Likewise.
+ * libgnarl/s-osinte__dragonfly.ads: Likewise.
+ * libgnarl/s-osinte__freebsd.ads: Likewise.
+ * libgnarl/s-osinte__gnu.ads: Likewise.
+ * libgnarl/s-osinte__hpux.ads: Likewise.
+ * libgnarl/s-osinte__kfreebsd-gnu.ads: Likewise.
+ * libgnarl/s-osinte__linux.ads: Likewise.
+ * libgnarl/s-osinte__lynxos178e.ads: Likewise.
+ * libgnarl/s-osinte__qnx.ads: Likewise.
+ * libgnarl/s-osinte__rtems.ads: Likewise.
+ * libgnarl/s-osinte__solaris.ads: Likewise.
+ * libgnarl/s-osinte__vxworks.ads: Likewise.
+ * libgnarl/s-qnx.ads: Likewise.
+ * libgnarl/s-linux__x32.ads: Delete.
+ * libgnarl/s-osinte__darwin.adb (To_Duration): Remove.
+ (To_Timespec): Likewise.
+ * libgnarl/s-osinte__aix.adb: Likewise.
+ * libgnarl/s-osinte__dragonfly.adb: Likewise.
+ * libgnarl/s-osinte__freebsd.adb: Likewise.
+ * libgnarl/s-osinte__gnu.adb: Likewise.
+ * libgnarl/s-osinte__lynxos178.adb: Likewise.
+ * libgnarl/s-osinte__posix.adb: Likewise.
+ * libgnarl/s-osinte__qnx.adb: Likewise.
+ * libgnarl/s-osinte__rtems.adb: Likewise.
+ * libgnarl/s-osinte__solaris.adb: Likewise.
+ * libgnarl/s-osinte__vxworks.adb: Likewise.
+ * libgnarl/s-osinte__x32.adb: Likewise.
+ * libgnarl/s-taprop__solaris.adb: Add with clause for System.C_Time.
+ (Monotonic_Clock): Use type and functions from System.C_Time.
+ (RT_Resolution): Likewise.
+ (Timed_Sleep): Likewise.
+ (Timed_Delay): Likewise.
+ * libgnarl/s-taprop__vxworks.adb: Likewise.
+ * libgnarl/s-tpopmo.adb: Likewise.
+ * libgnarl/s-osinte__android.adb: Delete.
+ * libgnat/g-c_time.ads: New file.
+ * libgnat/g-calend.adb: Delegate to System.C_Time.
+ * libgnat/g-calend.ads: Likewise.
+ * libgnat/g-socket.adb: Likewise.
+ * libgnat/g-socthi.adb: Likewise.
+ * libgnat/g-socthi__vxworks.adb: Likewise.
+ * libgnat/g-sothco.ads: Likewise.
+ * libgnat/g-spogwa.adb: Likewise.
+ * libgnat/s-c_time.adb: New file.
+ * libgnat/s-c_time.ads: Likewise.
+ * libgnat/s-optide.adb: Import nanosleep here.
+ * libgnat/s-os_lib.ads (time_t): Remove.
+ (To_Ada): Adjust.
+ (To_C): Likewise.
+ * libgnat/s-os_lib.adb: Likewise.
+ * libgnat/s-osprim__darwin.adb: Delegate to System.C_Time.
+ * libgnat/s-osprim__posix.adb: Likewise.
+ * libgnat/s-osprim__posix2008.adb: Likewise.
+ * libgnat/s-osprim__rtems.adb: Likewise.
+ * libgnat/s-osprim__unix.adb: Likewise.
+ * libgnat/s-osprim__solaris.adb: Delete.
+ * libgnat/s-osprim__x32.adb: Likewise.
+ * libgnat/s-parame.ads (time_t_bits): Remove.
+ * libgnat/s-parame__hpux.ads: Likewise.
+ * libgnat/s-parame__vxworks.ads: Likewise.
+ * libgnat/s-parame__posix2008.ads: Delete.
+ * s-oscons-tmplt.c (SIZEOF_tv_nsec): New constant.
+
+2025-07-31 Eric Botcazou <ebotcazou@gcc.gnu.org>
+
+ PR ada/120440
+ * gcc-interface/Makefile.in (GNATLINK_OBJS): Add s-excmac.o.
+ (GNATMAKE_OBJS): Likewise.
+
+2025-07-31 Nicolas Boulenguez <nicolas@debian.org>
+
+ PR ada/114065
+ * Makefile.rtl (GNATRTL_NONTASKING_OBJS): Add g-c_time$(objext) and
+ s-c_time$(objext).
+ (Aarch64/Android): Do not use s-osinte__android.adb.
+ (SPARC/Solaris): Do not use s-osprim__solaris.adb.
+ (x86/Solaris): Likewise.
+ (LynxOS178): Do not use s-parame__posix2008.ads.
+ (RTEMS): Likewise.
+ (x32/Linux): Likewise, as well as s-linux__x32.ads. Replace
+ s-osprim__x32.adb with s-osprim__posix.adb.
+ (LIBGNAT_OBJS): Remove cal.o.
+ * cal.c: Delete.
+ * doc/gnat_rm/the_gnat_library.rst (GNAT.C_Time): New entry.
+ (GNAT.Calendar): Do not mention the obsolete conversion functions.
+ * impunit.adb (Non_Imp_File_Names_95): Add g-c_time.
+ * libgnarl/a-exetim__posix.adb: Add with clause for System.C_Time
+ (Clock): Use type and functions from System.C_Time.
+ * libgnarl/s-linux.ads: Remove with clause for System.Parameters.
+ Remove declarations of C time types.
+ * libgnarl/s-linux__alpha.ads: Likewise.
+ * libgnarl/s-linux__android-aarch64.ads: Likewise.
+ * libgnarl/s-linux__android-arm.ads: Likewise.
+ * libgnarl/s-linux__hppa.ads: Likewise.
+ * libgnarl/s-linux__loongarch.ads: Likewise.
+ * libgnarl/s-linux__mips.ads: Likewise.
+ * libgnarl/s-linux__riscv.ads: Likewise.
+ * libgnarl/s-linux__sparc.ads: Likewise.
+ * libgnarl/s-osinte__aix.ads: Likewise.
+ * libgnarl/s-osinte__android.ads: Likewise.
+ * libgnarl/s-osinte__cheribsd.ads: Likewise.
+ * libgnarl/s-osinte__darwin.ads: Likewise.
+ * libgnarl/s-osinte__dragonfly.ads: Likewise.
+ * libgnarl/s-osinte__freebsd.ads: Likewise.
+ * libgnarl/s-osinte__gnu.ads: Likewise.
+ * libgnarl/s-osinte__hpux.ads: Likewise.
+ * libgnarl/s-osinte__kfreebsd-gnu.ads: Likewise.
+ * libgnarl/s-osinte__linux.ads: Likewise.
+ * libgnarl/s-osinte__lynxos178e.ads: Likewise.
+ * libgnarl/s-osinte__qnx.ads: Likewise.
+ * libgnarl/s-osinte__rtems.ads: Likewise.
+ * libgnarl/s-osinte__solaris.ads: Likewise.
+ * libgnarl/s-osinte__vxworks.ads: Likewise.
+ * libgnarl/s-qnx.ads: Likewise.
+ * libgnarl/s-linux__x32.ads: Delete.
+ * libgnarl/s-osinte__darwin.adb (To_Duration): Remove.
+ (To_Timespec): Likewise.
+ * libgnarl/s-osinte__aix.adb: Likewise.
+ * libgnarl/s-osinte__dragonfly.adb: Likewise.
+ * libgnarl/s-osinte__freebsd.adb: Likewise.
+ * libgnarl/s-osinte__gnu.adb: Likewise.
+ * libgnarl/s-osinte__lynxos178.adb: Likewise.
+ * libgnarl/s-osinte__posix.adb: Likewise.
+ * libgnarl/s-osinte__qnx.adb: Likewise.
+ * libgnarl/s-osinte__rtems.adb: Likewise.
+ * libgnarl/s-osinte__solaris.adb: Likewise.
+ * libgnarl/s-osinte__vxworks.adb: Likewise.
+ * libgnarl/s-osinte__x32.adb: Likewise.
+ * libgnarl/s-taprop__solaris.adb: Add with clause for System.C_Time.
+ (Monotonic_Clock): Use type and functions from System.C_Time.
+ (RT_Resolution): Likewise.
+ (Timed_Sleep): Likewise.
+ (Timed_Delay): Likewise.
+ * libgnarl/s-taprop__vxworks.adb: Likewise.
+ * libgnarl/s-tpopmo.adb: Likewise.
+ * libgnarl/s-osinte__android.adb: Delete.
+ * libgnat/g-c_time.ads: New file.
+ * libgnat/g-calend.adb: Delegate to System.C_Time.
+ * libgnat/g-calend.ads: Likewise.
+ * libgnat/g-socket.adb: Likewise.
+ * libgnat/g-socthi.adb: Likewise.
+ * libgnat/g-socthi__vxworks.adb: Likewise.
+ * libgnat/g-sothco.ads: Likewise.
+ * libgnat/g-spogwa.adb: Likewise.
+ * libgnat/s-c_time.adb: New file.
+ * libgnat/s-c_time.ads: Likewise.
+ * libgnat/s-optide.adb: Import nanosleep here.
+ * libgnat/s-os_lib.ads (time_t): Remove.
+ (To_Ada): Adjust.
+ (To_C): Likewise.
+ * libgnat/s-os_lib.adb: Likewise.
+ * libgnat/s-osprim__darwin.adb: Delegate to System.C_Time.
+ * libgnat/s-osprim__posix.adb: Likewise.
+ * libgnat/s-osprim__posix2008.adb: Likewise.
+ * libgnat/s-osprim__rtems.adb: Likewise.
+ * libgnat/s-osprim__unix.adb: Likewise.
+ * libgnat/s-osprim__solaris.adb: Delete.
+ * libgnat/s-osprim__x32.adb: Likewise.
+ * libgnat/s-parame.ads (time_t_bits): Remove.
+ * libgnat/s-parame__hpux.ads: Likewise.
+ * libgnat/s-parame__vxworks.ads: Likewise.
+ * libgnat/s-parame__posix2008.ads: Delete.
+ * s-oscons-tmplt.c (SIZEOF_tv_nsec): New constant.
+
2025-07-28 Marc Poulhiès <poulhies@adacore.com>
* gcc-interface/trans.cc (gnat_to_gnu): Fix typo in comment.
diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in
index 8615b59..d456ac1 100644
--- a/gcc/ada/gcc-interface/Makefile.in
+++ b/gcc/ada/gcc-interface/Makefile.in
@@ -314,16 +314,16 @@ Makefile: ../config.status $(srcdir)/ada/gcc-interface/Makefile.in $(srcdir)/ada
GNATLINK_OBJS = gnatlink.o \
a-except.o ali.o alloc.o butil.o casing.o csets.o debug.o fmap.o fname.o \
gnatvsn.o hostparm.o indepsw.o interfac.o i-c.o i-cstrin.o namet.o opt.o \
- osint.o output.o rident.o s-exctab.o s-secsta.o s-stalib.o s-stoele.o \
- sdefault.o snames.o stylesw.o switch.o system.o table.o targparm.o \
- types.o validsw.o widechar.o
+ osint.o output.o rident.o s-excmac.o s-exctab.o s-secsta.o s-stalib.o \
+ s-stoele.o sdefault.o snames.o stylesw.o switch.o system.o table.o \
+ targparm.o types.o validsw.o widechar.o
GNATMAKE_OBJS = a-except.o ali.o ali-util.o aspects.o s-casuti.o alloc.o \
atree.o binderr.o butil.o casing.o csets.o debug.o elists.o einfo.o errout.o \
erroutc.o errutil.o err_vars.o fmap.o fname.o fname-uf.o fname-sf.o \
gnatmake.o gnatvsn.o hostparm.o interfac.o i-c.o i-cstrin.o krunch.o lib.o \
make.o makeusg.o make_util.o namet.o nlists.o opt.o osint.o osint-m.o \
- output.o restrict.o rident.o s-exctab.o s-cautns.o \
+ output.o restrict.o rident.o s-cautns.o s-excmac.o s-exctab.o \
s-secsta.o s-stalib.o s-stoele.o scans.o scng.o sdefault.o sfn_scan.o \
s-purexc.o s-htable.o scil_ll.o sem_aux.o sinfo.o sinput.o sinput-c.o \
snames.o stand.o stringt.o styleg.o stylesw.o system.o validsw.o \
diff --git a/gcc/cobol/ChangeLog b/gcc/cobol/ChangeLog
index b286f5d..6fd24a9 100644
--- a/gcc/cobol/ChangeLog
+++ b/gcc/cobol/ChangeLog
@@ -1,3 +1,11 @@
+2025-07-31 Robert Dubner <rdubner@symas.com>
+
+ PR cobol/120244
+ * genapi.cc (get_level_88_domain): Increase array size for final byte.
+ (psa_FldLiteralA): Use correct length in build_string_literal call.
+ * scan.l: Use a loop instead of std::transform to avoid EOF overrun.
+ * scan_ante.h (binary_integer_usage): Use a variable-length buffer.
+
2025-07-25 David Malcolm <dmalcolm@redhat.com>
* util.cc: Update for diagnostic_t becoming
diff --git a/gcc/cobol/cbldiag.h b/gcc/cobol/cbldiag.h
index 39f1369..dd16190 100644
--- a/gcc/cobol/cbldiag.h
+++ b/gcc/cobol/cbldiag.h
@@ -122,7 +122,7 @@ static void
location_dump( const char func[], int line, const char tag[], const LOC& loc) {
extern int yy_flex_debug; // cppcheck-suppress shadowVariable
if( yy_flex_debug ) {
- const char *detail = gcobol_getenv("update_location");
+ const char *detail = gcobol_getenv("update_location"); // cppcheck-suppress knownConditionTrueFalse
if( detail ) {
fprintf(stderr, "%s:%d: %s location (%d,%d) to (%d,%d)\n",
func, line, tag,
diff --git a/gcc/cobol/genapi.cc b/gcc/cobol/genapi.cc
index 666802e..c9d2da4 100644
--- a/gcc/cobol/genapi.cc
+++ b/gcc/cobol/genapi.cc
@@ -531,6 +531,14 @@ get_level_88_domain(size_t parent_capacity, cbl_field_t *var, size_t &returned_s
free(stream);
domain += 1;
}
+
+ if( returned_size >= retval_capacity)
+ {
+ retval_capacity *= 2;
+ retval = static_cast<char *>(xrealloc(retval, retval_capacity));
+ }
+
+ gcc_assert(returned_size < retval_capacity);
retval[returned_size++] = '\0';
return retval;
}
@@ -1190,12 +1198,9 @@ parser_statement_begin( const cbl_name_t statement_name,
if( exception_processing )
{
store_location_stuff(statement_name);
- }
-
- if( exception_processing )
- {
set_exception_environment(ecs, dcls);
}
+
sv_is_i_o = false;
}
@@ -16765,9 +16770,9 @@ psa_FldLiteralA(struct cbl_field_t *field )
vs_file_static);
actually_create_the_static_field(
field,
- build_string_literal(field->data.capacity+1,
+ build_string_literal(field->data.capacity,
buffer),
- field->data.capacity+1,
+ field->data.capacity,
field->data.initial,
NULL_TREE,
field->var_decl_node);
diff --git a/gcc/cobol/genutil.cc b/gcc/cobol/genutil.cc
index 7895ea8..a5f69a0 100644
--- a/gcc/cobol/genutil.cc
+++ b/gcc/cobol/genutil.cc
@@ -27,6 +27,9 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
+// cppcheck-suppress-file duplicateBreak
+
#include "cobol-system.h"
#include "coretypes.h"
#include "tree.h"
@@ -1267,7 +1270,7 @@ get_binary_value( tree value,
cbl_field_type_str(field->type) );
cbl_internal_error("%s", err);
abort();
- // break; // break not needed after abort();
+ break;
}
}
diff --git a/gcc/cobol/scan.l b/gcc/cobol/scan.l
index 2da38d8..ba4c044 100644
--- a/gcc/cobol/scan.l
+++ b/gcc/cobol/scan.l
@@ -1673,16 +1673,17 @@ B-SHIFT-RC
p += 2;
while( ISSPACE(*p) ) p++;
cbl_name_t name2;
- std::transform( p, p + sizeof(name2), name2,
- []( char ch ) {
- switch(ch) {
- case '-':
- case '_': return ch;
- default:
- if( ISALNUM(ch) ) return ch;
- }
- return '\0';
- } );
+ const char *pend = p + sizeof(name2);
+ char *pout = name2;
+ while( p < pend ) {
+ char ch = *p++;
+ if( ISALNUM(ch) || ch == '-' || ch == '_' ) {
+ *pout++ = ch;
+ } else {
+ *pout++ = '\0';
+ break;
+ }
+ }
symbol_elem_t *e = symbol_file(PROGRAM, name2);
/*
* For NAME IN FILENAME, we want the parser to handle it.
diff --git a/gcc/cobol/scan_ante.h b/gcc/cobol/scan_ante.h
index 19ceb2b..c00826d 100644
--- a/gcc/cobol/scan_ante.h
+++ b/gcc/cobol/scan_ante.h
@@ -609,7 +609,9 @@ static const std::map <std::string, bint_t > binary_integers {
static int
binary_integer_usage( const char name[]) {
- cbl_name_t uname = {};
+ // uname can't be cbl_name_t, because at this point name[] might have more
+ // than sizeof(cbl_name_t) characters. The length check comes later.
+ char *uname = xstrdup(name);
std::transform(name, name + strlen(name), uname, ftoupper);
dbgmsg("%s:%d: checking %s in %zu keyword_aliases",
@@ -628,6 +630,7 @@ binary_integer_usage( const char name[]) {
yylval.computational.signable = p->second.signable;
dbgmsg("%s:%d: %s has type %d", __func__, __LINE__,
uname, p->second.type );
+ free(uname);
return p->second.token;
}
diff --git a/gcc/cobol/symbols.cc b/gcc/cobol/symbols.cc
index 7d6a955..f2cd1b5 100644
--- a/gcc/cobol/symbols.cc
+++ b/gcc/cobol/symbols.cc
@@ -28,6 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+// cppcheck-suppress-file duplicateBreak
+
#include "config.h"
#include <fstream> // Before cobol-system because it uses poisoned functions
#include "cobol-system.h"
@@ -500,13 +502,13 @@ symbol_elem_cmp( const void *K, const void *E )
}
return strcasecmp(key.name, elem.name);
}
- // break; // This break not needed if all options do a return.
+ break;
case SymSpecial:
return special_pair_cmp(k->elem.special, e->elem.special)? 0 : 1;
- // break; // This break not needed after return.
+ break;
case SymAlphabet:
return strcasecmp(k->elem.alphabet.name, e->elem.alphabet.name);
- // break; // This break not needed after return.
+ break;
case SymFile:
// If the key is global, so must be the found element.
if( (cbl_file_of(k)->attr & global_e) == global_e &&
@@ -514,7 +516,7 @@ symbol_elem_cmp( const void *K, const void *E )
return 1;
}
return strcasecmp(k->elem.file.name, e->elem.file.name);
- // break; // This break not needed after return.
+ break;
}
assert(k->type == SymField);
diff --git a/gcc/common/config/aarch64/cpuinfo.h b/gcc/common/config/aarch64/cpuinfo.h
index cd3c2b2..d329d86 100644
--- a/gcc/common/config/aarch64/cpuinfo.h
+++ b/gcc/common/config/aarch64/cpuinfo.h
@@ -39,10 +39,10 @@ enum CPUFeatures {
FEAT_FP,
FEAT_SIMD,
FEAT_CRC,
- FEAT_SHA1,
+ FEAT_CSSC,
FEAT_SHA2,
FEAT_SHA3,
- FEAT_AES,
+ FEAT_unused5,
FEAT_PMULL,
FEAT_FP16,
FEAT_DIT,
@@ -53,30 +53,30 @@ enum CPUFeatures {
FEAT_RCPC,
FEAT_RCPC2,
FEAT_FRINTTS,
- FEAT_DGH,
+ FEAT_unused6,
FEAT_I8MM,
FEAT_BF16,
- FEAT_EBF16,
- FEAT_RPRES,
+ FEAT_unused7,
+ FEAT_unused8,
FEAT_SVE,
- FEAT_SVE_BF16,
- FEAT_SVE_EBF16,
- FEAT_SVE_I8MM,
+ FEAT_unused9,
+ FEAT_unused10,
+ FEAT_unused11,
FEAT_SVE_F32MM,
FEAT_SVE_F64MM,
FEAT_SVE2,
- FEAT_SVE_AES,
+ FEAT_unused12,
FEAT_SVE_PMULL128,
FEAT_SVE_BITPERM,
FEAT_SVE_SHA3,
FEAT_SVE_SM4,
FEAT_SME,
- FEAT_MEMTAG,
+ FEAT_unused13,
FEAT_MEMTAG2,
- FEAT_MEMTAG3,
+ FEAT_unused14,
FEAT_SB,
FEAT_unused1,
- FEAT_SSBS,
+ FEAT_unused15,
FEAT_SSBS2,
FEAT_BTI,
FEAT_unused2,
@@ -87,6 +87,7 @@ enum CPUFeatures {
FEAT_SME_I64,
FEAT_SME2,
FEAT_RCPC3,
+ FEAT_MOPS,
FEAT_MAX,
FEAT_EXT = 62, /* Reserved to indicate presence of additional features field
in __aarch64_cpu_features. */
diff --git a/gcc/common/config/avr/avr-common.cc b/gcc/common/config/avr/avr-common.cc
index 203a965..d8b982c 100644
--- a/gcc/common/config/avr/avr-common.cc
+++ b/gcc/common/config/avr/avr-common.cc
@@ -38,6 +38,7 @@ static const struct default_options avr_option_optimization_table[] =
{ OPT_LEVELS_1_PLUS, OPT_mmain_is_OS_task, NULL, 1 },
{ OPT_LEVELS_1_PLUS, OPT_mfuse_add_, NULL, 1 },
{ OPT_LEVELS_2_PLUS, OPT_mfuse_add_, NULL, 2 },
+ { OPT_LEVELS_1_PLUS, OPT_mfuse_move2, NULL, 1 },
{ OPT_LEVELS_1_PLUS_NOT_DEBUG, OPT_mfuse_move_, NULL, 3 },
{ OPT_LEVELS_2_PLUS, OPT_mfuse_move_, NULL, 23 },
{ OPT_LEVELS_2_PLUS, OPT_msplit_bit_shift, NULL, 1 },
diff --git a/gcc/config/aarch64/aarch64-option-extensions.def b/gcc/config/aarch64/aarch64-option-extensions.def
index 1c3e697..db88df0 100644
--- a/gcc/config/aarch64/aarch64-option-extensions.def
+++ b/gcc/config/aarch64/aarch64-option-extensions.def
@@ -128,7 +128,9 @@ AARCH64_OPT_FMV_EXTENSION("sha2", SHA2, (SIMD), (), (), "sha1 sha2")
AARCH64_FMV_FEATURE("sha3", SHA3, (SHA3))
-AARCH64_OPT_FMV_EXTENSION("aes", AES, (SIMD), (), (), "aes")
+AARCH64_OPT_EXTENSION("aes", AES, (SIMD), (), (), "aes")
+
+AARCH64_FMV_FEATURE("aes", PMULL, (AES))
/* +nocrypto disables AES, SHA2 and SM4, and anything that depends on them
(such as SHA3 and the SVE2 crypto extensions). */
@@ -171,8 +173,6 @@ AARCH64_OPT_FMV_EXTENSION("i8mm", I8MM, (SIMD), (), (), "i8mm")
instructions. */
AARCH64_OPT_FMV_EXTENSION("bf16", BF16, (FP), (SIMD), (), "bf16")
-AARCH64_FMV_FEATURE("rpres", RPRES, ())
-
AARCH64_OPT_FMV_EXTENSION("sve", SVE, (SIMD, F16, FCMA), (), (), "sve")
/* This specifically does not imply +sve. */
@@ -190,7 +190,7 @@ AARCH64_OPT_FMV_EXTENSION("sve2", SVE2, (SVE), (), (), "sve2")
AARCH64_OPT_EXTENSION("sve2-aes", SVE2_AES, (SVE2, AES), (), (), "sveaes")
-AARCH64_FMV_FEATURE("sve2-aes", SVE_AES, (SVE2_AES))
+AARCH64_FMV_FEATURE("sve2-aes", SVE_PMULL128, (SVE2_AES))
AARCH64_OPT_EXTENSION("sve2-bitperm", SVE2_BITPERM, (SVE2), (), (),
"svebitperm")
@@ -245,9 +245,9 @@ AARCH64_OPT_EXTENSION("sme-b16b16", SME_B16B16, (SME2, SVE_B16B16), (), (), "sme
AARCH64_OPT_EXTENSION("sme-f16f16", SME_F16F16, (SME2), (), (), "smef16f16")
-AARCH64_OPT_EXTENSION("mops", MOPS, (), (), (), "mops")
+AARCH64_OPT_FMV_EXTENSION("mops", MOPS, (), (), (), "mops")
-AARCH64_OPT_EXTENSION("cssc", CSSC, (), (), (), "cssc")
+AARCH64_OPT_FMV_EXTENSION("cssc", CSSC, (), (), (), "cssc")
AARCH64_OPT_EXTENSION("cmpbr", CMPBR, (), (), (), "cmpbr")
diff --git a/gcc/config/aarch64/aarch64-sme.md b/gcc/config/aarch64/aarch64-sme.md
index 6b3f439..6b1a747 100644
--- a/gcc/config/aarch64/aarch64-sme.md
+++ b/gcc/config/aarch64/aarch64-sme.md
@@ -62,6 +62,10 @@
;; (b) they are sometimes used conditionally, particularly in streaming-
;; compatible code.
;;
+;; To prevent the latter from upsetting the assembler, we emit the literal
+;; encodings of "SMSTART SM" and "SMSTOP SM" when compiling without
+;; TARGET_SME.
+;;
;; =========================================================================
;; -------------------------------------------------------------------------
@@ -161,7 +165,9 @@
(clobber (reg:VNx16BI P14_REGNUM))
(clobber (reg:VNx16BI P15_REGNUM))]
""
- "smstart\tsm"
+ {
+ return TARGET_SME ? "smstart\tsm" : ".inst 0xd503437f // smstart sm";
+ }
)
;; Turn off streaming mode. This clobbers all SVE state.
@@ -196,7 +202,9 @@
(clobber (reg:VNx16BI P14_REGNUM))
(clobber (reg:VNx16BI P15_REGNUM))]
""
- "smstop\tsm"
+ {
+ return TARGET_SME ? "smstop\tsm" : ".inst 0xd503427f // smstop sm";
+ }
)
;; -------------------------------------------------------------------------
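
The two literal encodings come straight from the patterns above: 0xd503437f is "smstart sm" and 0xd503427f is "smstop sm". As a rough, hypothetical illustration of the same trick at the source level, the streaming-mode toggles can be spelled with .inst so that an assembler without SME support still accepts the output:

    // Sketch only: real code must also account for the SVE/predicate state
    // these instructions clobber, which the md patterns model explicitly.
    static inline void smstart_sm (void)
    { asm volatile (".inst 0xd503437f" ::: "memory"); }  // smstart sm

    static inline void smstop_sm (void)
    { asm volatile (".inst 0xd503427f" ::: "memory"); }  // smstop sm
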
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 5502d0b..f4a2062 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -17193,8 +17193,8 @@ aarch64_ld234_st234_vectors (vect_cost_for_stmt kind, stmt_vec_info stmt_info,
&& STMT_VINFO_DATA_REF (stmt_info))
{
stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
- if (stmt_info
- && vect_mem_access_type (stmt_info, node) == VMAT_LOAD_STORE_LANES)
+ if (node
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_LOAD_STORE_LANES)
return DR_GROUP_SIZE (stmt_info);
}
return 0;
@@ -17465,8 +17465,9 @@ aarch64_detect_vector_stmt_subtype (vec_info *vinfo, vect_cost_for_stmt kind,
for each element. We therefore need to divide the full-instruction
cost by the number of elements in the vector. */
if (kind == scalar_load
+ && node
&& sve_costs
- && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
{
unsigned int nunits = vect_nunits_for_cost (vectype);
/* Test for VNx2 modes, which have 64-bit containers. */
@@ -17478,8 +17479,9 @@ aarch64_detect_vector_stmt_subtype (vec_info *vinfo, vect_cost_for_stmt kind,
/* Detect cases in which a scalar_store is really storing one element
in a scatter operation. */
if (kind == scalar_store
+ && node
&& sve_costs
- && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
return sve_costs->scatter_store_elt_cost;
/* Detect cases in which vec_to_scalar represents an in-loop reduction. */
@@ -17735,7 +17737,7 @@ aarch64_vector_costs::count_ops (unsigned int count, vect_cost_for_stmt kind,
if (stmt_info
&& kind == vec_to_scalar
&& (m_vec_flags & VEC_ADVSIMD)
- && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
{
auto dr = STMT_VINFO_DATA_REF (stmt_info);
tree dr_ref = DR_REF (dr);
@@ -17850,7 +17852,7 @@ aarch64_vector_costs::count_ops (unsigned int count, vect_cost_for_stmt kind,
if (stmt_info
&& sve_issue
&& (kind == scalar_load || kind == scalar_store)
- && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
{
unsigned int pairs = CEIL (count, 2);
ops->pred_ops += sve_issue->gather_scatter_pair_pred_ops * pairs;
@@ -18005,9 +18007,10 @@ aarch64_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
/* Check if we've seen an SVE gather/scatter operation and which size. */
if (kind == scalar_load
+ && node
&& vectype
&& aarch64_sve_mode_p (TYPE_MODE (vectype))
- && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
{
const sve_vec_cost *sve_costs = aarch64_tune_params.vec_costs->sve;
if (sve_costs)
@@ -20509,6 +20512,8 @@ aarch64_compare_version_priority (tree decl1, tree decl2)
unsigned long _size; // Size of the struct, so it can grow.
unsigned long _hwcap;
unsigned long _hwcap2;
+ unsigned long _hwcap3;
+ unsigned long _hwcap4;
}
*/
@@ -20525,14 +20530,24 @@ build_ifunc_arg_type ()
tree field3 = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
get_identifier ("_hwcap2"),
long_unsigned_type_node);
+ tree field4 = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+ get_identifier ("_hwcap3"),
+ long_unsigned_type_node);
+ tree field5 = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+ get_identifier ("_hwcap4"),
+ long_unsigned_type_node);
DECL_FIELD_CONTEXT (field1) = ifunc_arg_type;
DECL_FIELD_CONTEXT (field2) = ifunc_arg_type;
DECL_FIELD_CONTEXT (field3) = ifunc_arg_type;
+ DECL_FIELD_CONTEXT (field4) = ifunc_arg_type;
+ DECL_FIELD_CONTEXT (field5) = ifunc_arg_type;
TYPE_FIELDS (ifunc_arg_type) = field1;
DECL_CHAIN (field1) = field2;
DECL_CHAIN (field2) = field3;
+ DECL_CHAIN (field3) = field4;
+ DECL_CHAIN (field4) = field5;
layout_type (ifunc_arg_type);
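
A hedged sketch of the resolver-side view of this change: the structure laid out by build_ifunc_arg_type now carries two extra hwcap words, and a resolver should check _size before reading them. The field names mirror the patch; the struct name and the resolver itself are illustrative, not part of the patch (the public typedef normally comes from the C library's <sys/ifunc.h>).

    struct ifunc_arg_t              // illustrative mirror of the built type
    {
      unsigned long _size;          // size of the struct, so it can grow
      unsigned long _hwcap;
      unsigned long _hwcap2;
      unsigned long _hwcap3;        // new in this change
      unsigned long _hwcap4;        // new in this change
    };

    extern "C" void *
    my_resolver (unsigned long hwcap, const ifunc_arg_t *arg)
    {
      if (arg && arg->_size >= sizeof (ifunc_arg_t))
        {
          // arg->_hwcap3 and arg->_hwcap4 are safe to read here.
        }
      return nullptr;   // placeholder; a real resolver returns the chosen impl
    }
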
diff --git a/gcc/config/aarch64/tuning_models/generic_armv9_a.h b/gcc/config/aarch64/tuning_models/generic_armv9_a.h
index f76a250..9eb1a20 100644
--- a/gcc/config/aarch64/tuning_models/generic_armv9_a.h
+++ b/gcc/config/aarch64/tuning_models/generic_armv9_a.h
@@ -26,7 +26,7 @@
static const struct cpu_addrcost_table generic_armv9_a_addrcost_table =
{
{
- 1, /* hi */
+ 0, /* hi */
0, /* si */
0, /* di */
1, /* ti */
diff --git a/gcc/config/avr/avr-passes.cc b/gcc/config/avr/avr-passes.cc
index 6a88a27..69df6d2 100644
--- a/gcc/config/avr/avr-passes.cc
+++ b/gcc/config/avr/avr-passes.cc
@@ -4843,6 +4843,137 @@ avr_pass_fuse_add::execute1 (function *func)
//////////////////////////////////////////////////////////////////////////////
+// Fuse 2 move insns after combine.
+
+static const pass_data avr_pass_data_2moves =
+{
+ RTL_PASS, // type
+ "", // name (will be patched)
+ OPTGROUP_NONE, // optinfo_flags
+ TV_DF_SCAN, // tv_id
+ 0, // properties_required
+ 0, // properties_provided
+ 0, // properties_destroyed
+ 0, // todo_flags_start
+ 0 // todo_flags_finish
+};
+
+class avr_pass_2moves : public rtl_opt_pass
+{
+public:
+ avr_pass_2moves (gcc::context *ctxt, const char *name)
+ : rtl_opt_pass (avr_pass_data_2moves, ctxt)
+ {
+ this->name = name;
+ }
+
+ unsigned int execute (function *func) final override
+ {
+ if (optimize && avropt_fuse_move2)
+ {
+ bool changed = false;
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, func)
+ {
+ changed |= optimize_2moves_bb (bb);
+ }
+
+ if (changed)
+ {
+ df_note_add_problem ();
+ df_analyze ();
+ }
+ }
+
+ return 0;
+ }
+
+ bool optimize_2moves (rtx_insn *, rtx_insn *);
+ bool optimize_2moves_bb (basic_block);
+}; // avr_pass_2moves
+
+bool
+avr_pass_2moves::optimize_2moves_bb (basic_block bb)
+{
+ bool changed = false;
+ rtx_insn *insn1 = nullptr;
+ rtx_insn *insn2 = nullptr;
+ rtx_insn *curr;
+
+ FOR_BB_INSNS (bb, curr)
+ {
+ if (insn1 && INSN_P (insn1)
+ && insn2 && INSN_P (insn2))
+ changed |= optimize_2moves (insn1, insn2);
+
+ insn1 = insn2;
+ insn2 = curr;
+ }
+
+ return changed;
+}
+
+bool
+avr_pass_2moves::optimize_2moves (rtx_insn *insn1, rtx_insn *insn2)
+{
+ bool good = false;
+ bool bad = false;
+ rtx set1, dest1, src1;
+ rtx set2, dest2, src2;
+
+ if ((set1 = single_set (insn1))
+ && (set2 = single_set (insn2))
+ && (src1 = SET_SRC (set1))
+ && REG_P (src2 = SET_SRC (set2))
+ && REG_P (dest1 = SET_DEST (set1))
+ && REG_P (dest2 = SET_DEST (set2))
+ && rtx_equal_p (dest1, src2)
+ // Now we have:
+ // insn1: dest1 = src1
+ // insn2: dest2 = dest1
+ && REGNO (dest1) >= FIRST_PSEUDO_REGISTER
+ // Paranoia.
+ && GET_CODE (PATTERN (insn1)) != PARALLEL
+ && GET_CODE (PATTERN (insn2)) != PARALLEL
+ && (rtx_equal_p (dest2, src1)
+ || !reg_overlap_mentioned_p (dest2, src1)))
+ {
+ avr_dump ("\n;; Found 2moves:\n%r\n%r\n", insn1, insn2);
+ avr_dump (";; reg %d: insn uses uids:", REGNO (dest1));
+
+ // Go check that dest1 is used exactly once, namely by insn2.
+
+ df_ref use = DF_REG_USE_CHAIN (REGNO (dest1));
+ for (; use; use = DF_REF_NEXT_REG (use))
+ {
+ rtx_insn *user = DF_REF_INSN (use);
+ avr_dump (" %d", INSN_UID (user));
+ good |= INSN_UID (user) == INSN_UID (insn2);
+ bad |= INSN_UID (user) != INSN_UID (insn2);
+ }
+ avr_dump (".\n");
+
+ if (good && !bad
+ // Propagate src1 to insn2:
+ // insn1: # Deleted
+ // insn2: dest2 = src1
+ && validate_change (insn2, &SET_SRC (set2), src1, false))
+ {
+ SET_INSN_DELETED (insn1);
+ return true;
+ }
+ }
+
+ if (good && !bad)
+ avr_dump (";; Failed\n");
+
+ return false;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////////////
// Split insns with nonzero_bits() after combine.
static const pass_data avr_pass_data_split_nzb =
@@ -5704,6 +5835,14 @@ make_avr_pass_casesi (gcc::context *ctxt)
return new avr_pass_casesi (ctxt, "avr-casesi");
}
+// Optimize 2 consecutive moves after combine.
+
+rtl_opt_pass *
+make_avr_pass_2moves (gcc::context *ctxt)
+{
+ return new avr_pass_2moves (ctxt, "avr-2moves");
+}
+
rtl_opt_pass *
make_avr_pass_split_nzb (gcc::context *ctxt)
{
diff --git a/gcc/config/avr/avr-passes.def b/gcc/config/avr/avr-passes.def
index eb60a93..d668c7f 100644
--- a/gcc/config/avr/avr-passes.def
+++ b/gcc/config/avr/avr-passes.def
@@ -74,6 +74,14 @@ INSERT_PASS_BEFORE (pass_free_cfg, 1, avr_pass_recompute_notes);
INSERT_PASS_AFTER (pass_expand, 1, avr_pass_casesi);
+/* Insn combine may come up with superfluous reg-reg moves, where the combine
+ people say that these are no problem since reg-alloc is supposed to optimize
+ them. The issue is that the lower-subreg pass sitting between combine and
+ reg-alloc may split such moves, coming up with a zoo of subregs which are
+ only handled poorly by the register allocator. */
+
+INSERT_PASS_AFTER (pass_combine, 1, avr_pass_2moves);
+
/* Some combine insns have nonzero_bits() in their condition, though insns
should not use such stuff in their condition. Therefore, we split such
insn into something without nonzero_bits() in their condition right after
diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h
index ca30136..37911e7 100644
--- a/gcc/config/avr/avr-protos.h
+++ b/gcc/config/avr/avr-protos.h
@@ -208,6 +208,7 @@ extern rtl_opt_pass *make_avr_pass_casesi (gcc::context *);
extern rtl_opt_pass *make_avr_pass_ifelse (gcc::context *);
extern rtl_opt_pass *make_avr_pass_split_nzb (gcc::context *);
extern rtl_opt_pass *make_avr_pass_split_after_peephole2 (gcc::context *);
+extern rtl_opt_pass *make_avr_pass_2moves (gcc::context *);
#ifdef RTX_CODE
extern bool avr_casei_sequence_check_operands (rtx *xop);
extern bool avr_split_fake_addressing_move (rtx_insn *insn, rtx *operands);
diff --git a/gcc/config/avr/avr.cc b/gcc/config/avr/avr.cc
index 9468446..1fb59b6 100644
--- a/gcc/config/avr/avr.cc
+++ b/gcc/config/avr/avr.cc
@@ -14418,6 +14418,13 @@ avr_output_addr_vec (rtx_insn *labl, rtx table)
// Output the label that precedes the table.
ASM_OUTPUT_ALIGN (stream, 1);
+
+ char s_labl[40];
+ targetm.asm_out.generate_internal_label (s_labl, "L",
+ CODE_LABEL_NUMBER (labl));
+ ASM_OUTPUT_TYPE_DIRECTIVE (stream, s_labl,
+ AVR_HAVE_JMP_CALL ? "object" : "function");
+
targetm.asm_out.internal_label (stream, "L", CODE_LABEL_NUMBER (labl));
// Output the table's content.
diff --git a/gcc/config/avr/avr.opt b/gcc/config/avr/avr.opt
index 9883119..7f6f18c 100644
--- a/gcc/config/avr/avr.opt
+++ b/gcc/config/avr/avr.opt
@@ -164,6 +164,10 @@ mfuse-move=
Target Joined RejectNegative UInteger Var(avropt_fuse_move) Init(0) Optimization IntegerRange(0, 23)
-mfuse-move=<0,23> Optimization. Run a post-reload pass that tweaks move instructions.
+mfuse-move2
+Target Var(avropt_fuse_move2) Init(0) Optimization
+Optimization. Fuse some move insns after insn combine.
+
mabsdata
Target Mask(ABSDATA)
Assume that all data in static storage can be accessed by LDS / STS instructions. This option is only useful for reduced Tiny devices like ATtiny40.
diff --git a/gcc/config/avr/avr.opt.urls b/gcc/config/avr/avr.opt.urls
index 662fdee..87c26b2 100644
--- a/gcc/config/avr/avr.opt.urls
+++ b/gcc/config/avr/avr.opt.urls
@@ -92,6 +92,9 @@ UrlSuffix(gcc/AVR-Options.html#index-mfuse-move)
mfuse-move=
UrlSuffix(gcc/AVR-Options.html#index-mfuse-move)
+mfuse-move2
+UrlSuffix(gcc/AVR-Options.html#index-mfuse-move2)
+
mabsdata
UrlSuffix(gcc/AVR-Options.html#index-mabsdata)
diff --git a/gcc/config/i386/i386-features.cc b/gcc/config/i386/i386-features.cc
index c131577..53e86c8 100644
--- a/gcc/config/i386/i386-features.cc
+++ b/gcc/config/i386/i386-features.cc
@@ -3226,7 +3226,7 @@ remove_partial_avx_dependency (void)
break;
}
- /* Only hanlde conversion here. */
+ /* Only handle conversion here. */
machine_mode src_mode
= convert_p ? GET_MODE (XEXP (src, 0)) : VOIDmode;
switch (src_mode)
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index ca6bb83..09a35ef 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -3615,6 +3615,18 @@ ix86_handle_cconv_attribute (tree *node, tree name, tree args, int,
return NULL_TREE;
}
+ if (TARGET_64BIT)
+ {
+ /* Do not warn when emulating the MS ABI. */
+ if ((TREE_CODE (*node) != FUNCTION_TYPE
+ && TREE_CODE (*node) != METHOD_TYPE)
+ || ix86_function_type_abi (*node) != MS_ABI)
+ warning (OPT_Wattributes, "%qE attribute ignored",
+ name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
/* Can combine regparm with all attributes but fastcall, and thiscall. */
if (is_attribute_p ("regparm", name))
{
@@ -3627,7 +3639,7 @@ ix86_handle_cconv_attribute (tree *node, tree name, tree args, int,
if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
{
- error ("regparam and thiscall attributes are not compatible");
+ error ("regparm and thiscall attributes are not compatible");
}
cst = TREE_VALUE (args);
@@ -3648,19 +3660,7 @@ ix86_handle_cconv_attribute (tree *node, tree name, tree args, int,
return NULL_TREE;
}
- if (TARGET_64BIT)
- {
- /* Do not warn when emulating the MS ABI. */
- if ((TREE_CODE (*node) != FUNCTION_TYPE
- && TREE_CODE (*node) != METHOD_TYPE)
- || ix86_function_type_abi (*node) != MS_ABI)
- warning (OPT_Wattributes, "%qE attribute ignored",
- name);
- *no_add_attrs = true;
- return NULL_TREE;
- }
-
- /* Can combine fastcall with stdcall (redundant) and sseregparm. */
+ /* Can combine fastcall with sseregparm. */
if (is_attribute_p ("fastcall", name))
{
if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
@@ -3681,8 +3681,7 @@ ix86_handle_cconv_attribute (tree *node, tree name, tree args, int,
}
}
- /* Can combine stdcall with fastcall (redundant), regparm and
- sseregparm. */
+ /* Can combine stdcall with regparm and sseregparm. */
else if (is_attribute_p ("stdcall", name))
{
if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
@@ -3732,6 +3731,10 @@ ix86_handle_cconv_attribute (tree *node, tree name, tree args, int,
{
error ("cdecl and thiscall attributes are not compatible");
}
+ if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("regparm and thiscall attributes are not compatible");
+ }
}
/* Can combine sseregparm with all attributes. */
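
A hypothetical 32-bit x86 snippet showing what the reordered checks and the new diagnostic affect; the attribute combination is taken from the code above, the member function is made up:

    struct S
    {
      // Previously accepted silently when regparm appeared before thiscall;
      // with this change GCC reports
      // "regparm and thiscall attributes are not compatible".
      void __attribute__ ((regparm (2), thiscall)) f (int);
    };

    // On x86-64, moving the TARGET_64BIT check ahead of the regparm handling
    // means regparm now takes the same ignored-attribute path as the other
    // calling-convention attributes (warning with -Wattributes unless the
    // MS ABI is being emulated).
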
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index 613f2b2..65e04d3 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -20029,7 +20029,7 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
tree utype, ures, vce;
utype = unsigned_type_for (TREE_TYPE (arg0));
/* PABSB/W/D/Q store the unsigned result in dst, use ABSU_EXPR
- instead of ABS_EXPR to hanlde overflow case(TYPE_MIN). */
+ instead of ABS_EXPR to handle overflow case(TYPE_MIN). */
ures = gimple_build (&stmts, ABSU_EXPR, utype, arg0);
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
loc = gimple_location (stmt);
@@ -23153,7 +23153,7 @@ ix86_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
So current solution is make constant disp as cheap as possible. */
if (GET_CODE (addr) == PLUS
&& x86_64_immediate_operand (XEXP (addr, 1), Pmode)
- /* Only hanlde (reg + disp) since other forms of addr are mostly LEA,
+ /* Only handle (reg + disp) since other forms of addr are mostly LEA,
there's no additional cost for the plus of disp. */
&& register_operand (XEXP (addr, 0), Pmode))
{
diff --git a/gcc/config/riscv/arch-canonicalize b/gcc/config/riscv/arch-canonicalize
index fd55255..34dad45 100755
--- a/gcc/config/riscv/arch-canonicalize
+++ b/gcc/config/riscv/arch-canonicalize
@@ -32,7 +32,7 @@ import itertools
from functools import reduce
SUPPORTED_ISA_SPEC = ["2.2", "20190608", "20191213"]
-CANONICAL_ORDER = "imafdgqlcbkjtpvn"
+CANONICAL_ORDER = "imafdqlcbkjtpvnh"
LONG_EXT_PREFIXES = ['z', 's', 'h', 'x']
#
diff --git a/gcc/config/riscv/riscv-vector-costs.cc b/gcc/config/riscv/riscv-vector-costs.cc
index 1c6bc25..44ef44a 100644
--- a/gcc/config/riscv/riscv-vector-costs.cc
+++ b/gcc/config/riscv/riscv-vector-costs.cc
@@ -400,7 +400,7 @@ costs::compute_local_live_ranges (
pair &live_range
= live_ranges->get_or_insert (lhs, &existed_p);
gcc_assert (!existed_p);
- if (STMT_VINFO_MEMORY_ACCESS_TYPE (program_point.stmt_info)
+ if (SLP_TREE_MEMORY_ACCESS_TYPE (*node)
== VMAT_LOAD_STORE_LANES)
point = get_first_lane_point (program_points,
program_point.stmt_info);
@@ -418,8 +418,7 @@ costs::compute_local_live_ranges (
bool existed_p = false;
pair &live_range
= live_ranges->get_or_insert (var, &existed_p);
- if (STMT_VINFO_MEMORY_ACCESS_TYPE (
- program_point.stmt_info)
+ if (SLP_TREE_MEMORY_ACCESS_TYPE (*node)
== VMAT_LOAD_STORE_LANES)
point = get_last_lane_point (program_points,
program_point.stmt_info);
@@ -608,7 +607,7 @@ costs::need_additional_vector_vars_p (stmt_vec_info stmt_info,
if (type == load_vec_info_type || type == store_vec_info_type)
{
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
- && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) == VMAT_GATHER_SCATTER)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
return true;
machine_mode mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
@@ -1086,7 +1085,7 @@ costs::better_main_loop_than_p (const vector_costs *uncast_other) const
load/store. */
static int
segment_loadstore_group_size (enum vect_cost_for_stmt kind,
- stmt_vec_info stmt_info)
+ stmt_vec_info stmt_info, slp_tree node)
{
if (stmt_info
&& (kind == vector_load || kind == vector_store)
@@ -1094,7 +1093,7 @@ segment_loadstore_group_size (enum vect_cost_for_stmt kind,
{
stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
if (stmt_info
- && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) == VMAT_LOAD_STORE_LANES)
+ && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_LOAD_STORE_LANES)
return DR_GROUP_SIZE (stmt_info);
}
return 0;
@@ -1108,7 +1107,7 @@ segment_loadstore_group_size (enum vect_cost_for_stmt kind,
unsigned
costs::adjust_stmt_cost (enum vect_cost_for_stmt kind, loop_vec_info loop,
stmt_vec_info stmt_info,
- slp_tree, tree vectype, int stmt_cost)
+ slp_tree node, tree vectype, int stmt_cost)
{
const cpu_vector_cost *costs = get_vector_costs ();
switch (kind)
@@ -1131,7 +1130,8 @@ costs::adjust_stmt_cost (enum vect_cost_for_stmt kind, loop_vec_info loop,
each vector in the group. Here we additionally add permute
costs for each. */
/* TODO: Indexed and ordered/unordered cost. */
- int group_size = segment_loadstore_group_size (kind, stmt_info);
+ int group_size = segment_loadstore_group_size (kind, stmt_info,
+ node);
if (group_size > 1)
{
switch (group_size)
diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
index 1c60695..764b499 100644
--- a/gcc/config/rs6000/rs6000.cc
+++ b/gcc/config/rs6000/rs6000.cc
@@ -10320,15 +10320,18 @@ can_be_rotated_to_negative_lis (HOST_WIDE_INT c, int *rot)
/* case b. xx0..01..1xx: some of 15 x's (and some of 16 0's) are
rotated over the highest bit. */
- int pos_one = clz_hwi ((c << 16) >> 16);
- middle_zeros = ctz_hwi (c >> (HOST_BITS_PER_WIDE_INT - pos_one));
- int middle_ones = clz_hwi (~(c << pos_one));
- if (middle_zeros >= 16 && middle_ones >= 33)
+ unsigned HOST_WIDE_INT uc = c;
+ int pos_one = clz_hwi ((HOST_WIDE_INT) (uc << 16) >> 16);
+ if (pos_one != 0)
{
- *rot = pos_one;
- return true;
+ middle_zeros = ctz_hwi (c >> (HOST_BITS_PER_WIDE_INT - pos_one));
+ int middle_ones = clz_hwi (~(uc << pos_one));
+ if (middle_zeros >= 16 && middle_ones >= 33)
+ {
+ *rot = pos_one;
+ return true;
+ }
}
-
return false;
}
@@ -10445,7 +10448,8 @@ can_be_built_by_li_and_rldic (HOST_WIDE_INT c, int *shift, HOST_WIDE_INT *mask)
if (lz >= HOST_BITS_PER_WIDE_INT)
return false;
- int middle_ones = clz_hwi (~(c << lz));
+ unsigned HOST_WIDE_INT uc = c;
+ int middle_ones = clz_hwi (~(uc << lz));
if (tz + lz + middle_ones >= ones
&& (tz - lz) < HOST_BITS_PER_WIDE_INT
&& tz < HOST_BITS_PER_WIDE_INT)
@@ -10479,7 +10483,7 @@ can_be_built_by_li_and_rldic (HOST_WIDE_INT c, int *shift, HOST_WIDE_INT *mask)
if (!IN_RANGE (pos_first_1, 1, HOST_BITS_PER_WIDE_INT-1))
return false;
- middle_ones = clz_hwi (~c << pos_first_1);
+ middle_ones = clz_hwi ((~(unsigned HOST_WIDE_INT) c) << pos_first_1);
middle_zeros = ctz_hwi (c >> (HOST_BITS_PER_WIDE_INT - pos_first_1));
if (pos_first_1 < HOST_BITS_PER_WIDE_INT
&& middle_ones + middle_zeros < HOST_BITS_PER_WIDE_INT
@@ -10581,7 +10585,8 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c, int *num_insns)
{
/* li/lis; rldicX */
unsigned HOST_WIDE_INT imm = (c | ~mask);
- imm = (imm >> shift) | (imm << (HOST_BITS_PER_WIDE_INT - shift));
+ if (shift != 0)
+ imm = (imm >> shift) | (imm << (HOST_BITS_PER_WIDE_INT - shift));
count_or_emit_insn (temp, GEN_INT (imm));
if (shift != 0)
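
The rs6000 hunks above all address the same class of C/C++ undefined behaviour: left-shifting a negative HOST_WIDE_INT, and rotating by a count of zero (which becomes a shift by the full word width). A minimal stand-alone sketch of the two patterns and their fixes, with long long as a stand-in for HOST_WIDE_INT:

    // Shift of a possibly-negative value: do it on the unsigned representation.
    long long shl16 (long long c)
    {
      unsigned long long uc = (unsigned long long) c;
      return (long long) (uc << 16);           // was: c << 16 (UB if c < 0)
    }

    // Rotate: guard the count so we never shift by the full width (64).
    unsigned long long rot_right (unsigned long long imm, int shift)
    {
      if (shift != 0)                          // was unconditional (UB at 0)
        imm = (imm >> shift) | (imm << (64 - shift));
      return imm;
    }
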
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 9c718ca..e31ee40 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -1969,7 +1969,7 @@
[(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
(set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
{
- HOST_WIDE_INT val = INTVAL (operands[2]);
+ unsigned HOST_WIDE_INT val = UINTVAL (operands[2]);
HOST_WIDE_INT low = sext_hwi (val, 16);
HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 7f1e515..d9e6025 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,32 @@
+2025-07-31 Jason Merrill <jason@redhat.com>
+
+ PR c++/120800
+ * constexpr.cc (cxx_eval_vec_init_1): Suppress access control.
+
+2025-07-31 Marek Polacek <polacek@redhat.com>
+
+ PR c++/120775
+ * constexpr.cc (cxx_eval_outermost_constant_expr): Use
+ extract_call_expr.
+ * cp-tree.h (CONSTEVAL_BLOCK_P, LAMBDA_EXPR_CONSTEVAL_BLOCK_P): Define.
+ (finish_static_assert): Adjust declaration.
+ (current_nonlambda_function): Likewise.
+ * lambda.cc (current_nonlambda_function): New parameter. Only keep
+ iterating if the function represents a consteval block.
+ * parser.cc (cp_parser_lambda_expression): New parameter for
+ consteval blocks. Use it. Set LAMBDA_EXPR_CONSTEVAL_BLOCK_P.
+ (cp_parser_lambda_declarator_opt): Likewise.
+ (build_empty_string): New.
+ (cp_parser_next_tokens_are_consteval_block_p): New.
+ (cp_parser_consteval_block): New.
+ (cp_parser_block_declaration): Handle consteval blocks.
+ (cp_parser_static_assert): Use build_empty_string.
+ (cp_parser_member_declaration): Handle consteval blocks.
+ * pt.cc (tsubst_stmt): Adjust a call to finish_static_assert.
+ * semantics.cc (finish_fname): Warn for consteval blocks.
+ (finish_static_assert): New parameter for consteval blocks. Set
+ CONSTEVAL_BLOCK_P. Evaluate consteval blocks specially.
+
2025-07-30 Nathaniel Shead <nathanieloshead@gmail.com>
PR c++/121291
diff --git a/gcc/cp/constexpr.cc b/gcc/cp/constexpr.cc
index f92beb1..be24ae2 100644
--- a/gcc/cp/constexpr.cc
+++ b/gcc/cp/constexpr.cc
@@ -6598,6 +6598,9 @@ cxx_eval_vec_init_1 (const constexpr_ctx *ctx, tree atype, tree init,
return cxx_eval_bare_aggregate (ctx, init, lval,
non_constant_p, overflow_p, jump_target);
+ /* We already checked access when building the VEC_INIT_EXPR. */
+ deferring_access_check_sentinel acs (dk_deferred);
+
/* For the default constructor, build up a call to the default
constructor of the element type. We only need to handle class types
here, as for a constructor to be constexpr, all members must be
@@ -10329,11 +10332,14 @@ cxx_eval_outermost_constant_expr (tree t, bool allow_non_constant,
{
if (cxx_dialect < cxx20)
return t;
- if (TREE_CODE (t) != CALL_EXPR && TREE_CODE (t) != AGGR_INIT_EXPR)
+ /* We could have a COMPOUND_EXPR here coming from
+ keep_unused_object_arg. */
+ tree x = extract_call_expr (t);
+ if (x == NULL_TREE || x == error_mark_node)
return t;
/* Calls to immediate functions returning void need to be
evaluated. */
- tree fndecl = cp_get_callee_fndecl_nofold (t);
+ tree fndecl = cp_get_callee_fndecl_nofold (x);
if (fndecl == NULL_TREE || !DECL_IMMEDIATE_FUNCTION_P (fndecl))
return t;
else
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 0ac3ecb..fb8e0d8 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -453,6 +453,8 @@ extern GTY(()) tree cp_global_trees[CPTI_MAX];
RETURN_EXPR_LOCAL_ADDR_P (in RETURN_EXPR)
PACK_INDEX_PARENTHESIZED_P (in PACK_INDEX_*)
MUST_NOT_THROW_NOEXCEPT_P (in MUST_NOT_THROW_EXPR)
+ CONSTEVAL_BLOCK_P (in STATIC_ASSERT)
+ LAMBDA_EXPR_CONSTEVAL_BLOCK_P (in LAMBDA_EXPR)
1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
@@ -1433,6 +1435,10 @@ struct GTY (()) tree_deferred_noexcept {
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
+/* True if this static assert represents a C++26 consteval block. */
+#define CONSTEVAL_BLOCK_P(NODE) \
+ TREE_LANG_FLAG_0 (STATIC_ASSERT_CHECK (NODE))
+
struct GTY (()) tree_static_assert {
struct tree_base base;
tree condition;
@@ -1547,6 +1553,10 @@ enum cp_lambda_default_capture_mode_type {
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
+/* True iff this lambda was created for a consteval block. */
+#define LAMBDA_EXPR_CONSTEVAL_BLOCK_P(NODE) \
+ TREE_LANG_FLAG_0 (LAMBDA_EXPR_CHECK (NODE))
+
/* True iff uses of a const variable capture were optimized away. */
#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))
@@ -8243,7 +8253,7 @@ extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
- bool, bool);
+ bool, bool, bool = false);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree fold_builtin_is_corresponding_member (location_t, int, tree *);
extern tree fold_builtin_is_pointer_inverconvertible_with_class (location_t, int, tree *);
@@ -8268,7 +8278,7 @@ extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree, int);
extern void maybe_generic_this_capture (tree, tree);
extern tree maybe_resolve_dummy (tree, bool);
-extern tree current_nonlambda_function (void);
+extern tree current_nonlambda_function (bool = false);
extern tree nonlambda_method_basetype (void);
extern tree current_nonlambda_scope (bool = false);
extern tree current_lambda_expr (void);
diff --git a/gcc/cp/lambda.cc b/gcc/cp/lambda.cc
index ecf55eb..c798967 100644
--- a/gcc/cp/lambda.cc
+++ b/gcc/cp/lambda.cc
@@ -1038,13 +1038,19 @@ maybe_generic_this_capture (tree object, tree fns)
}
}
-/* Returns the innermost non-lambda function. */
+/* Returns the innermost non-lambda function. If ONLY_SKIP_CONSTEVAL_BLOCK_P,
+ we only skip lambda functions that represent consteval blocks. */
tree
-current_nonlambda_function (void)
+current_nonlambda_function (bool only_skip_consteval_block_p/*=false*/)
{
tree fn = current_function_decl;
- while (fn && LAMBDA_FUNCTION_P (fn))
+ tree lam;
+ while (fn && LAMBDA_FUNCTION_P (fn)
+ && (!only_skip_consteval_block_p
+ /* Only keep going if FN represents a consteval block. */
+ || ((lam = CLASSTYPE_LAMBDA_EXPR (CP_DECL_CONTEXT (fn)))
+ && LAMBDA_EXPR_CONSTEVAL_BLOCK_P (lam))))
fn = decl_function_context (fn);
return fn;
}
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index 2f6a8ab..9412f78 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -6560,8 +6560,14 @@ trees_out::core_vals (tree t)
}
WT (t->function_decl.personality);
- WT (t->function_decl.function_specific_target);
- WT (t->function_decl.function_specific_optimization);
+ /* Rather than streaming target/optimize nodes, we should reconstruct
+ them on stream-in from any attributes applied to the function. */
+ if (streaming_p () && t->function_decl.function_specific_target)
+ warning_at (DECL_SOURCE_LOCATION (t), 0,
+ "%<target%> attribute currently unsupported in modules");
+ if (streaming_p () && t->function_decl.function_specific_optimization)
+ warning_at (DECL_SOURCE_LOCATION (t), 0,
+ "%<optimize%> attribute currently unsupported in modules");
WT (t->function_decl.vindex);
if (DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (t))
@@ -6651,11 +6657,12 @@ trees_out::core_vals (tree t)
case TARGET_OPTION_NODE:
// FIXME: Our representation for these two nodes is a cache of
// the resulting set of options. Not a record of the options
- // that got changed by a particular attribute or pragma. Should
- // we record that, or should we record the diff from the command
- // line options? The latter seems the right behaviour, but is
- // (a) harder, and I guess could introduce strangeness if the
- // importer has set some incompatible set of optimization flags?
+ // that got changed by a particular attribute or pragma. Instead
+ // of recording that, we probably should just rebuild the options
+ // on stream-in from the function attributes. This could introduce
+ // strangeness if the importer has some incompatible set of flags
+ // but we currently assume users "know what they're doing" in such
+ // a case anyway.
gcc_unreachable ();
break;
@@ -7114,8 +7121,10 @@ trees_in::core_vals (tree t)
}
RT (t->function_decl.personality);
- RT (t->function_decl.function_specific_target);
- RT (t->function_decl.function_specific_optimization);
+ /* These properties are not streamed, and should be reconstructed
+ from any function attributes. */
+ // t->function_decl.function_specific_target);
+ // t->function_decl.function_specific_optimization);
RT (t->function_decl.vindex);
if (DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (t))
@@ -7221,7 +7230,7 @@ trees_in::core_vals (tree t)
case OPTIMIZATION_NODE:
case TARGET_OPTION_NODE:
- /* Not yet implemented, see trees_out::core_vals. */
+ /* Not implemented, see trees_out::core_vals. */
gcc_unreachable ();
break;
@@ -11164,6 +11173,20 @@ trees_in::fn_parms_fini (int tag, tree fn, tree existing, bool is_defn)
names of the parms from us. */
DECL_NAME (existing_parm) = DECL_NAME (parm);
DECL_SOURCE_LOCATION (existing_parm) = DECL_SOURCE_LOCATION (parm);
+
+ /* And some other flags important for codegen are only set
+ by the definition. */
+ TREE_ADDRESSABLE (existing_parm) = TREE_ADDRESSABLE (parm);
+ DECL_BY_REFERENCE (existing_parm) = DECL_BY_REFERENCE (parm);
+ DECL_NONLOCAL (existing_parm) = DECL_NONLOCAL (parm);
+ DECL_ARG_TYPE (existing_parm) = DECL_ARG_TYPE (parm);
+
+ /* Invisiref parms had their types adjusted by cp_genericize. */
+ if (DECL_BY_REFERENCE (parm))
+ {
+ TREE_TYPE (existing_parm) = TREE_TYPE (parm);
+ relayout_decl (existing_parm);
+ }
}
back_refs[~tag] = existing_parm;
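
With the change to trees_out::core_vals above, target and optimize option
nodes attached to a function are no longer streamed; the writer warns and the
options are expected to be rebuilt from the function's attributes on import.
A hedged illustration of what now triggers the diagnostic (module name, flags
and attribute are assumptions; an x86 target is assumed):

    // m.cc -- built with, e.g., g++ -std=c++20 -fmodules-ts -c m.cc
    export module m;

    [[gnu::target ("avx2")]]        // warning: 'target' attribute currently
    export int f () { return 1; }   //          unsupported in modules
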
diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc
index eb66427..860f3f0 100644
--- a/gcc/cp/parser.cc
+++ b/gcc/cp/parser.cc
@@ -2576,11 +2576,11 @@ static cp_expr cp_parser_constant_expression
static cp_expr cp_parser_builtin_offsetof
(cp_parser *);
static cp_expr cp_parser_lambda_expression
- (cp_parser *);
+ (cp_parser *, bool = false);
static void cp_parser_lambda_introducer
(cp_parser *, tree);
static bool cp_parser_lambda_declarator_opt
- (cp_parser *, tree);
+ (cp_parser *, tree, bool = false);
static void cp_parser_lambda_body
(cp_parser *, tree);
@@ -11744,10 +11744,14 @@ cp_parser_trait (cp_parser* parser, const cp_trait* trait)
lambda-introducer < template-parameter-list > requires-clause [opt]
lambda-declarator [opt] compound-statement
+ If CONSTEVAL_BLOCK_P is true, we are parsing a consteval block, which
+ is syntactic sugar for a consteval lambda.
+
Returns a representation of the expression. */
static cp_expr
-cp_parser_lambda_expression (cp_parser* parser)
+cp_parser_lambda_expression (cp_parser* parser,
+ bool consteval_block_p/*=false*/)
{
tree lambda_expr = build_lambda_expr ();
tree type;
@@ -11756,6 +11760,7 @@ cp_parser_lambda_expression (cp_parser* parser)
cp_token_position start = 0;
LAMBDA_EXPR_LOCATION (lambda_expr) = token->location;
+ LAMBDA_EXPR_CONSTEVAL_BLOCK_P (lambda_expr) = consteval_block_p;
if (cxx_dialect >= cxx20)
{
@@ -11799,9 +11804,12 @@ cp_parser_lambda_expression (cp_parser* parser)
it now. */
push_deferring_access_checks (dk_no_deferred);
- cp_parser_lambda_introducer (parser, lambda_expr);
- if (cp_parser_error_occurred (parser))
- return error_mark_node;
+ if (!consteval_block_p)
+ {
+ cp_parser_lambda_introducer (parser, lambda_expr);
+ if (cp_parser_error_occurred (parser))
+ return error_mark_node;
+ }
{
/* OK, this is a bit tricksy. cp_parser_requires_expression sets
@@ -11873,7 +11881,8 @@ cp_parser_lambda_expression (cp_parser* parser)
if (cp_parser_start_tentative_firewall (parser))
start = token;
- ok &= cp_parser_lambda_declarator_opt (parser, lambda_expr);
+ ok &= cp_parser_lambda_declarator_opt (parser, lambda_expr,
+ consteval_block_p);
if (ok && cp_parser_error_occurred (parser))
ok = false;
@@ -12256,10 +12265,13 @@ cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
decl-specifier-seq [opt] noexcept-specifier [opt]
attribute-specifier-seq [opt] trailing-return-type [opt]
- LAMBDA_EXPR is the current representation of the lambda expression. */
+ LAMBDA_EXPR is the current representation of the lambda expression.
+ If CONSTEVAL_BLOCK_P is true, we are parsing a consteval block, which
+ is syntactic sugar for a consteval lambda. */
static bool
-cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
+cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr,
+ bool consteval_block_p/*=false*/)
{
/* 5.1.1.4 of the standard says:
If a lambda-expression does not include a lambda-declarator, it is as if
@@ -12362,6 +12374,18 @@ cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
CP_PARSER_FLAGS_ONLY_MUTABLE_OR_CONSTEXPR,
&lambda_specs, &declares_class_or_enum);
+ /* [dcl.pre] For a consteval-block-declaration D, the expression E
+ corresponding to D is:
+ [] -> void static consteval compound-statement ()
+ Make it so. */
+ if (consteval_block_p)
+ {
+ return_type = void_type_node;
+ lambda_specs.storage_class = sc_static;
+ set_and_check_decl_spec_loc (&lambda_specs, ds_consteval,
+ cp_lexer_peek_token (parser->lexer));
+ }
+
if (omitted_parms_loc && lambda_specs.any_specifiers_p)
{
pedwarn (omitted_parms_loc, OPT_Wc__23_extensions,
@@ -16300,6 +16324,56 @@ cp_parser_toplevel_declaration (cp_parser* parser)
cp_parser_declaration (parser, NULL_TREE);
}
+/* Build an empty string for static_assert. */
+
+static tree
+build_empty_string ()
+{
+ tree message = build_string (1, "");
+ TREE_TYPE (message) = char_array_type_node;
+ fix_string_type (message);
+ return message;
+}
+
+/* Return true iff the next tokens start a C++26 consteval block. */
+
+static bool
+cp_parser_next_tokens_are_consteval_block_p (cp_parser *parser)
+{
+ return (cxx_dialect >= cxx26
+ && cp_lexer_next_token_is_keyword (parser->lexer, RID_CONSTEVAL)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_OPEN_BRACE));
+}
+
+/* Parse a consteval-block-declaration.
+
+ consteval-block-declaration:
+ consteval compound-statement
+
+ If MEMBER_P, this consteval block is a member declaration. */
+
+static void
+cp_parser_consteval_block (cp_parser *parser, bool member_p)
+{
+ const location_t loc = cp_lexer_peek_token (parser->lexer)->location;
+ /* Consume the 'consteval'. */
+ cp_lexer_consume_token (parser->lexer);
+
+ /* We know the next token is '{'. Let cp_parser_lambda_body handle it. */
+ cp_expr lam = cp_parser_lambda_expression (parser,
+ /*consteval_block_p=*/true);
+ if (!cp_parser_error_occurred (parser))
+ {
+ releasing_vec args;
+ tree call = finish_call_expr (lam, &args,
+ /*disallow_virtual=*/false,
+ /*koenig_p=*/false,
+ tf_warning_or_error);
+ finish_static_assert (call, build_empty_string (), loc, member_p,
+ /*show_expr_p=*/false, /*consteval_block_p=*/true);
+ }
+}
+
/* Parse a block-declaration.
block-declaration:
@@ -16307,18 +16381,18 @@ cp_parser_toplevel_declaration (cp_parser* parser)
asm-definition
namespace-alias-definition
using-declaration
+ using-enum-declaration
using-directive
+ static_assert-declaration
+ consteval-block-declaration
+ alias-declaration
+ opaque-enum-declaration
GNU Extension:
block-declaration:
__extension__ block-declaration
- C++0x Extension:
-
- block-declaration:
- static_assert-declaration
-
If STATEMENT_P is TRUE, then this block-declaration is occurring as
part of a declaration-statement. */
@@ -16396,6 +16470,8 @@ cp_parser_block_declaration (cp_parser *parser,
/* If the next token is `static_assert' we have a static assertion. */
else if (token1->keyword == RID_STATIC_ASSERT)
cp_parser_static_assert (parser, /*member_p=*/false);
+ else if (cp_parser_next_tokens_are_consteval_block_p (parser))
+ cp_parser_consteval_block (parser, /*member_p=*/false);
else
{
size_t attr_idx = cp_parser_skip_std_attribute_spec_seq (parser, 1);
@@ -17699,9 +17775,7 @@ cp_parser_static_assert (cp_parser *parser, bool member_p)
"only available with %<-std=c++17%> or %<-std=gnu++17%>");
/* Eat the ')' */
cp_lexer_consume_token (parser->lexer);
- message = build_string (1, "");
- TREE_TYPE (message) = char_array_type_node;
- fix_string_type (message);
+ message = build_empty_string ();
}
else
{
@@ -28831,12 +28905,20 @@ cp_parser_member_specification_opt (cp_parser* parser)
/* Parse a member-declaration.
member-declaration:
- decl-specifier-seq [opt] member-declarator-list [opt] ;
- function-definition ; [opt]
- :: [opt] nested-name-specifier template [opt] unqualified-id ;
+ attribute-specifier-seq [opt] decl-specifier-seq [opt]
+ member-declarator-list [opt] ;
+ function-definition
+ friend-type-declaration
using-declaration
+ using-enum-declaration
+ static_assert-declaration
+ consteval-block-declaration
template-declaration
+ explicit-specialization
+ deduction-guide
alias-declaration
+ opaque-enum-declaration
+ empty-declaration
member-declarator-list:
member-declarator
@@ -28855,12 +28937,7 @@ cp_parser_member_specification_opt (cp_parser* parser)
member-declarator:
declarator attributes [opt] pure-specifier [opt]
declarator attributes [opt] constant-initializer [opt]
- identifier [opt] attributes [opt] : constant-expression
-
- C++0x Extensions:
-
- member-declaration:
- static_assert-declaration */
+ identifier [opt] attributes [opt] : constant-expression */
static void
cp_parser_member_declaration (cp_parser* parser)
@@ -28960,6 +29037,12 @@ cp_parser_member_declaration (cp_parser* parser)
return;
}
+ if (cp_parser_next_tokens_are_consteval_block_p (parser))
+ {
+ cp_parser_consteval_block (parser, /*member_p=*/true);
+ return;
+ }
+
parser->colon_corrects_to_scope_p = false;
cp_omp_declare_simd_data odsd;
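
Taken together, the parser changes implement the desugaring quoted from
[dcl.pre]: a consteval block is parsed as an immediately invoked lambda that
is static, consteval and returns void, and the resulting call is handed to
finish_static_assert, which either evaluates it on the spot or records it as
a STATIC_ASSERT marked CONSTEVAL_BLOCK_P for later instantiation.  A hedged
source-level sketch (requires -std=c++26 and this patch):

    consteval void check () { /* compile-time validation */ }

    // The block below behaves as if the expression
    //   [] -> void static consteval { check (); } ()
    // were evaluated exactly once at this point.
    consteval { check (); }          // namespace-scope consteval block

    struct S {
      consteval { check (); }        // member-declaration form
    };
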
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index 0b7a05c..acfeb81 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -19601,7 +19601,8 @@ tsubst_stmt (tree t, tree args, tsubst_flags_t complain, tree in_decl)
finish_static_assert (condition, message,
STATIC_ASSERT_SOURCE_LOCATION (t),
- /*member_p=*/false, /*show_expr_p=*/true);
+ /*member_p=*/false, /*show_expr_p=*/true,
+ CONSTEVAL_BLOCK_P (t));
}
break;
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index 52ecac4..be79b50 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -3992,9 +3992,15 @@ finish_compound_literal (tree type, tree compound_literal,
tree
finish_fname (tree id)
{
- tree decl;
-
- decl = fname_decl (input_location, C_RID_CODE (id), id);
+ tree decl = fname_decl (input_location, C_RID_CODE (id), id);
+ /* [expr.prim.lambda.closure]/16 "Unless the compound-statement is that
+ of a consteval-block-declaration, a variable __func__ is implicitly
+ defined...". We could be in a consteval block in a function, though,
+ and then we shouldn't warn. */
+ if (current_function_decl
+ && !current_nonlambda_function (/*only_skip_consteval_block_p=*/true))
+ pedwarn (input_location, 0, "%qD is not defined outside of function scope",
+ decl);
if (processing_template_decl && current_function_decl
&& decl != error_mark_node)
decl = DECL_NAME (decl);
@@ -12598,11 +12604,14 @@ cexpr_str::extract (location_t location, const char * & msg, int &len)
CONDITION and the message text MESSAGE. LOCATION is the location
of the static assertion in the source code. When MEMBER_P, this
static assertion is a member of a class. If SHOW_EXPR_P is true,
- print the condition (because it was instantiation-dependent). */
+ print the condition (because it was instantiation-dependent).
+ If CONSTEVAL_BLOCK_P is true, this static assertion represents
+ a consteval block. */
void
finish_static_assert (tree condition, tree message, location_t location,
- bool member_p, bool show_expr_p)
+ bool member_p, bool show_expr_p,
+ bool consteval_block_p/*=false*/)
{
tsubst_flags_t complain = tf_warning_or_error;
@@ -12630,6 +12639,7 @@ finish_static_assert (tree condition, tree message, location_t location,
STATIC_ASSERT_CONDITION (assertion) = orig_condition;
STATIC_ASSERT_MESSAGE (assertion) = cstr.message;
STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;
+ CONSTEVAL_BLOCK_P (assertion) = consteval_block_p;
if (member_p)
maybe_add_class_template_decl_list (current_class_type,
@@ -12641,6 +12651,13 @@ finish_static_assert (tree condition, tree message, location_t location,
return;
}
+ /* Evaluate the consteval { }. This must be done only once. */
+ if (consteval_block_p)
+ {
+ cxx_constant_value (condition);
+ return;
+ }
+
/* Fold the expression and convert it to a boolean value. */
condition = contextual_conv_bool (condition, complain);
condition = fold_non_dependent_expr (condition, complain,
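
The finish_fname change relies on the new current_nonlambda_function
argument: __func__ inside a consteval block is only diagnosed when, after
skipping consteval-block lambdas, no enclosing function remains.  A hedged
example of both cases (again assuming -std=c++26 and this patch):

    consteval {
      // const char *p = __func__;   // pedwarn: '__func__' is not defined
                                     //          outside of function scope
    }

    void f ()
    {
      consteval {
        (void) __func__;             // OK: the enclosing non-lambda function is f
      }
    }
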
diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
index 09ea87a..f4cba09 100644
--- a/gcc/doc/install.texi
+++ b/gcc/doc/install.texi
@@ -302,7 +302,7 @@ released on 2017-05-06.
The gcobol documentation is maintained as manpages using troff
mdoc. GNU groff is required to convert them to PDF format. Conversion
to HTML is done with mandoc, available at
-@uref{http://mdocml.bsd.lv/}.
+@uref{https://mandoc.bsd.lv}.
Because ISO COBOL defines strict requirements for numerical precision,
gcobol requires hardware with 128-bit computation instructions. This
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e442a9c..c1e708b 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -915,7 +915,7 @@ Objective-C and Objective-C++ Dialects}.
@emph{AVR Options} (@ref{AVR Options})
@gccoptlist{-mmcu=@var{mcu} -mabsdata -maccumulate-args -mcvt
-mbranch-cost=@var{cost} -mfuse-add=@var{level} -mfuse-move=@var{level}
--mcall-prologues -mgas-isr-prologues -mint8 -mflmap
+-mfuse-move2 -mcall-prologues -mgas-isr-prologues -mint8 -mflmap
-mdouble=@var{bits} -mlong-double=@var{bits} -mno-call-main
-mn_flash=@var{size} -mfract-convert-truncate -mno-interrupts
-mmain-is-OS_task -mrelax -mrmw -mstrict-X -mtiny-stack
@@ -25110,6 +25110,10 @@ Valid values for @var{level} are in the range @code{0} @dots{} @code{23}
which is a 3:2:2:2 mixed radix value. Each digit controls some
aspect of the optimization.
+@opindex mfuse-move2
+@item -mfuse-move2
+Run a post-combine optimization pass that tries to fuse move instructions.
+
@opindex mstrict-X
@item -mstrict-X
Use address register @code{X} in a way proposed by the hardware. This means
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 70b93dd..78b0400a 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,23 @@
+2025-07-31 Mikael Morin <morin-mikael@orange.fr>
+
+ PR fortran/121342
+ * trans-expr.cc (gfc_conv_subref_array_arg): Remove offset
+ update.
+ (gfc_conv_procedure_call): For polymorphic functions, move the
+ scalarizer descriptor information...
+ * trans-array.cc (gfc_add_loop_ss_code): ... here, and evaluate
+ the bounds to fresh variables.
+ (get_class_info_from_ss): Remove offset update.
+ (gfc_conv_ss_startstride): Don't set a zero value for function
+ result upper bounds.
+ (late_set_loop_bounds): New.
+ (gfc_conv_loop_setup): If the bounds of a function result have
+ been set, and no other array provided loop bounds for a
+ dimension, use the function result bounds as loop bounds for
+ that dimension.
+ (gfc_set_delta): Don't skip delta setting for polymorphic
+ function results.
+
2025-07-30 Mikael Morin <morin-mikael@orange.fr>
* trans-array.cc (gfc_array_init_size): Remove the nelems
diff --git a/gcc/fortran/trans-array.cc b/gcc/fortran/trans-array.cc
index 0f7637d..990aaaf 100644
--- a/gcc/fortran/trans-array.cc
+++ b/gcc/fortran/trans-array.cc
@@ -1426,12 +1426,6 @@ get_class_info_from_ss (stmtblock_t * pre, gfc_ss *ss, tree *eltype,
tmp2 = gfc_class_len_get (class_expr);
gfc_add_modify (pre, tmp, tmp2);
}
-
- if (rhs_function)
- {
- tmp = gfc_class_data_get (class_expr);
- gfc_conv_descriptor_offset_set (pre, tmp, gfc_index_zero_node);
- }
}
else if (rhs_ss->info->data.array.descriptor)
{
@@ -3372,18 +3366,51 @@ gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript,
break;
case GFC_SS_FUNCTION:
- /* Array function return value. We call the function and save its
- result in a temporary for use inside the loop. */
- gfc_init_se (&se, NULL);
- se.loop = loop;
- se.ss = ss;
- if (gfc_is_class_array_function (expr))
- expr->must_finalize = 1;
- gfc_conv_expr (&se, expr);
- gfc_add_block_to_block (&outer_loop->pre, &se.pre);
- gfc_add_block_to_block (&outer_loop->post, &se.post);
- gfc_add_block_to_block (&outer_loop->post, &se.finalblock);
- ss_info->string_length = se.string_length;
+ {
+ /* Array function return value. We call the function and save its
+ result in a temporary for use inside the loop. */
+ gfc_init_se (&se, NULL);
+ se.loop = loop;
+ se.ss = ss;
+ bool class_func = gfc_is_class_array_function (expr);
+ if (class_func)
+ expr->must_finalize = 1;
+ gfc_conv_expr (&se, expr);
+ gfc_add_block_to_block (&outer_loop->pre, &se.pre);
+ if (class_func
+ && se.expr
+ && GFC_CLASS_TYPE_P (TREE_TYPE (se.expr)))
+ {
+ tree tmp = gfc_class_data_get (se.expr);
+ info->descriptor = tmp;
+ info->data = gfc_conv_descriptor_data_get (tmp);
+ info->offset = gfc_conv_descriptor_offset_get (tmp);
+ for (gfc_ss *s = ss; s; s = s->parent)
+ for (int n = 0; n < s->dimen; n++)
+ {
+ int dim = s->dim[n];
+ tree tree_dim = gfc_rank_cst[dim];
+
+ tree start;
+ start = gfc_conv_descriptor_lbound_get (tmp, tree_dim);
+ start = gfc_evaluate_now (start, &outer_loop->pre);
+ info->start[dim] = start;
+
+ tree end;
+ end = gfc_conv_descriptor_ubound_get (tmp, tree_dim);
+ end = gfc_evaluate_now (end, &outer_loop->pre);
+ info->end[dim] = end;
+
+ tree stride;
+ stride = gfc_conv_descriptor_stride_get (tmp, tree_dim);
+ stride = gfc_evaluate_now (stride, &outer_loop->pre);
+ info->stride[dim] = stride;
+ }
+ }
+ gfc_add_block_to_block (&outer_loop->post, &se.post);
+ gfc_add_block_to_block (&outer_loop->post, &se.finalblock);
+ ss_info->string_length = se.string_length;
+ }
break;
case GFC_SS_CONSTRUCTOR:
@@ -5383,7 +5410,8 @@ done:
int dim = ss->dim[n];
info->start[dim] = gfc_index_zero_node;
- info->end[dim] = gfc_index_zero_node;
+ if (ss_info->type != GFC_SS_FUNCTION)
+ info->end[dim] = gfc_index_zero_node;
info->stride[dim] = gfc_index_one_node;
}
break;
@@ -6068,6 +6096,46 @@ set_loop_bounds (gfc_loopinfo *loop)
}
+/* Last attempt to set the loop bounds, in case they depend on an allocatable
+ function result. */
+
+static void
+late_set_loop_bounds (gfc_loopinfo *loop)
+{
+ int n, dim;
+ gfc_array_info *info;
+ gfc_ss **loopspec;
+
+ loopspec = loop->specloop;
+
+ for (n = 0; n < loop->dimen; n++)
+ {
+ /* Set the extents of this range. */
+ if (loop->from[n] == NULL_TREE
+ || loop->to[n] == NULL_TREE)
+ {
+ /* We should have found the scalarization loop specifier. If not,
+ that's bad news. */
+ gcc_assert (loopspec[n]);
+
+ info = &loopspec[n]->info->data.array;
+ dim = loopspec[n]->dim[n];
+
+ if (loopspec[n]->info->type == GFC_SS_FUNCTION
+ && info->start[dim]
+ && info->end[dim])
+ {
+ loop->from[n] = info->start[dim];
+ loop->to[n] = info->end[dim];
+ }
+ }
+ }
+
+ for (loop = loop->nested; loop; loop = loop->next)
+ late_set_loop_bounds (loop);
+}
+
+
/* Initialize the scalarization loop. Creates the loop variables. Determines
the range of the loop variables. Creates a temporary if required.
Also generates code for scalar expressions which have been
@@ -6086,6 +6154,8 @@ gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
allocating the temporary. */
gfc_add_loop_ss_code (loop, loop->ss, false, where);
+ late_set_loop_bounds (loop);
+
tmp_ss = loop->temp_ss;
/* If we want a temporary then create it. */
if (tmp_ss != NULL)
@@ -6142,9 +6212,11 @@ gfc_set_delta (gfc_loopinfo *loop)
gfc_ss_type ss_type;
ss_type = ss->info->type;
- if (ss_type != GFC_SS_SECTION
- && ss_type != GFC_SS_COMPONENT
- && ss_type != GFC_SS_CONSTRUCTOR)
+ if (!(ss_type == GFC_SS_SECTION
+ || ss_type == GFC_SS_COMPONENT
+ || ss_type == GFC_SS_CONSTRUCTOR
+ || (ss_type == GFC_SS_FUNCTION
+ && gfc_is_class_array_function (ss->info->expr))))
continue;
info = &ss->info->data.array;
diff --git a/gcc/fortran/trans-decl.cc b/gcc/fortran/trans-decl.cc
index 741acc0..3b49b18 100644
--- a/gcc/fortran/trans-decl.cc
+++ b/gcc/fortran/trans-decl.cc
@@ -5340,7 +5340,7 @@ gfc_trans_deferred_vars (gfc_symbol * proc_sym, gfc_wrapped_block * block)
continue;
/* 'omp allocate( {purpose: allocator, value: align},
{purpose: init-stmtlist, value: cleanup-stmtlist},
- {purpose: size-var, value: last-size-expr}}
+ {purpose: size-var, value: last-size-expr} )
where init-stmt/cleanup-stmt is the STATEMENT list to find the
try-final block; last-size-expr is to find the location after
which to add the code and 'size-var' is for the proper size, cf.
diff --git a/gcc/fortran/trans-expr.cc b/gcc/fortran/trans-expr.cc
index 0db7ba3..ec24084 100644
--- a/gcc/fortran/trans-expr.cc
+++ b/gcc/fortran/trans-expr.cc
@@ -5485,16 +5485,6 @@ gfc_conv_subref_array_arg (gfc_se *se, gfc_expr * expr, int g77,
/* Translate the expression. */
gfc_conv_expr (&rse, expr);
- /* Reset the offset for the function call since the loop
- is zero based on the data pointer. Note that the temp
- comes first in the loop chain since it is added second. */
- if (gfc_is_class_array_function (expr))
- {
- tmp = loop.ss->loop_chain->info->data.array.descriptor;
- gfc_conv_descriptor_offset_set (&loop.pre, tmp,
- gfc_index_zero_node);
- }
-
gfc_conv_tmp_array_ref (&lse);
if (intent != INTENT_OUT)
@@ -8864,28 +8854,9 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
&& se->expr && GFC_CLASS_TYPE_P (TREE_TYPE (se->expr))
&& expr->must_finalize)
{
- int n;
- if (se->ss && se->ss->loop)
- {
- gfc_add_block_to_block (&se->ss->loop->pre, &se->pre);
- se->expr = gfc_evaluate_now (se->expr, &se->ss->loop->pre);
- tmp = gfc_class_data_get (se->expr);
- info->descriptor = tmp;
- info->data = gfc_conv_descriptor_data_get (tmp);
- info->offset = gfc_conv_descriptor_offset_get (tmp);
- for (n = 0; n < se->ss->loop->dimen; n++)
- {
- tree dim = gfc_rank_cst[n];
- se->ss->loop->to[n] = gfc_conv_descriptor_ubound_get (tmp, dim);
- se->ss->loop->from[n] = gfc_conv_descriptor_lbound_get (tmp, dim);
- }
- }
- else
- {
- /* TODO Eliminate the doubling of temporaries. This
- one is necessary to ensure no memory leakage. */
- se->expr = gfc_evaluate_now (se->expr, &se->pre);
- }
+ /* TODO Eliminate the doubling of temporaries. This
+ one is necessary to ensure no memory leakage. */
+ se->expr = gfc_evaluate_now (se->expr, &se->pre);
/* Finalize the result, if necessary. */
attr = expr->value.function.esym
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 49e3440..85319b3 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -6762,10 +6762,10 @@ fold_stmt (gimple_stmt_iterator *gsi, tree (*valueize) (tree), bitmap dce_bitmap
which can produce *&x = 0. */
bool
-fold_stmt_inplace (gimple_stmt_iterator *gsi)
+fold_stmt_inplace (gimple_stmt_iterator *gsi, tree (*valueize) (tree))
{
gimple *stmt = gsi_stmt (*gsi);
- bool changed = fold_stmt_1 (gsi, true, no_follow_ssa_edges);
+ bool changed = fold_stmt_1 (gsi, true, valueize);
gcc_assert (gsi_stmt (*gsi) == stmt);
return changed;
}
diff --git a/gcc/gimple-fold.h b/gcc/gimple-fold.h
index e3cf1f6..b678502 100644
--- a/gcc/gimple-fold.h
+++ b/gcc/gimple-fold.h
@@ -28,9 +28,12 @@ struct c_strlen_data;
extern bool get_range_strlen (tree, c_strlen_data *, unsigned eltsize);
extern void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree);
extern bool update_gimple_call (gimple_stmt_iterator *, tree, int, ...);
+extern tree no_follow_ssa_edges (tree);
+extern tree follow_single_use_edges (tree);
+extern tree follow_all_ssa_edges (tree);
extern bool fold_stmt (gimple_stmt_iterator *, bitmap = nullptr);
extern bool fold_stmt (gimple_stmt_iterator *, tree (*) (tree), bitmap = nullptr);
-extern bool fold_stmt_inplace (gimple_stmt_iterator *);
+extern bool fold_stmt_inplace (gimple_stmt_iterator *, tree (*) (tree) = no_follow_ssa_edges);
extern tree maybe_fold_and_comparisons (tree, enum tree_code, tree, tree,
enum tree_code, tree, tree,
basic_block = nullptr);
@@ -39,9 +42,6 @@ extern tree maybe_fold_or_comparisons (tree, enum tree_code, tree, tree,
basic_block = nullptr);
extern bool optimize_atomic_compare_exchange_p (gimple *);
extern void fold_builtin_atomic_compare_exchange (gimple_stmt_iterator *);
-extern tree no_follow_ssa_edges (tree);
-extern tree follow_single_use_edges (tree);
-extern tree follow_all_ssa_edges (tree);
extern tree gimple_fold_stmt_to_constant_1 (gimple *, tree (*) (tree),
tree (*) (tree) = no_follow_ssa_edges);
extern tree gimple_fold_stmt_to_constant (gimple *, tree (*) (tree));
diff --git a/gcc/gimple-ssa-store-merging.cc b/gcc/gimple-ssa-store-merging.cc
index ce56d97..d8075ca 100644
--- a/gcc/gimple-ssa-store-merging.cc
+++ b/gcc/gimple-ssa-store-merging.cc
@@ -1033,7 +1033,7 @@ find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap,
source when rsize < range. */
if (n->range == orig_range
/* There're case like 0x300000200 for uint32->uint64 cast,
- Don't hanlde this. */
+ Don't handle this. */
&& n->range == TYPE_PRECISION (n->type)
&& ((orig_range == 32
&& optab_handler (rotl_optab, SImode) != CODE_FOR_nothing)
@@ -1043,7 +1043,7 @@ find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap,
{
uint64_t range = (orig_range / BITS_PER_UNIT) * BITS_PER_MARKER;
uint64_t count = (tmp_n & MARKER_MASK) * BITS_PER_MARKER;
- /* .i.e. hanlde 0x203040506070800 when lower byte is zero. */
+ /* i.e. handle 0x203040506070800 when lower byte is zero. */
if (!count)
{
for (uint64_t i = 1; i != range / BITS_PER_MARKER; i++)
@@ -1055,6 +1055,8 @@ find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap,
if (count <= range / BITS_PER_MARKER)
{
count = (count + i) * BITS_PER_MARKER % range;
+ if (!count)
+ return NULL;
break;
}
else
diff --git a/gcc/m2/ChangeLog b/gcc/m2/ChangeLog
index 2406b95..1342492 100644
--- a/gcc/m2/ChangeLog
+++ b/gcc/m2/ChangeLog
@@ -1,3 +1,18 @@
+2025-07-31 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/121314
+ * mc-boot/GFormatStrings.cc (PerformFormatString): Rebuilt.
+ * mc-boot/GM2EXCEPTION.cc (M2EXCEPTION_M2Exception): Rebuilt.
+ * mc-boot/GSFIO.cc (SFIO_GetFileName): Rebuilt.
+ * mc-boot/GSFIO.h (SFIO_GetFileName): Rebuilt.
+ * mc-boot/Gdecl.cc: Rebuilt.
+ * mc-boot/GmcFileName.h: Rebuilt.
+ * mc/decl.mod (getStringChar): New procedure function.
+ (getStringContents): Call getStringChar.
+ (addQuotes): New procedure function.
+ (foldBinary): Call addQuotes to add delimiting quotes
+ to the new string.
+
2025-07-29 Gaius Mulley <gaiusmod2@gmail.com>
* gm2-compiler/M2GenGCC.mod (FoldBecomes): Remove all
diff --git a/gcc/m2/gm2-compiler/M2GenGCC.mod b/gcc/m2/gm2-compiler/M2GenGCC.mod
index 2507c53..2440b2a 100644
--- a/gcc/m2/gm2-compiler/M2GenGCC.mod
+++ b/gcc/m2/gm2-compiler/M2GenGCC.mod
@@ -6439,37 +6439,52 @@ END ResolveHigh ;
(*
+ IsUnboundedArray - return TRUE if symbol is an unbounded array.
+*)
+
+PROCEDURE IsUnboundedArray (sym: CARDINAL) : BOOLEAN ;
+BEGIN
+ IF IsParameter (sym) OR IsVar (sym)
+ THEN
+ RETURN IsUnbounded (GetType (sym))
+ END ;
+ RETURN FALSE
+END IsUnboundedArray ;
+
+
+(*
FoldHigh - if the array is not dynamic then we should be able to
remove the HighOp quadruple and assign op1 with
- the known compile time HIGH(op3).
+ the known compile time HIGH(array).
*)
PROCEDURE FoldHigh (tokenno: CARDINAL; p: WalkAction;
- quad: CARDINAL; op1, dim, op3: CARDINAL) ;
+ quad: CARDINAL; op1, dim, array: CARDINAL) ;
VAR
t : tree ;
location: location_t ;
BEGIN
- (* firstly ensure that any constant literal is declared *)
- TryDeclareConstant(tokenno, op3) ;
- location := TokenToLocation(tokenno) ;
- IF GccKnowsAbout(op3) AND CompletelyResolved(op3)
+ (* Firstly ensure that any constant literal is declared. *)
+ TryDeclareConstant (tokenno, array) ;
+ location := TokenToLocation (tokenno) ;
+ IF (NOT IsUnboundedArray (array)) AND
+ GccKnowsAbout (array) AND CompletelyResolved (array)
THEN
- t := ResolveHigh(tokenno, dim, op3) ;
- (* fine, we can take advantage of this and fold constants *)
- IF IsConst(op1) AND (t#tree(NIL))
+ t := ResolveHigh (tokenno, dim, array) ;
+ (* We can take advantage of this and fold constants. *)
+ IF IsConst (op1) AND (t # tree (NIL))
THEN
- PutConst(op1, Cardinal) ;
- AddModGcc(op1,
- DeclareKnownConstant(location, GetCardinalType(),
- ToCardinal(location, t))) ;
- p(op1) ;
+ PutConst (op1, Cardinal) ;
+ AddModGcc (op1,
+ DeclareKnownConstant (location, GetCardinalType (),
+ ToCardinal (location, t))) ;
+ p (op1) ;
NoChange := FALSE ;
- SubQuad(quad)
+ SubQuad (quad)
ELSE
- (* we can still fold the expression, but not the assignment, however, we will
- not do this here but in CodeHigh
- *)
+ (* We can still fold the expression but not the assignment;
+ we will not do this here but in CodeHigh when the result
+ can be stored. *)
END
END
END FoldHigh ;
diff --git a/gcc/m2/mc-boot/GFormatStrings.cc b/gcc/m2/mc-boot/GFormatStrings.cc
index f4c4fd6..ad7e7d8 100644
--- a/gcc/m2/mc-boot/GFormatStrings.cc
+++ b/gcc/m2/mc-boot/GFormatStrings.cc
@@ -464,7 +464,7 @@ static DynamicStrings_String PerformFormatString (DynamicStrings_String fmt, int
/* avoid dangling else. */
afterperc += 1;
Cast ((unsigned char *) &u, (sizeof (u)-1), (const unsigned char *) w, _w_high);
- in = DynamicStrings_ConCat (in, DynamicStrings_Slice (fmt, (*startpos), nextperc));
+ in = Copy (fmt, in, (*startpos), nextperc);
in = DynamicStrings_ConCat (in, StringConvert_CardinalToString (u, static_cast<unsigned int> (width), leader, 16, true));
(*startpos) = afterperc;
DSdbExit (static_cast<DynamicStrings_String> (NULL));
@@ -475,7 +475,7 @@ static DynamicStrings_String PerformFormatString (DynamicStrings_String fmt, int
/* avoid dangling else. */
afterperc += 1;
Cast ((unsigned char *) &u, (sizeof (u)-1), (const unsigned char *) w, _w_high);
- in = DynamicStrings_ConCat (in, DynamicStrings_Slice (fmt, (*startpos), nextperc));
+ in = Copy (fmt, in, (*startpos), nextperc);
in = DynamicStrings_ConCat (in, StringConvert_CardinalToString (u, static_cast<unsigned int> (width), leader, 10, false));
(*startpos) = afterperc;
DSdbExit (static_cast<DynamicStrings_String> (NULL));
diff --git a/gcc/m2/mc-boot/GM2EXCEPTION.cc b/gcc/m2/mc-boot/GM2EXCEPTION.cc
index 62d47f0..6baff3c 100644
--- a/gcc/m2/mc-boot/GM2EXCEPTION.cc
+++ b/gcc/m2/mc-boot/GM2EXCEPTION.cc
@@ -34,7 +34,6 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
typedef struct { PROC_t proc; } PROC;
# endif
-# include "Gmcrts.h"
#define _M2EXCEPTION_C
#include "GM2EXCEPTION.h"
@@ -51,18 +50,19 @@ extern "C" M2EXCEPTION_M2Exceptions M2EXCEPTION_M2Exception (void)
/* If the program or coroutine is in the exception state then return the enumeration
value representing the exception cause. If it is not in the exception state then
- raises and exception (exException). */
+ raises an exException exception. */
e = RTExceptions_GetExceptionBlock ();
n = RTExceptions_GetNumber (e);
if (n == (UINT_MAX))
{
RTExceptions_Raise ( ((unsigned int) (M2EXCEPTION_exException)), const_cast<void*> (static_cast<const void*>("../../gcc/m2/gm2-libs/M2EXCEPTION.mod")), 47, 6, const_cast<void*> (static_cast<const void*>("M2Exception")), const_cast<void*> (static_cast<const void*>("current coroutine is not in the exceptional execution state")));
+ return M2EXCEPTION_exException;
}
else
{
return (M2EXCEPTION_M2Exceptions) (n);
}
- ReturnException ("../../gcc/m2/gm2-libs/M2EXCEPTION.def", 25, 1);
+ /* static analysis guarantees a RETURN statement will be used before here. */
__builtin_unreachable ();
}
diff --git a/gcc/m2/mc-boot/GSFIO.cc b/gcc/m2/mc-boot/GSFIO.cc
index 6ae0d5e..f8c13d3 100644
--- a/gcc/m2/mc-boot/GSFIO.cc
+++ b/gcc/m2/mc-boot/GSFIO.cc
@@ -99,6 +99,13 @@ extern "C" DynamicStrings_String SFIO_WriteS (FIO_File file, DynamicStrings_Stri
extern "C" DynamicStrings_String SFIO_ReadS (FIO_File file);
+/*
+ GetFileName - return a new string containing the name of the file.
+ The string should be killed by the caller.
+*/
+
+extern "C" DynamicStrings_String SFIO_GetFileName (FIO_File file);
+
/*
Exists - returns TRUE if a file named, fname exists for reading.
@@ -207,6 +214,19 @@ extern "C" DynamicStrings_String SFIO_ReadS (FIO_File file)
__builtin_unreachable ();
}
+
+/*
+ GetFileName - return a new string containing the name of the file.
+ The string should be killed by the caller.
+*/
+
+extern "C" DynamicStrings_String SFIO_GetFileName (FIO_File file)
+{
+ return DynamicStrings_InitStringCharStar (FIO_getFileName (file));
+ /* static analysis guarantees a RETURN statement will be used before here. */
+ __builtin_unreachable ();
+}
+
extern "C" void _M2_SFIO_init (__attribute__((unused)) int argc, __attribute__((unused)) char *argv[], __attribute__((unused)) char *envp[])
{
}
diff --git a/gcc/m2/mc-boot/GSFIO.h b/gcc/m2/mc-boot/GSFIO.h
index 42ffc48..93c8099 100644
--- a/gcc/m2/mc-boot/GSFIO.h
+++ b/gcc/m2/mc-boot/GSFIO.h
@@ -103,6 +103,13 @@ EXTERN DynamicStrings_String SFIO_WriteS (FIO_File file, DynamicStrings_String s
*/
EXTERN DynamicStrings_String SFIO_ReadS (FIO_File file);
+
+/*
+ GetFileName - return a new string containing the name of the file.
+ The string should be killed by the caller.
+*/
+
+EXTERN DynamicStrings_String SFIO_GetFileName (FIO_File file);
# ifdef __cplusplus
}
# endif
diff --git a/gcc/m2/mc-boot/Gdecl.cc b/gcc/m2/mc-boot/Gdecl.cc
index ae03483..94ea098 100644
--- a/gcc/m2/mc-boot/Gdecl.cc
+++ b/gcc/m2/mc-boot/Gdecl.cc
@@ -2550,6 +2550,14 @@ static bool isLeafString (decl_node__opaque n);
static DynamicStrings_String getLiteralStringContents (decl_node__opaque n);
/*
+ getStringChar - if the string is delimited by single
+ or double quotes then strip both
+ quotes from the string.
+*/
+
+static DynamicStrings_String getStringChar (decl_node__opaque n);
+
+/*
getStringContents - return the string contents of a constant, literal,
string or a constexp node.
*/
@@ -2569,7 +2577,13 @@ static nameKey_Name addNames (decl_node__opaque a, decl_node__opaque b);
static decl_node__opaque resolveString (decl_node__opaque n);
/*
- foldBinary -
+ addQuotes - adds delimiter quote char to string.
+*/
+
+static DynamicStrings_String addQuotes (DynamicStrings_String s, char quote);
+
+/*
+ foldBinary - attempt to fold binary + for string constants.
*/
static decl_node__opaque foldBinary (decl_nodeT k, decl_node__opaque l, decl_node__opaque r, decl_node__opaque res);
@@ -7590,6 +7604,32 @@ static DynamicStrings_String getLiteralStringContents (decl_node__opaque n)
/*
+ getStringChar - if the string is delimited by single
+ or double quotes then strip both
+ quotes from the string.
+*/
+
+static DynamicStrings_String getStringChar (decl_node__opaque n)
+{
+ DynamicStrings_String s;
+
+ s = getString (n);
+ if (((DynamicStrings_char (s, 0)) == '\'') && ((DynamicStrings_char (s, -1)) == '\''))
+ {
+ s = DynamicStrings_Slice (s, 1, -1);
+ }
+ else if (((DynamicStrings_char (s, 0)) == '"') && ((DynamicStrings_char (s, -1)) == '"'))
+ {
+ /* avoid dangling else. */
+ s = DynamicStrings_Slice (s, 1, -1);
+ }
+ return s;
+ /* static analysis guarantees a RETURN statement will be used before here. */
+ __builtin_unreachable ();
+}
+
+
+/*
getStringContents - return the string contents of a constant, literal,
string or a constexp node.
*/
@@ -7608,7 +7648,7 @@ static DynamicStrings_String getStringContents (decl_node__opaque n)
else if (isString (n))
{
/* avoid dangling else. */
- return getString (n);
+ return getStringChar (n);
}
else if (isConstExp (n))
{
@@ -7672,11 +7712,29 @@ static decl_node__opaque resolveString (decl_node__opaque n)
/*
- foldBinary -
+ addQuotes - adds delimiter quote char to string.
+*/
+
+static DynamicStrings_String addQuotes (DynamicStrings_String s, char quote)
+{
+ DynamicStrings_String qs;
+
+ s = DynamicStrings_ConCatChar (s, quote);
+ qs = DynamicStrings_InitStringChar (quote);
+ qs = DynamicStrings_ConCat (qs, DynamicStrings_Mark (s));
+ return qs;
+ /* static analysis guarantees a RETURN statement will be used before here. */
+ __builtin_unreachable ();
+}
+
+
+/*
+ foldBinary - attempt to fold binary + for string constants.
*/
static decl_node__opaque foldBinary (decl_nodeT k, decl_node__opaque l, decl_node__opaque r, decl_node__opaque res)
{
+ char qc;
decl_node__opaque n;
DynamicStrings_String ls;
DynamicStrings_String rs;
@@ -7686,7 +7744,12 @@ static decl_node__opaque foldBinary (decl_nodeT k, decl_node__opaque l, decl_nod
{
ls = getStringContents (l);
rs = getStringContents (r);
+ qc = '\'';
+ /* Add unquoted contents. */
ls = DynamicStrings_Add (ls, rs);
+ /* Add quote. */
+ ls = addQuotes (ls, qc);
+ /* Build new string. */
n = static_cast<decl_node__opaque> (decl_makeString (nameKey_makekey (DynamicStrings_string (ls))));
ls = DynamicStrings_KillString (ls);
rs = DynamicStrings_KillString (rs);
@@ -22789,7 +22852,7 @@ static decl_node__opaque doDupExpr (decl_node__opaque n)
break;
case decl_length:
- M2RTS_HALT (-1);
+ M2RTS_HALT (-1); /* length should have been converted into unary. */
__builtin_unreachable ();
break;
diff --git a/gcc/m2/mc-boot/GmcFileName.h b/gcc/m2/mc-boot/GmcFileName.h
index 11f1512..6c7ec75 100644
--- a/gcc/m2/mc-boot/GmcFileName.h
+++ b/gcc/m2/mc-boot/GmcFileName.h
@@ -50,7 +50,7 @@ extern "C" {
given a module and an extension. This file name
length will be operating system specific.
String, Extension, is concatenated onto
- Module and thus it is safe to `Mark' the extension
+ Module and thus it is safe to Mark the extension
for garbage collection.
*/
diff --git a/gcc/m2/mc/decl.mod b/gcc/m2/mc/decl.mod
index 342487e..197ca5e 100644
--- a/gcc/m2/mc/decl.mod
+++ b/gcc/m2/mc/decl.mod
@@ -4643,6 +4643,28 @@ END getLiteralStringContents ;
(*
+ getStringChar - if the string is delimited by single
+ or double quotes then strip both
+ quotes from the string.
+*)
+
+PROCEDURE getStringChar (n: node) : String ;
+VAR
+ s: String ;
+BEGIN
+ s := getString (n) ;
+ IF (DynamicStrings.char (s, 0) = "'") AND (DynamicStrings.char (s, -1) = "'")
+ THEN
+ s := DynamicStrings.Slice (s, 1, -1)
+ ELSIF (DynamicStrings.char (s, 0) = '"') AND (DynamicStrings.char (s, -1) = '"')
+ THEN
+ s := DynamicStrings.Slice (s, 1, -1)
+ END ;
+ RETURN s
+END getStringChar ;
+
+
+(*
getStringContents - return the string contents of a constant, literal,
string or a constexp node.
*)
@@ -4657,7 +4679,7 @@ BEGIN
RETURN getLiteralStringContents (n)
ELSIF isString (n)
THEN
- RETURN getString (n)
+ RETURN getStringChar (n)
ELSIF isConstExp (n)
THEN
RETURN getStringContents (n^.unaryF.arg)
@@ -4709,11 +4731,27 @@ END resolveString ;
(*
- foldBinary -
+ addQuotes - adds delimiter quote char to string.
+*)
+
+PROCEDURE addQuotes (s: String; quote: CHAR) : String ;
+VAR
+ qs: String ;
+BEGIN
+ s := DynamicStrings.ConCatChar (s, quote) ;
+ qs := DynamicStrings.InitStringChar (quote) ;
+ qs := DynamicStrings.ConCat (qs, DynamicStrings.Mark (s)) ;
+ RETURN qs
+END addQuotes ;
+
+
+(*
+ foldBinary - attempt to fold binary + for string constants.
*)
PROCEDURE foldBinary (k: nodeT; l, r: node; res: node) : node ;
VAR
+ qc: CHAR ;
n : node ;
ls,
rs: String ;
@@ -4723,7 +4761,12 @@ BEGIN
THEN
ls := getStringContents (l) ;
rs := getStringContents (r) ;
+ qc := "'" ;
+ (* Add unquoted contents. *)
ls := DynamicStrings.Add (ls, rs) ;
+ (* Add quote. *)
+ ls := addQuotes (ls, qc) ;
+ (* Build new string. *)
n := makeString (makekey (DynamicStrings.string (ls))) ;
ls := DynamicStrings.KillString (ls) ;
rs := DynamicStrings.KillString (rs)
diff --git a/gcc/machmode.h b/gcc/machmode.h
index 467681d9..2f2d349 100644
--- a/gcc/machmode.h
+++ b/gcc/machmode.h
@@ -958,7 +958,8 @@ private:
/* Find the best mode to use to access a bit field. */
-extern bool get_best_mode (int, int, poly_uint64, poly_uint64, unsigned int,
+extern bool get_best_mode (HOST_WIDE_INT, HOST_WIDE_INT,
+ poly_uint64, poly_uint64, unsigned int,
unsigned HOST_WIDE_INT, bool, scalar_int_mode *);
/* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT. */
diff --git a/gcc/stor-layout.cc b/gcc/stor-layout.cc
index 12071c9..63e830a 100644
--- a/gcc/stor-layout.cc
+++ b/gcc/stor-layout.cc
@@ -3167,7 +3167,7 @@ bit_field_mode_iterator::prefer_smaller_modes ()
decide which of the above modes should be used. */
bool
-get_best_mode (int bitsize, int bitpos,
+get_best_mode (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
poly_uint64 bitregion_start, poly_uint64 bitregion_end,
unsigned int align,
unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index bd14b44..5c227b5 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,131 @@
+2025-07-31 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/121314
+ * gm2/errors/fail/badindrtype.mod: New test.
+ * gm2/errors/fail/badindrtype2.mod: New test.
+
+2025-07-31 Mikael Morin <morin-mikael@orange.fr>
+
+ PR fortran/121342
+ * gfortran.dg/class_elemental_1.f90: New test.
+
+2025-07-31 Jason Merrill <jason@redhat.com>
+
+ PR c++/120800
+ * g++.dg/cpp0x/constexpr-array30.C: New test.
+
+2025-07-31 Marek Polacek <polacek@redhat.com>
+
+ PR c++/120775
+ * g++.dg/cpp26/consteval-block1.C: New test.
+ * g++.dg/cpp26/consteval-block2.C: New test.
+ * g++.dg/cpp26/consteval-block3.C: New test.
+ * g++.dg/cpp26/consteval-block4.C: New test.
+ * g++.dg/cpp26/consteval-block5.C: New test.
+ * g++.dg/cpp26/consteval-block6.C: New test.
+ * g++.dg/cpp26/consteval-block7.C: New test.
+ * g++.dg/cpp26/consteval-block8.C: New test.
+
+2025-07-31 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i16.c: Add asm check
+ for signed avg ceil.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i16.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i16.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i16.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx_binary.h: Add test
+ helper macros.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h: Add
+ test data for run test.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i16.c: New test.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i32.c: New test.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i64.c: New test.
+ * gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i8.c: New test.
+
+2025-07-31 Artemiy Granat <a.granat@ispras.ru>
+
+ * gcc.target/i386/attributes-error.c: Add more attributes
+ combinations.
+
+2025-07-31 Artemiy Granat <a.granat@ispras.ru>
+
+ * g++.dg/abi/regparm1.C: Require ia32 target.
+ * gcc.target/i386/20020224-1.c: Likewise.
+ * gcc.target/i386/pr103785.c: Use regparm attribute only if
+ not in 64-bit mode.
+ * gcc.target/i386/pr36533.c: Likewise.
+ * gcc.target/i386/pr59099.c: Likewise.
+ * gcc.target/i386/sibcall-8.c: Likewise.
+ * gcc.target/i386/sw-1.c: Likewise.
+ * gcc.target/i386/pr15184-2.c: Fix invalid comment.
+ * gcc.target/i386/attributes-ignore.c: New test.
+
+2025-07-31 Yury Khrustalev <yury.khrustalev@arm.com>
+
+ * g++.target/aarch64/mv-cpu-features.C: New test.
+
+2025-07-31 Yury Khrustalev <yury.khrustalev@arm.com>
+
+ * gcc.target/aarch64/ifunc-resolver.in: Add core test functions.
+ * gcc.target/aarch64/ifunc-resolver-0.c: New test.
+ * gcc.target/aarch64/ifunc-resolver-1.c: Ditto.
+ * gcc.target/aarch64/ifunc-resolver-2.c: Ditto.
+ * gcc.target/aarch64/ifunc-resolver-3.c: Ditto.
+ * gcc.target/aarch64/ifunc-resolver-4.c: Ditto.
+
+2025-07-31 Spencer Abson <spencer.abson@arm.com>
+
+ PR target/121028
+ * gcc.target/aarch64/sme/call_sm_switch_1.c: Tell check-function
+ -bodies not to ignore .inst directives, and replace the test for
+ "smstart sm" with one for its encoding.
+ * gcc.target/aarch64/sme/call_sm_switch_11.c: Likewise.
+ * gcc.target/aarch64/sme/pr121028.c: New test.
+
+2025-07-31 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/121264
+ * gcc.dg/tree-ssa/pr121264.c: New test.
+
+2025-07-31 Spencer Abson <spencer.abson@arm.com>
+
+ * gcc.target/aarch64/sme2/acle-asm/amax_f16_x2.c: Gate do-assemble on
+ assembler support for +faminmax and +sme2.
+ * gcc.target/aarch64/sme2/acle-asm/amax_f16_x4.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amax_f32_x2.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amax_f32_x4.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amax_f64_x2.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amax_f64_x4.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amin_f16_x2.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amin_f16_x4.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amin_f32_x2.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amin_f32_x4.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amin_f64_x2.c: Likewise.
+ * gcc.target/aarch64/sme2/acle-asm/amin_f64_x4.c: Likewise.
+ * lib/target-supports.exp: Split the extensions that require SME into
+ a separate set, and use armv9-a as their baseline.
+
+2025-07-31 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.target/i386/apx-1.c (apx_hanlder): Rename to ...
+ (apx_handler): ... this.
+ * gcc.target/i386/uintr-2.c (UINTR_hanlder): Rename to ...
+ (UINTR_handler): ... this.
+ * gcc.target/i386/uintr-5.c (UINTR_hanlder): Rename to ...
+ (UINTR_handler): ... this.
+
2025-07-30 Nathaniel Shead <nathanieloshead@gmail.com>
PR c++/121291
diff --git a/gcc/testsuite/g++.dg/abi/regparm1.C b/gcc/testsuite/g++.dg/abi/regparm1.C
index c471046..3aae3dd 100644
--- a/gcc/testsuite/g++.dg/abi/regparm1.C
+++ b/gcc/testsuite/g++.dg/abi/regparm1.C
@@ -1,5 +1,5 @@
// PR c++/29911 (9381)
-// { dg-do run { target i?86-*-* x86_64-*-* } }
+// { dg-do run { target { { i?86-*-* x86_64-*-* } && ia32 } } }
// { dg-require-effective-target c++11 }
extern "C" int printf(const char *, ...);
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-array30.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-array30.C
new file mode 100644
index 0000000..3f72407
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-array30.C
@@ -0,0 +1,22 @@
+// PR c++/120800
+// { dg-do compile { target c++11 } }
+
+template<typename T>
+struct Container
+{
+ T m_data[1] {};
+};
+
+class Element
+{
+private:
+ Element() = default;
+
+private:
+ bool m_bool1 { false };
+ bool m_bool2;
+
+ friend struct Container<Element>;
+};
+
+Container<Element> element;
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block1.C b/gcc/testsuite/g++.dg/cpp26/consteval-block1.C
new file mode 100644
index 0000000..9e2cf22
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block1.C
@@ -0,0 +1,82 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+
+constexpr int fn () { return 42; }
+struct M {
+ static consteval void foo () {}
+};
+
+consteval { }
+consteval { fn (); }
+consteval { M::foo (); }
+consteval { auto x = fn (); return; }
+consteval {
+ [](int i) { return i; }(5);
+}
+auto lam = [] { };
+consteval { lam (); }
+
+struct S {
+ consteval { }
+};
+
+struct S2 {
+ consteval { fn(); }
+};
+
+class C {
+ consteval { }
+};
+
+class C2 {
+ consteval { M::foo (); }
+};
+
+union U {
+ consteval { }
+};
+
+template<typename>
+struct TS {
+ consteval { }
+};
+
+template<typename... Ts>
+struct TS2 {
+ consteval {
+ (Ts::foo (), ...);
+ }
+};
+
+TS2<M> ts2;
+
+void
+g ()
+{
+ consteval { }
+}
+
+template<typename>
+void
+tg ()
+{
+ consteval { }
+}
+
+void die ();
+constexpr int
+bar (int i)
+{
+ if (i != 42)
+ die ();
+ return 0;
+}
+
+void
+foo ()
+{
+ constexpr int r = 42;
+ consteval {
+ bar (r);
+ }
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block2.C b/gcc/testsuite/g++.dg/cpp26/consteval-block2.C
new file mode 100644
index 0000000..895fcb6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block2.C
@@ -0,0 +1,49 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+
+void fn ();
+
+consteval { fn (); } // { dg-error "call to non-.constexpr. function" }
+consteval { return 42; } // { dg-error "return-statement with a value" }
+
+struct S {
+ consteval {
+ fn (); // { dg-error "call to non-.constexpr. function" }
+ }
+ consteval {
+ return 42; // { dg-error "return-statement with a value" }
+ }
+};
+
+template<typename T>
+constexpr void foo (T t) { return t; } // { dg-error "return-statement with a value" }
+
+template<int N>
+struct R {
+ consteval { foo (N); }
+};
+
+R<1> r;
+
+template<typename T>
+constexpr void foo2 (T t) { return t; } // { dg-error "return-statement with a value" }
+
+template<int N>
+void
+f ()
+{
+ consteval { foo2 (1); }
+}
+
+constexpr int bar (int) { return 0; }
+
+void
+g ()
+{
+ f<1>();
+
+ int r = 42;
+ consteval {
+ bar (r); // { dg-error ".r. is not captured" }
+ }
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block3.C b/gcc/testsuite/g++.dg/cpp26/consteval-block3.C
new file mode 100644
index 0000000..c1221c3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block3.C
@@ -0,0 +1,41 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+// Test that we actually evaluate the consteval block.
+
+void bar () { }
+
+template<int N>
+constexpr void
+fn ()
+{
+ if (N > 0)
+ bar (); // { dg-error "call to non-.constexpr. function" }
+}
+
+template<int N>
+struct S {
+ consteval { fn<N>(); } // { dg-error "called in a constant expression" }
+};
+
+S<1> s;
+
+template<int N>
+constexpr void
+fn2 ()
+{
+ if (N > 0)
+ bar (); // { dg-error "call to non-.constexpr. function" }
+}
+
+template<int N>
+void
+g ()
+{
+ consteval { fn2<N>(); } // { dg-error "called in a constant expression" }
+}
+
+void
+f ()
+{
+ g<1>();
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block4.C b/gcc/testsuite/g++.dg/cpp26/consteval-block4.C
new file mode 100644
index 0000000..be95e17
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block4.C
@@ -0,0 +1,41 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+// Test that we actually evaluate the consteval block.
+
+void bar () { }
+
+template<int N>
+constexpr void
+fn ()
+{
+ if (N > 0)
+ bar ();
+}
+
+template<int N>
+struct S {
+ consteval { fn<N>(); }
+};
+
+S<0> s;
+
+template<int N>
+constexpr void
+fn2 ()
+{
+ if (N > 0)
+ bar ();
+}
+
+template<int N>
+void
+g ()
+{
+ consteval { fn2<N>(); }
+}
+
+void
+f ()
+{
+ g<0>();
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block5.C b/gcc/testsuite/g++.dg/cpp26/consteval-block5.C
new file mode 100644
index 0000000..462cebe
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block5.C
@@ -0,0 +1,70 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+
+void bar () { }
+
+template<int N>
+constexpr void
+fn ()
+{
+ if (N > 0)
+ bar ();
+}
+
+template<typename>
+struct S {
+ consteval { fn<1>(); }
+};
+
+template<>
+struct S<int> {
+ consteval { fn<0>(); }
+};
+
+S<int> s1;
+
+template<typename T>
+struct S<T*> {
+ consteval { fn<0>(); }
+};
+
+S<int *> s2;
+
+template<typename T, int N>
+struct W {
+ consteval { T t; fn<N - 1>(); }
+};
+
+template<typename T>
+struct W<T, 0> {
+ consteval { T t; fn<0>(); }
+};
+
+template<>
+struct W<char, 0> {
+ consteval { fn<0>(); }
+};
+
+W<int, 0> w1;
+W<int, 1> w2;
+W<char, 0> w3;
+
+template<typename>
+void
+f ()
+{
+ consteval { fn<1>(); }
+}
+
+template<>
+void
+f<int> ()
+{
+ consteval { fn<0>(); }
+}
+
+void
+g ()
+{
+ f<int> ();
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block6.C b/gcc/testsuite/g++.dg/cpp26/consteval-block6.C
new file mode 100644
index 0000000..ca90b3e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block6.C
@@ -0,0 +1,108 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+
+void die () {}
+
+template<int N>
+constexpr void
+fn ()
+{
+ if (N > 0)
+ die ();
+}
+
+template<int N>
+void
+fn2 ()
+{
+ struct S {
+ consteval {
+ fn<N>();
+ }
+ };
+}
+
+template<int N>
+struct A {
+ struct B {
+ consteval {
+ fn<N>();
+ }
+ };
+ template<int M>
+ struct C {
+ consteval {
+ fn<N + M>();
+ }
+ };
+};
+
+template<int N>
+struct D {
+ constexpr static int i = 0;
+ struct E {
+ consteval {
+ fn<i>();
+ }
+ };
+};
+
+A<0>::B b;
+A<0>::C<0> c;
+D<0>::E e;
+
+void
+f ()
+{
+ fn2<0>();
+}
+
+static constexpr int j = 0;
+const int x = 0;
+
+consteval {
+ fn<j>();
+ consteval {
+ fn<j + j>();
+ consteval {
+ fn<j + j + j>();
+ consteval {
+ fn<j + j + x>();
+ consteval {
+ fn<j + x>();
+ }
+ }
+ }
+ }
+}
+
+struct R { constexpr R() {} };
+
+template<int N>
+constexpr auto X = N;
+
+consteval {
+ R{};
+ constexpr auto x = 0;
+ fn<x>();
+ fn<X<0>>();
+ if consteval
+ {
+ fn<j>();
+ }
+ else
+ {
+ die ();
+ }
+}
+
+template<typename T>
+struct G {
+ consteval {
+ using U = T[3];
+ U arr{};
+ int i = arr[2];
+ }
+};
+
+G<int> g;
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block7.C b/gcc/testsuite/g++.dg/cpp26/consteval-block7.C
new file mode 100644
index 0000000..231682f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block7.C
@@ -0,0 +1,12 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+
+consteval {
+ template <class T> // { dg-error "template declaration cannot appear at block scope" }
+ struct X { };
+
+ template <class T> // { dg-error "template declaration cannot appear at block scope" }
+ concept C = true;
+
+ return; // OK
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/consteval-block8.C b/gcc/testsuite/g++.dg/cpp26/consteval-block8.C
new file mode 100644
index 0000000..ad164fd
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/consteval-block8.C
@@ -0,0 +1,38 @@
+// { dg-do compile { target c++26 } }
+// Test consteval blocks, as specified by P2996.
+
+/* __func__ won't be set. Make sure we diagnose it. */
+consteval { __func__; } // { dg-error "outside of function scope" }
+consteval { { __func__; } } // { dg-error "outside of function scope" }
+consteval { []() mutable consteval -> void { __func__; } (); } // { dg-bogus "outside of function scope" }
+consteval { []() mutable consteval -> void { consteval { __func__; } } (); } // { dg-bogus "outside of function scope" }
+
+auto l = []() -> void {
+ consteval { __func__; } // { dg-bogus "outside of function scope" }
+};
+
+struct F {
+ consteval { __func__; } // { dg-error "outside of function scope" }
+};
+template<typename>
+struct TF {
+ consteval { __func__; } // { dg-error "outside of function scope" }
+};
+
+void
+g ()
+{
+ consteval { __func__; } // { dg-bogus "outside of function scope" }
+ // Not a consteval-block-declaration.
+ []() mutable consteval -> void { __func__; } (); // { dg-bogus "outside of function scope" }
+}
+
+template<typename>
+void
+f ()
+{
+ consteval { __func__; } // { dg-bogus "outside of function scope" }
+ { consteval { __func__; } } // { dg-bogus "outside of function scope" }
+ __func__; // { dg-bogus "outside of function scope" }
+ []() mutable consteval -> void { __func__; } (); // { dg-bogus "outside of function scope" }
+}
diff --git a/gcc/testsuite/g++.dg/modules/merge-19.h b/gcc/testsuite/g++.dg/modules/merge-19.h
new file mode 100644
index 0000000..c3faadc
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/merge-19.h
@@ -0,0 +1,21 @@
+// PR c++/121238
+
+inline void inc(const char*& __first) {
+ ++__first;
+}
+
+template <typename = void>
+bool parse_integer(const char *first) {
+ const char *start = first;
+ inc(first);
+ return first != start;
+}
+template bool parse_integer<void>(const char*);
+
+
+struct S { ~S() {} int x; };
+template <typename = void>
+bool take_by_invisiref(S s) {
+ return s.x == 5;
+}
+template bool take_by_invisiref<void>(S);
diff --git a/gcc/testsuite/g++.dg/modules/merge-19_a.H b/gcc/testsuite/g++.dg/modules/merge-19_a.H
new file mode 100644
index 0000000..149a447
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/merge-19_a.H
@@ -0,0 +1,5 @@
+// PR c++/121238
+// { dg-additional-options "-fmodule-header" }
+// { dg-module-cmi {} }
+
+#include "merge-19.h"
diff --git a/gcc/testsuite/g++.dg/modules/merge-19_b.C b/gcc/testsuite/g++.dg/modules/merge-19_b.C
new file mode 100644
index 0000000..345e7fe
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/merge-19_b.C
@@ -0,0 +1,16 @@
+// PR c++/121238
+// { dg-module-do run }
+// { dg-additional-options "-fmodules -fno-module-lazy" }
+
+#include "merge-19.h"
+import "merge-19_a.H";
+
+int main() {
+ const char fmt[] = "5";
+ if (!parse_integer<void>(fmt))
+ __builtin_abort();
+
+ S s{ 5 };
+ if (!take_by_invisiref(s))
+ __builtin_abort();
+}
diff --git a/gcc/testsuite/g++.dg/modules/pr108080.H b/gcc/testsuite/g++.dg/modules/pr108080.H
new file mode 100644
index 0000000..b05d957
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/pr108080.H
@@ -0,0 +1,5 @@
+// PR c++/108080
+// { dg-additional-options "-fmodules" }
+// Give a diagnostic message rather than a crash for unsupported features.
+
+[[gnu::optimize("-O3")]] void foo(); // { dg-warning "optimize" }
diff --git a/gcc/testsuite/g++.target/aarch64/mv-cpu-features.C b/gcc/testsuite/g++.target/aarch64/mv-cpu-features.C
new file mode 100644
index 0000000..ad6accd
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/mv-cpu-features.C
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-require-ifunc "" } */
+/* { dg-require-effective-target mmap } */
+/* { dg-options "-Wno-experimental-fmv-target" } */
+
+#include <cstdint>
+#include <sys/auxv.h>
+
+__attribute__((target_version ("default")))
+int foo ()
+{
+ return 0;
+}
+
+__attribute__((target_version ("rng")))
+int foo ()
+{
+ return 1;
+}
+
+__attribute__((target_version ("lse")))
+int foo ()
+{
+ return 2;
+}
+
+typedef struct {
+ uint64_t size;
+ uint64_t hwcap;
+ uint64_t hwcap2;
+ uint64_t hwcap3;
+ uint64_t hwcap4;
+} ifunc_arg_t;
+
+int impl ()
+{
+ return 0;
+}
+
+#ifndef _IFUNC_ARG_HWCAP
+#define _IFUNC_ARG_HWCAP (1ULL << 62)
+#endif
+
+extern "C" void
+__init_cpu_features_resolver (unsigned long hwcap, const ifunc_arg_t *arg);
+
+extern "C" void *
+fun_resolver (uint64_t a0, const ifunc_arg_t *a1)
+{
+ ifunc_arg_t arg = {};
+ arg.size = sizeof (ifunc_arg_t);
+ /* These flags ensure that the implementation of foo ()
+ that returns 2 is selected. */
+ arg.hwcap = HWCAP_ATOMICS;
+ arg.hwcap2 = HWCAP2_RNG;
+ __init_cpu_features_resolver (arg.hwcap | _IFUNC_ARG_HWCAP, &arg);
+ return (void *)(uintptr_t)impl;
+}
+
+extern "C" int fun (void) __attribute__((ifunc ("fun_resolver")));
+
+/* In this test we expect that the manual resolver for the fun ()
+ function will be executed before the automatic resolver for the
+ FMV function foo (). This is because resolvers from the same TU
+ are executed according to the offsets of the corresponding relocations.
+
+ The automatic resolver is generated in a dedicated section, while the
+ manually written resolver is placed in the .text section, which
+ comes first.
+
+ The manual resolver above calls __init_cpu_features_resolver()
+ supplying synthetic ifunc_arg_t fields that will determine the
+ choice for the FMV implementation.
+ */
+
+int main ()
+{
+ int res = fun ();
+ if (res == 0 && foo () == 2)
+ return 0;
+ return 1;
+}
diff --git a/gcc/testsuite/gcc.dg/pr121322.c b/gcc/testsuite/gcc.dg/pr121322.c
new file mode 100644
index 0000000..2fad5b5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr121322.c
@@ -0,0 +1,14 @@
+/* PR middle-end/121322 */
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+unsigned long long
+foo (unsigned long long *p)
+{
+ unsigned long long a = *p;
+ unsigned long long b = __builtin_bswap64 (a);
+ return ((b << 32)
+ | ((b >> 8) & 0xff000000ULL)
+ | ((b >> 24) & 0xff0000ULL)
+ | ((b >> 40) & 0xff00ULL));
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr121264.c b/gcc/testsuite/gcc.dg/tree-ssa/pr121264.c
new file mode 100644
index 0000000..bd5acc0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr121264.c
@@ -0,0 +1,12 @@
+/* PR tree-optimization/121264 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump " \\\| " "optimized" } } */
+
+struct A { char b; char c[0x20000010]; } a;
+
+int
+foo ()
+{
+ return a.c[0x20000000] || a.c[1];
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-0.c b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-0.c
new file mode 100644
index 0000000..e544b04f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-0.c
@@ -0,0 +1,12 @@
+/* { dg-do run } */
+/* { dg-require-ifunc "" } */
+/* { dg-require-effective-target mmap } */
+/* { dg-options "-Wno-experimental-fmv-target" } */
+
+#include <stdint.h>
+
+typedef struct {
+ uint64_t size;
+} ifunc_arg_t;
+
+#include "ifunc-resolver.in"
diff --git a/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-1.c b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-1.c
new file mode 100644
index 0000000..be70687
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-1.c
@@ -0,0 +1,13 @@
+/* { dg-do run } */
+/* { dg-require-ifunc "" } */
+/* { dg-require-effective-target mmap } */
+/* { dg-options "-Wno-experimental-fmv-target" } */
+
+#include <stdint.h>
+
+typedef struct {
+ uint64_t size;
+ uint64_t hwcap;
+} ifunc_arg_t;
+
+#include "ifunc-resolver.in"
diff --git a/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-2.c b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-2.c
new file mode 100644
index 0000000..bf594d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-2.c
@@ -0,0 +1,14 @@
+/* { dg-do run } */
+/* { dg-require-ifunc "" } */
+/* { dg-require-effective-target mmap } */
+/* { dg-options "-Wno-experimental-fmv-target" } */
+
+#include <stdint.h>
+
+typedef struct {
+ uint64_t size;
+ uint64_t hwcap;
+ uint64_t hwcap2;
+} ifunc_arg_t;
+
+#include "ifunc-resolver.in"
diff --git a/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-3.c b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-3.c
new file mode 100644
index 0000000..f16d01b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-3.c
@@ -0,0 +1,15 @@
+/* { dg-do run } */
+/* { dg-require-ifunc "" } */
+/* { dg-require-effective-target mmap } */
+/* { dg-options "-Wno-experimental-fmv-target" } */
+
+#include <stdint.h>
+
+typedef struct {
+ uint64_t size;
+ uint64_t hwcap;
+ uint64_t hwcap2;
+ uint64_t hwcap3;
+} ifunc_arg_t;
+
+#include "ifunc-resolver.in"
diff --git a/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-4.c b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-4.c
new file mode 100644
index 0000000..1b4ccbd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver-4.c
@@ -0,0 +1,16 @@
+/* { dg-do run } */
+/* { dg-require-ifunc "" } */
+/* { dg-require-effective-target mmap } */
+/* { dg-options "-Wno-experimental-fmv-target" } */
+
+#include <stdint.h>
+
+typedef struct {
+ uint64_t size;
+ uint64_t hwcap;
+ uint64_t hwcap2;
+ uint64_t hwcap3;
+ uint64_t hwcap4;
+} ifunc_arg_t;
+
+#include "ifunc-resolver.in"
diff --git a/gcc/testsuite/gcc.target/aarch64/ifunc-resolver.in b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver.in
new file mode 100644
index 0000000..ada0b33
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ifunc-resolver.in
@@ -0,0 +1,48 @@
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+
+/* Allocate a memory buffer of size LEN with a protected page
+ placed right after the end of the buffer so that any memory
+ access past the end of the buffer triggers a SEGFAULT. */
+void *allocate_mem (size_t len)
+{
+ size_t pagesize = sysconf (_SC_PAGESIZE);
+ char *m = mmap (NULL, pagesize * 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0);
+ mprotect (m + pagesize, pagesize, PROT_NONE);
+ m = m + pagesize - len;
+ memset(m, 0, len);
+ return m;
+}
+
+int impl ()
+{
+ return 0;
+}
+
+#ifndef _IFUNC_ARG_HWCAP
+#define _IFUNC_ARG_HWCAP (1ULL << 62)
+#endif
+
+void
+__init_cpu_features_resolver (unsigned long hwcap, const void *arg);
+
+static void *
+fun_resolver (uint64_t a0, const uint64_t *a1)
+{
+ ifunc_arg_t *arg = allocate_mem (sizeof (ifunc_arg_t));
+ arg->size = sizeof (ifunc_arg_t);
+ /* Call this function with synthetic ifunc_arg_t arg. */
+ __init_cpu_features_resolver (_IFUNC_ARG_HWCAP, arg);
+ return (void *)(uintptr_t)impl;
+}
+
+int fun (void) __attribute__ ((ifunc ("fun_resolver")));
+
+int main (int argc, char *argv[])
+{
+ return fun ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c
index 98922aa..3a63da7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c
@@ -1,5 +1,5 @@
// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls -funwind-tables" }
-// { dg-final { check-function-bodies "**" "" } }
+// { dg-final { check-function-bodies "**" "" "" { target "*-*-*" } {\t\.inst} } }
void ns_callee ();
void s_callee () [[arm::streaming]];
@@ -218,7 +218,7 @@ sc_caller_x1 (int *ptr, int a) [[arm::streaming_compatible]]
** bl ns_callee_stack
** ldr x16, \[x29, #?16\]
** tbz x16, 0, .*
-** smstart sm
+** .inst 0xd503437f // smstart sm
** ...
*/
void
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_11.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_11.c
index ee6f987..c72d03f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_11.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_11.c
@@ -1,5 +1,6 @@
// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls -funwind-tables -mtrack-speculation" }
-// { dg-final { check-function-bodies "**" "" } }
+// { dg-final { check-function-bodies "**" "" "" { target "*-*-*" } {\t\.inst} } }
+
void ns_callee ();
void s_callee () [[arm::streaming]];
@@ -196,7 +197,7 @@ sc_caller_x1 (int *ptr, int a) [[arm::streaming_compatible]]
** tst x16, #?1
** beq [^\n]*
** csel x15, x15, xzr, ne
-** smstart sm
+** .inst 0xd503437f // smstart sm
** ...
*/
void
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/pr121028.c b/gcc/testsuite/gcc.target/aarch64/sme/pr121028.c
new file mode 100644
index 0000000..a6aa119
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/pr121028.c
@@ -0,0 +1,46 @@
+// PR121028
+// { dg-do assemble { target aarch64_asm_sme_ok } }
+// { dg-options "-O --save-temps" }
+// { dg-final { check-function-bodies "**" "" "" { target "*-*-*" } {\t\.inst} } }
+
+void ns_callee ();
+
+/*
+** sc_caller_sme:
+** ...
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstop sm
+** bl ns_callee
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstart sm
+** ...
+*/
+void sc_caller_sme() __arm_streaming_compatible
+{
+ ns_callee ();
+}
+
+#pragma GCC target "+nosme"
+
+/*
+** sc_caller_nosme:
+** ...
+** bl __arm_sme_state
+** str x0, \[x29, #?16\]
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** .inst 0xd503427f // smstop sm
+** bl ns_callee
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** .inst 0xd503437f // smstart sm
+** ...
+*/
+void sc_caller_nosme() __arm_streaming_compatible
+{
+ ns_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x2.c
index 90b5438..b9fd96a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x2.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x4.c
index d168ad7..70e2697 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f16_x4.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x2.c
index 618d50b9..cf57d1b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x2.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x4.c
index 981e78c..10d9175 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f32_x4.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x2.c
index e93a409..b7918ab 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x2.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x4.c
index 2db629e..153a37a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amax_f64_x4.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x2.c
index 74604e1..bd6e13b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x2.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x4.c
index bc3779b..9f71b1f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f16_x4.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x2.c
index 43e3075..aaa6a2e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x2.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x4.c
index 6bd20f8f..34c1098 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f32_x4.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x2.c
index 3bbef3f..e4138e0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x2.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x4.c
index 6f4c9b7..8fbabe7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/amin_f64_x4.c
@@ -1,3 +1,5 @@
+/* { dg-do assemble { target { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } */
+/* { dg-do compile { target { ! { aarch64_asm_sme2_ok && aarch64_asm_faminmax_ok } } } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sme2_acle.h"
diff --git a/gcc/testsuite/gcc.target/i386/20020224-1.c b/gcc/testsuite/gcc.target/i386/20020224-1.c
index 2905719..769332b 100644
--- a/gcc/testsuite/gcc.target/i386/20020224-1.c
+++ b/gcc/testsuite/gcc.target/i386/20020224-1.c
@@ -4,6 +4,7 @@
while callee was actually not poping it up (as the hidden argument
was passed in register). */
/* { dg-do run } */
+/* { dg-require-effective-target ia32 } */
/* { dg-options "-O2 -fomit-frame-pointer" } */
extern void abort (void);
diff --git a/gcc/testsuite/gcc.target/i386/apx-1.c b/gcc/testsuite/gcc.target/i386/apx-1.c
index 4e580ec..b118928 100644
--- a/gcc/testsuite/gcc.target/i386/apx-1.c
+++ b/gcc/testsuite/gcc.target/i386/apx-1.c
@@ -3,6 +3,6 @@
/* { dg-error "'-mapxf' is not supported for 32-bit code" "" { target ia32 } 0 } */
void
-apx_hanlder ()
+apx_handler ()
{
}
diff --git a/gcc/testsuite/gcc.target/i386/attributes-error.c b/gcc/testsuite/gcc.target/i386/attributes-error.c
index 405eda5..5d1c77d 100644
--- a/gcc/testsuite/gcc.target/i386/attributes-error.c
+++ b/gcc/testsuite/gcc.target/i386/attributes-error.c
@@ -1,12 +1,40 @@
+/* { dg-options "-msse2" } */
/* { dg-do compile } */
/* { dg-require-effective-target ia32 } */
-void foo1(int i, int j) __attribute__((fastcall, cdecl)); /* { dg-error "not compatible" } */
-void foo2(int i, int j) __attribute__((fastcall, stdcall)); /* { dg-error "not compatible" } */
+void foo1(int i, int j) __attribute__((cdecl, regparm(2)));
+void foo2(int i, int j) __attribute__((stdcall, regparm(2)));
void foo3(int i, int j) __attribute__((fastcall, regparm(2))); /* { dg-error "not compatible" } */
-void foo4(int i, int j) __attribute__((stdcall, cdecl)); /* { dg-error "not compatible" } */
-void foo5(int i, int j) __attribute__((stdcall, fastcall)); /* { dg-error "not compatible" } */
-void foo6(int i, int j) __attribute__((cdecl, fastcall)); /* { dg-error "not compatible" } */
-void foo7(int i, int j) __attribute__((cdecl, stdcall)); /* { dg-error "not compatible" } */
-void foo8(int i, int j) __attribute__((regparm(2), fastcall)); /* { dg-error "not compatible" } */
+void foo4(int i, int j) __attribute__((thiscall, regparm(2))); /* { dg-error "not compatible" } */
+void foo5(int i, int j) __attribute__((sseregparm, regparm(2)));
+
+void foo6(int i, int j) __attribute__((stdcall, fastcall)); /* { dg-error "not compatible" } */
+void foo7(int i, int j) __attribute__((regparm(2), fastcall)); /* { dg-error "not compatible" } */
+void foo8(int i, int j) __attribute__((cdecl, fastcall)); /* { dg-error "not compatible" } */
+void foo9(int i, int j) __attribute__((thiscall, fastcall)); /* { dg-error "not compatible" } */
+void foo10(int i, int j) __attribute__((sseregparm, fastcall));
+
+void foo11(int i, int j) __attribute__((cdecl, stdcall)); /* { dg-error "not compatible" } */
+void foo12(int i, int j) __attribute__((fastcall, stdcall)); /* { dg-error "not compatible" } */
+void foo13(int i, int j) __attribute__((thiscall, stdcall)); /* { dg-error "not compatible" } */
+void foo14(int i, int j) __attribute__((regparm(2), stdcall));
+void foo15(int i, int j) __attribute__((sseregparm, stdcall));
+
+void foo16(int i, int j) __attribute__((stdcall, cdecl)); /* { dg-error "not compatible" } */
+void foo17(int i, int j) __attribute__((fastcall, cdecl)); /* { dg-error "not compatible" } */
+void foo18(int i, int j) __attribute__((thiscall, cdecl)); /* { dg-error "not compatible" } */
+void foo19(int i, int j) __attribute__((regparm(2), cdecl));
+void foo20(int i, int j) __attribute__((sseregparm, cdecl));
+
+void foo21(int i, int j) __attribute__((stdcall, thiscall)); /* { dg-error "not compatible" } */
+void foo22(int i, int j) __attribute__((fastcall, thiscall)); /* { dg-error "not compatible" } */
+void foo23(int i, int j) __attribute__((cdecl, thiscall)); /* { dg-error "not compatible" } */
+void foo24(int i, int j) __attribute__((regparm(2), thiscall)); /* { dg-error "not compatible" } */
+void foo25(int i, int j) __attribute__((sseregparm, thiscall));
+
+void foo26(int i, int j) __attribute__((cdecl, sseregparm));
+void foo27(int i, int j) __attribute__((fastcall, sseregparm));
+void foo28(int i, int j) __attribute__((stdcall, sseregparm));
+void foo29(int i, int j) __attribute__((thiscall, sseregparm));
+void foo30(int i, int j) __attribute__((regparm(2), sseregparm));
diff --git a/gcc/testsuite/gcc.target/i386/attributes-ignore.c b/gcc/testsuite/gcc.target/i386/attributes-ignore.c
new file mode 100644
index 0000000..93a3770
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/attributes-ignore.c
@@ -0,0 +1,8 @@
+/* { dg-do compile { target { ! ia32 } } } */
+
+void foo1(int i, int j) __attribute__((regparm(0))); /* { dg-warning "ignored" } */
+void foo2(int i, int j) __attribute__((stdcall)); /* { dg-warning "ignored" } */
+void foo3(int i, int j) __attribute__((fastcall)); /* { dg-warning "ignored" } */
+void foo4(int i, int j) __attribute__((cdecl)); /* { dg-warning "ignored" } */
+void foo5(int i, int j) __attribute__((thiscall)); /* { dg-warning "ignored" } */
+void foo6(int i, int j) __attribute__((sseregparm)); /* { dg-warning "ignored" } */
diff --git a/gcc/testsuite/gcc.target/i386/pr103785.c b/gcc/testsuite/gcc.target/i386/pr103785.c
index 5503b96..49d6c56 100644
--- a/gcc/testsuite/gcc.target/i386/pr103785.c
+++ b/gcc/testsuite/gcc.target/i386/pr103785.c
@@ -11,7 +11,10 @@ struct wrapper_t
struct wrapper_t **table;
-__attribute__ ((weak, regparm (2)))
+#ifndef __x86_64__
+__attribute__ ((regparm (2)))
+#endif
+__attribute__ ((weak))
void
update (long k, long e)
{
diff --git a/gcc/testsuite/gcc.target/i386/pr15184-2.c b/gcc/testsuite/gcc.target/i386/pr15184-2.c
index cb8201f..dd50c42 100644
--- a/gcc/testsuite/gcc.target/i386/pr15184-2.c
+++ b/gcc/testsuite/gcc.target/i386/pr15184-2.c
@@ -1,4 +1,4 @@
-/* PR 15184 second two tests
+/* PR 15184 second two tests */
/* { dg-do compile { target ia32 } } */
/* { dg-options "-O2 -march=pentiumpro" } */
/* { dg-additional-options "-fno-PIE" { target ia32 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr36533.c b/gcc/testsuite/gcc.target/i386/pr36533.c
index 8d71ece..8699d26 100644
--- a/gcc/testsuite/gcc.target/i386/pr36533.c
+++ b/gcc/testsuite/gcc.target/i386/pr36533.c
@@ -55,14 +55,22 @@ typedef struct
S1 *s18;
} S7;
-__attribute__((regparm (3), noinline)) int
+#ifndef __x86_64__
+__attribute__((regparm (3)))
+#endif
+__attribute__((noinline))
+int
fn1 (const char *x, void *y, S1 *z)
{
asm volatile ("" : : : "memory");
return *x + (y != 0);
}
-__attribute__((regparm (3), noinline)) int
+#ifndef __x86_64__
+__attribute__((regparm (3)))
+#endif
+__attribute__((noinline))
+int
fn2 (const char *x, int y, S2 *z)
{
asm volatile ("" : : : "memory");
@@ -84,7 +92,11 @@ fn3 (S3 *p)
return (S3 *) ((char *) p + fn4 (p->s9));
}
-__attribute__((regparm (3), noinline)) int
+#ifndef __x86_64__
+__attribute__((regparm (3)))
+#endif
+__attribute__((noinline))
+int
fn5 (void)
{
asm volatile ("" : : : "memory");
@@ -116,7 +128,11 @@ fn6 (S3 *w, int x, S2 *y, S4 *z)
return a;
}
-__attribute__((regparm (3), noinline)) unsigned int
+#ifndef __x86_64__
+__attribute__((regparm (3)))
+#endif
+__attribute__((noinline))
+unsigned int
test (void *u, S6 *v, S1 **w, S7 *x, S2 *y, S1 *z)
{
unsigned b = v->s17->s16;
diff --git a/gcc/testsuite/gcc.target/i386/pr59099.c b/gcc/testsuite/gcc.target/i386/pr59099.c
index cf4a8da..21dfbc2 100644
--- a/gcc/testsuite/gcc.target/i386/pr59099.c
+++ b/gcc/testsuite/gcc.target/i386/pr59099.c
@@ -13,10 +13,17 @@ struct s
};
-void* f (struct s *, struct s *) __attribute__ ((noinline, regparm(1)));
+void* f (struct s *, struct s *)
+#ifndef __x86_64__
+__attribute__ ((regparm(1)))
+#endif
+__attribute__ ((noinline))
+;
void*
+#ifndef __x86_64__
__attribute__ ((regparm(1)))
+#endif
f (struct s *p, struct s *p2)
{
void *gp, *gp1;
diff --git a/gcc/testsuite/gcc.target/i386/sibcall-8.c b/gcc/testsuite/gcc.target/i386/sibcall-8.c
index 3ab3809..29ebfe5 100644
--- a/gcc/testsuite/gcc.target/i386/sibcall-8.c
+++ b/gcc/testsuite/gcc.target/i386/sibcall-8.c
@@ -1,23 +1,29 @@
/* { dg-do run } */
/* { dg-options "-O2" } */
+#ifndef __x86_64__
+#define REGPARM __attribute__((regparm(1)))
+#else
+#define REGPARM
+#endif
+
extern void abort (void);
-static int __attribute__((regparm(1)))
+static int REGPARM
bar(void *arg)
{
return arg != bar;
}
-static int __attribute__((noinline,noclone,regparm(1)))
-foo(int (__attribute__((regparm(1))) **bar)(void*))
+static int __attribute__((noinline,noclone)) REGPARM
+foo(int (REGPARM **bar)(void*))
{
return (*bar)(*bar);
}
int main()
{
- int (__attribute__((regparm(1))) *p)(void*) = bar;
+ int (REGPARM *p)(void*) = bar;
if (foo(&p))
abort();
return 0;
diff --git a/gcc/testsuite/gcc.target/i386/sw-1.c b/gcc/testsuite/gcc.target/i386/sw-1.c
index 14db3ce..025f0e1 100644
--- a/gcc/testsuite/gcc.target/i386/sw-1.c
+++ b/gcc/testsuite/gcc.target/i386/sw-1.c
@@ -7,7 +7,10 @@
int c;
int x[2000];
-__attribute__((regparm(1))) void foo (int a, int b)
+#ifndef __x86_64__
+__attribute__((regparm(1)))
+#endif
+void foo (int a, int b)
{
int t[200];
if (a == 0 || c == 0)
diff --git a/gcc/testsuite/gcc.target/i386/uintr-2.c b/gcc/testsuite/gcc.target/i386/uintr-2.c
index 0a83c66..a0d2514 100644
--- a/gcc/testsuite/gcc.target/i386/uintr-2.c
+++ b/gcc/testsuite/gcc.target/i386/uintr-2.c
@@ -15,6 +15,6 @@ foo (void *frame, uword_t uirrv)
void
__attribute__((interrupt))
-UINTR_hanlder (struct __uintr_frame *frame, uword_t uirrv)
+UINTR_handler (struct __uintr_frame *frame, uword_t uirrv)
{
}
diff --git a/gcc/testsuite/gcc.target/i386/uintr-5.c b/gcc/testsuite/gcc.target/i386/uintr-5.c
index 49cb2ec..7c7c12f 100644
--- a/gcc/testsuite/gcc.target/i386/uintr-5.c
+++ b/gcc/testsuite/gcc.target/i386/uintr-5.c
@@ -7,6 +7,6 @@
typedef unsigned int uword_t __attribute__ ((mode (__word__)));
void
-UINTR_hanlder (struct __uintr_frame *frame, uword_t uirrv)
+UINTR_handler (struct __uintr_frame *frame, uword_t uirrv)
{
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i16.c
index f84d7f5..4e1a575 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i16.c
@@ -20,4 +20,4 @@ TEST_BINARY_VX_SIGNED_0(T)
/* { dg-final { scan-assembler-times {vmin.vx} 2 } } */
/* { dg-final { scan-assembler-times {vsadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vssub.vx} 1 } } */
-/* { dg-final { scan-assembler-times {vaadd.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vaadd.vx} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i32.c
index 70b6743..4c4f72d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i32.c
@@ -20,4 +20,4 @@ TEST_BINARY_VX_SIGNED_0(T)
/* { dg-final { scan-assembler-times {vmin.vx} 2 } } */
/* { dg-final { scan-assembler-times {vsadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vssub.vx} 1 } } */
-/* { dg-final { scan-assembler-times {vaadd.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vaadd.vx} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i64.c
index 986fa4c..abf62c2 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i64.c
@@ -20,4 +20,7 @@ TEST_BINARY_VX_SIGNED_0(T)
/* { dg-final { scan-assembler-times {vmin.vx} 2 } } */
/* { dg-final { scan-assembler-times {vsadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vssub.vx} 1 } } */
-/* { dg-final { scan-assembler-times {vaadd.vx} 1 { target { no-opts "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m2" "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m4" } } } } */
+/* { dg-final { scan-assembler-times {vaadd.vx} 2 { target { no-opts
+ "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m2"
+ "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m4"
+ } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i8.c
index c479295..7744bcb 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-i8.c
@@ -20,4 +20,4 @@ TEST_BINARY_VX_SIGNED_0(T)
/* { dg-final { scan-assembler-times {vmin.vx} 2 } } */
/* { dg-final { scan-assembler-times {vsadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vssub.vx} 1 } } */
-/* { dg-final { scan-assembler-times {vaadd.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vaadd.vx} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i16.c
index 86c8040..2ae4804 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i16.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X8)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X8)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i32.c
index e2d1613..88cfc72 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i32.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X4)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X4)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i64.c
index 06ffa15..6b29a72 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i64.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -35,4 +36,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_B
/* { dg-final { scan-assembler {vmin.vx} } } */
/* { dg-final { scan-assembler-not {vsadd.vx} } } */
/* { dg-final { scan-assembler-not {vssub.vx} } } */
-/* { dg-final { scan-assembler {vaadd.vx} { target { no-opts "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m2" "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m4" } } } } */
+/* { dg-final { scan-assembler {vaadd.vx} { target { no-opts {
+ "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m2"
+ "-O3 -mrvv-vector-bits=zvl -mrvv-max-lmul=m4"
+ } } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i8.c
index cb086aa..f862eb7 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-i8.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X8)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X8)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i16.c
index 7c7bf09..df6872c 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i16.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X8)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X8)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i32.c
index 6d161bd..05ed639 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i32.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X4)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X4)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i64.c
index 0409012..6776b1f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i64.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i8.c
index ed437319..d3e2785 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-i8.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X8)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X8)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i16.c
index 1e18342..0bfa2cb 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i16.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X8)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X8)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i32.c
index fd6e47c..3e3acfc 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i32.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X4)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X4)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X4)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i64.c
index 399d0f5..531c119 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i64.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i8.c
index 98567a3..43246bb 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-i8.c
@@ -21,6 +21,7 @@ DEF_VX_BINARY_CASE_3_WRAP(T, MIN_FUNC_1_WARP(T), min, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_ADD_FUNC_WRAP(T), sat_add, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, SAT_S_SUB_FUNC_WRAP(T), sat_sub, VX_BINARY_FUNC_BODY_X8)
DEF_VX_BINARY_CASE_3_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor, VX_BINARY_FUNC_BODY_X8)
+DEF_VX_BINARY_CASE_3_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil, VX_BINARY_FUNC_BODY_X8)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary.h
index de48ebd..4a9daff 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary.h
@@ -374,11 +374,16 @@ DEF_AVG_CEIL(uint8_t, uint16_t)
DEF_AVG_CEIL(uint16_t, uint32_t)
DEF_AVG_CEIL(uint32_t, uint64_t)
+DEF_AVG_CEIL(int8_t, int16_t)
+DEF_AVG_CEIL(int16_t, int32_t)
+DEF_AVG_CEIL(int32_t, int64_t)
+
#ifdef HAS_INT128
DEF_AVG_FLOOR(uint64_t, uint128_t)
DEF_AVG_FLOOR(int64_t, int128_t)
DEF_AVG_CEIL(uint64_t, uint128_t)
+ DEF_AVG_CEIL(int64_t, int128_t)
#endif
#define AVG_FLOOR_FUNC(T) test_##T##_avg_floor
@@ -404,6 +409,7 @@ DEF_AVG_CEIL(uint32_t, uint64_t)
DEF_VX_BINARY_CASE_2_WRAP(T, SAT_S_ADD_FUNC(T), sat_add) \
DEF_VX_BINARY_CASE_2_WRAP(T, SAT_S_SUB_FUNC(T), sat_sub) \
DEF_VX_BINARY_CASE_2_WRAP(T, AVG_FLOOR_FUNC_WRAP(T), avg_floor) \
+ DEF_VX_BINARY_CASE_2_WRAP(T, AVG_CEIL_FUNC_WRAP(T), avg_ceil) \
#define TEST_BINARY_VX_UNSIGNED_0(T) \
DEF_VX_BINARY_CASE_0_WRAP(T, +, add) \
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h
index 5024ae7..626347c 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h
@@ -5494,4 +5494,200 @@ uint64_t TEST_BINARY_DATA(uint64_t, avg_ceil)[][3][N] =
},
};
+int8_t TEST_BINARY_DATA(int8_t, avg_ceil)[][3][N] =
+{
+ {
+ { 0 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 4, 4, 4, 4,
+ },
+ {
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ },
+ },
+ {
+ { 127 },
+ {
+ 127, 127, 127, 127,
+ -128, -128, -128, -128,
+ -127, -127, -127, -127,
+ 1, 1, 1, 1,
+ },
+ {
+ 127, 127, 127, 127,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 64, 64, 64, 64,
+ },
+ },
+ {
+ {-128 },
+ {
+ 0, 0, 0, 0,
+ -128, -128, -128, -128,
+ 126, 126, 126, 126,
+ 127, 127, 127, 127,
+ },
+ {
+ -64, -64, -64, -64,
+ -128, -128, -128, -128,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
+int16_t TEST_BINARY_DATA(int16_t, avg_ceil)[][3][N] =
+{
+ {
+ { 0 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 4, 4, 4, 4,
+ },
+ {
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ },
+ },
+ {
+ { 32767 },
+ {
+ 32767, 32767, 32767, 32767,
+ -32768, -32768, -32768, -32768,
+ -32767, -32767, -32767, -32767,
+ 1, 1, 1, 1,
+ },
+ {
+ 32767, 32767, 32767, 32767,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 16384, 16384, 16384, 16384,
+ },
+ },
+ {
+ {-32768 },
+ {
+ 0, 0, 0, 0,
+ -32768, -32768, -32768, -32768,
+ 32766, 32766, 32766, 32766,
+ 32767, 32767, 32767, 32767,
+ },
+ {
+ -16384, -16384, -16384, -16384,
+ -32768, -32768, -32768, -32768,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
+int32_t TEST_BINARY_DATA(int32_t, avg_ceil)[][3][N] =
+{
+ {
+ { 0 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 4, 4, 4, 4,
+ },
+ {
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ },
+ },
+ {
+ { 2147483647 },
+ {
+ 2147483647, 2147483647, 2147483647, 2147483647,
+ -2147483648, -2147483648, -2147483648, -2147483648,
+ -2147483647, -2147483647, -2147483647, -2147483647,
+ 1, 1, 1, 1,
+ },
+ {
+ 2147483647, 2147483647, 2147483647, 2147483647,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 1073741824, 1073741824, 1073741824, 1073741824,
+ },
+ },
+ {
+ {-2147483648 },
+ {
+ 0, 0, 0, 0,
+ -2147483648, -2147483648, -2147483648, -2147483648,
+ 2147483646, 2147483646, 2147483646, 2147483646,
+ 2147483647, 2147483647, 2147483647, 2147483647,
+ },
+ {
+ -1073741824, -1073741824, -1073741824, -1073741824,
+ -2147483648, -2147483648, -2147483648, -2147483648,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
+int64_t TEST_BINARY_DATA(int64_t, avg_ceil)[][3][N] =
+{
+ {
+ { 0 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 4, 4, 4, 4,
+ },
+ {
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ },
+ },
+ {
+ { 9223372036854775807ull },
+ {
+ 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull,
+ -9223372036854775808ull, -9223372036854775808ull, -9223372036854775808ull, -9223372036854775808ull,
+ -9223372036854775807ull, -9223372036854775807ull, -9223372036854775807ull, -9223372036854775807ull,
+ 1, 1, 1, 1,
+ },
+ {
+ 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 4611686018427387904ull, 4611686018427387904ull, 4611686018427387904ull, 4611686018427387904ull,
+ },
+ },
+ {
+ {-9223372036854775808ull },
+ {
+ 0, 0, 0, 0,
+ -9223372036854775808ull, -9223372036854775808ull, -9223372036854775808ull, -9223372036854775808ull,
+ 9223372036854775806ull, 9223372036854775806ull, 9223372036854775806ull, 9223372036854775806ull,
+ 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull,
+ },
+ {
+ -4611686018427387904ull, -4611686018427387904ull, -4611686018427387904ull, -4611686018427387904ull,
+ -9223372036854775808ull, -9223372036854775808ull, -9223372036854775808ull, -9223372036854775808ull,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
#endif
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i16.c
new file mode 100644
index 0000000..8def643
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i16.c
@@ -0,0 +1,17 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T int16_t
+#define NAME avg_ceil
+#define FUNC AVG_CEIL_FUNC_WRAP(T)
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+
+DEF_VX_BINARY_CASE_2_WRAP(T, FUNC, NAME)
+
+#define TEST_RUN(T, NAME, out, in, x, n) \
+ RUN_VX_BINARY_CASE_2_WRAP(T, NAME, FUNC, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i32.c
new file mode 100644
index 0000000..d9ca67d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i32.c
@@ -0,0 +1,17 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T int32_t
+#define NAME avg_ceil
+#define FUNC AVG_CEIL_FUNC_WRAP(T)
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+
+DEF_VX_BINARY_CASE_2_WRAP(T, FUNC, NAME)
+
+#define TEST_RUN(T, NAME, out, in, x, n) \
+ RUN_VX_BINARY_CASE_2_WRAP(T, NAME, FUNC, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i64.c
new file mode 100644
index 0000000..313109a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i64.c
@@ -0,0 +1,17 @@
+/* { dg-do run { target { riscv_v && rv64 } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T int64_t
+#define NAME avg_ceil
+#define FUNC AVG_CEIL_FUNC_WRAP(T)
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+
+DEF_VX_BINARY_CASE_2_WRAP(T, FUNC, NAME)
+
+#define TEST_RUN(T, NAME, out, in, x, n) \
+ RUN_VX_BINARY_CASE_2_WRAP(T, NAME, FUNC, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i8.c
new file mode 100644
index 0000000..47e4a5d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vaadd-run-2-i8.c
@@ -0,0 +1,17 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T int8_t
+#define NAME avg_ceil
+#define FUNC AVG_CEIL_FUNC_WRAP(T)
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+
+DEF_VX_BINARY_CASE_2_WRAP(T, FUNC, NAME)
+
+#define TEST_RUN(T, NAME, out, in, x, n) \
+ RUN_VX_BINARY_CASE_2_WRAP(T, NAME, FUNC, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gfortran.dg/class_elemental_1.f90 b/gcc/testsuite/gfortran.dg/class_elemental_1.f90
new file mode 100644
index 0000000..547ae98
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_elemental_1.f90
@@ -0,0 +1,35 @@
+! { dg-do run }
+!
+! PR fortran/121342
+! A polymorphic function result passed as an actual argument used to force the
+! loop bounds around the elemental call, altering access to the other arrays.
+
+program p
+ implicit none
+ type :: t
+ integer :: i
+ end type
+ type :: u
+ integer :: i, a
+ end type
+ type(u) :: accum(5)
+ integer :: a(3:7), k
+ a = [ (k*k, k=1,5) ]
+ call s(accum, f(), a)
+ ! print *, accum%i
+ ! print *, accum%a
+ if (any(accum%i /= accum%a)) error stop 1
+contains
+ elemental subroutine s(l, c, a)
+ type(u) , intent(out) :: l
+ class(t) , intent(in) :: c
+ integer , intent(in) :: a
+ l%i = c%i
+ l%a = a
+ end subroutine
+ function f()
+ class(t), allocatable :: f(:)
+ allocate(f(-1:3))
+ f%i = [ (k*k, k=1,5) ]
+ end function
+end program
diff --git a/gcc/testsuite/gm2/errors/fail/badindrtype.mod b/gcc/testsuite/gm2/errors/fail/badindrtype.mod
new file mode 100644
index 0000000..b393027
--- /dev/null
+++ b/gcc/testsuite/gm2/errors/fail/badindrtype.mod
@@ -0,0 +1,16 @@
+MODULE badindrtype ;
+
+
+PROCEDURE init (VAR ch: CHAR) ;
+VAR
+ c: CARDINAL ;
+BEGIN
+ ch := c
+END init ;
+
+
+VAR
+ ch: CHAR ;
+BEGIN
+ init (ch)
+END badindrtype.
diff --git a/gcc/testsuite/gm2/errors/fail/badindrtype2.mod b/gcc/testsuite/gm2/errors/fail/badindrtype2.mod
new file mode 100644
index 0000000..a31303b
--- /dev/null
+++ b/gcc/testsuite/gm2/errors/fail/badindrtype2.mod
@@ -0,0 +1,16 @@
+MODULE badindrtype2 ;
+
+
+PROCEDURE init (VAR ch: CHAR) ;
+VAR
+ c: CARDINAL ;
+BEGIN
+ c := ch
+END init ;
+
+
+VAR
+ ch: CHAR ;
+BEGIN
+ init (ch)
+END badindrtype2.
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index e375b1e..7435519 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -12522,10 +12522,16 @@ proc check_effective_target_aarch64_gas_has_build_attributes { } {
# various architecture extensions via the .arch_extension pseudo-op.
set exts {
- "bf16" "cmpbr" "crc" "crypto" "dotprod" "f32mm" "f64mm" "fp" "fp8"
- "fp8dot2" "fp8dot4" "fp8fma" "i8mm" "ls64" "lse" "lut" "sb" "simd"
- "sme-b16b16" "sme-f16f16" "sme-i16i64" "sme" "sme2" "sme2p1" "ssve-fp8dot2"
- "ssve-fp8dot4" "ssve-fp8fma" "sve-b16b16" "sve" "sve2"
+ "bf16" "cmpbr" "crc" "crypto" "dotprod" "f32mm" "f64mm" "faminmax"
+ "fp" "fp8" "fp8dot2" "fp8dot4" "fp8fma" "i8mm" "ls64" "lse" "lut"
+ "sb" "simd" "sve-b16b16" "sve" "sve2"
+}
+
+# We don't support SME without SVE2, so we'll use armv9 as the base
+# architecture for SME and the features that require it.
+set exts_sve2 {
+ "sme-b16b16" "sme-f16f16" "sme-i16i64" "sme" "sme2" "sme2p1"
+ "ssve-fp8dot2" "ssve-fp8dot4" "ssve-fp8fma"
}
foreach { aarch64_ext } $exts {
@@ -12542,6 +12548,20 @@ foreach { aarch64_ext } $exts {
}]
}
+foreach { aarch64_ext } $exts_sve2 {
+ eval [string map [list FUNC $aarch64_ext] {
+ proc check_effective_target_aarch64_asm_FUNC_ok { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_FUNC_assembler object {
+ __asm__ (".arch_extension FUNC");
+ } "-march=armv9-a+FUNC"]
+ } else {
+ return 0
+ }
+ }
+ }]
+}
+
proc check_effective_target_aarch64_asm_sve2p1_ok { } {
if { [istarget aarch64*-*-*] } {
return [check_no_compiler_messages aarch64_sve2p1_assembler object {
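With the split above, assembler support for the SME-family extensions is probed on top of -march=armv9-a rather than the armv8 baseline, and each entry still expands to a check_effective_target_aarch64_asm_<ext>_ok proc. A hypothetical test gating on one of the generated keywords could look like this (the test body is illustrative; only the directive name follows from the proc pattern defined above):

/* { dg-do assemble { target aarch64*-*-* } } */
/* { dg-require-effective-target aarch64_asm_sme2_ok } */
/* { dg-additional-options "-march=armv9-a+sme2" } */

void
use_sme2 (void)
{
  __asm__ (".arch_extension sme2");
}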
diff --git a/gcc/tree-ssa-alias.cc b/gcc/tree-ssa-alias.cc
index 4119343..9b028e0 100644
--- a/gcc/tree-ssa-alias.cc
+++ b/gcc/tree-ssa-alias.cc
@@ -901,7 +901,9 @@ ao_ref_init_from_ptr_and_range (ao_ref *ref, tree ptr,
if (TREE_CODE (ptr) == ADDR_EXPR)
{
ref->base = get_addr_base_and_unit_offset (TREE_OPERAND (ptr, 0), &t);
- if (ref->base)
+ if (ref->base
+ && coeffs_in_range_p (t, -HOST_WIDE_INT_MAX / BITS_PER_UNIT,
+ HOST_WIDE_INT_MAX / BITS_PER_UNIT))
ref->offset = BITS_PER_UNIT * t;
else
{
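The added coeffs_in_range_p check keeps the byte offset from overflowing a signed HOST_WIDE_INT once it is scaled by BITS_PER_UNIT; offsets outside that range now fall through to the conservative else branch instead. A standalone sketch of the guard's intent (names and types are stand-ins, not GCC's):

#include <cstdint>

constexpr int64_t HWI_MAX = INT64_MAX;   /* stand-in for HOST_WIDE_INT_MAX */
constexpr int     BITS_PER_UNIT_ = 8;

/* True iff byte_off * BITS_PER_UNIT_ still fits in int64_t, so scaling the
   byte offset to a bit offset cannot wrap.  */
static bool
byte_offset_fits_in_bits (int64_t byte_off)
{
  return byte_off >= -(HWI_MAX / BITS_PER_UNIT_)
         && byte_off <= HWI_MAX / BITS_PER_UNIT_;
}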
diff --git a/gcc/tree-ssa-loop-ivopts.cc b/gcc/tree-ssa-loop-ivopts.cc
index 544a946..879668c 100644
--- a/gcc/tree-ssa-loop-ivopts.cc
+++ b/gcc/tree-ssa-loop-ivopts.cc
@@ -147,7 +147,7 @@ along with GCC; see the file COPYING3. If not see
The average trip count is computed from profile data if it
exists. */
-static inline HOST_WIDE_INT
+static inline unsigned HOST_WIDE_INT
avg_loop_niter (class loop *loop)
{
HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
@@ -4213,7 +4213,9 @@ adjust_setup_cost (struct ivopts_data *data, int64_t cost,
return cost;
else if (optimize_loop_for_speed_p (data->current_loop))
{
- int64_t niters = (int64_t) avg_loop_niter (data->current_loop);
+ uint64_t niters = avg_loop_niter (data->current_loop);
+ if (niters > (uint64_t) cost)
+ return (round_up_p && cost != 0) ? 1 : 0;
return (cost + (round_up_p ? niters - 1 : 0)) / niters;
}
else
@@ -7277,7 +7279,7 @@ create_new_ivs (struct ivopts_data *data, class iv_ca *set)
if (data->loop_loc != UNKNOWN_LOCATION)
fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
LOCATION_LINE (data->loop_loc));
- fprintf (dump_file, ", " HOST_WIDE_INT_PRINT_DEC " avg niters",
+ fprintf (dump_file, ", " HOST_WIDE_INT_PRINT_UNSIGNED " avg niters",
avg_loop_niter (data->current_loop));
fprintf (dump_file, ", %lu IVs:\n", bitmap_count_bits (set->cands));
EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
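adjust_setup_cost spreads a one-off setup cost over the loop's average iteration count; with avg_loop_niter now returning an unsigned value, the new early return handles the case where the iteration count exceeds the cost, so the per-iteration cost degrades to 1 (when rounding up) or 0 instead of relying on signed division. A standalone sketch of that arithmetic with made-up inputs (in the real function this path is only taken under optimize_loop_for_speed_p):

#include <cstdint>

/* Assumes cost >= 0 and niters >= 1.  */
static int64_t
adjusted_setup_cost (int64_t cost, uint64_t niters, bool round_up_p)
{
  if (niters > (uint64_t) cost)
    /* Less than one unit of cost per iteration: round up to 1, or to 0.  */
    return (round_up_p && cost != 0) ? 1 : 0;
  return (cost + (round_up_p ? niters - 1 : 0)) / niters;
}

/* adjusted_setup_cost (100, 8, true)  == 13   -- (100 + 7) / 8
   adjusted_setup_cost (3, 1000, true) == 1    -- rounded up rather than to 0  */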
diff --git a/gcc/tree-ssa-sccvn.cc b/gcc/tree-ssa-sccvn.cc
index 45fb79c..a3117da 100644
--- a/gcc/tree-ssa-sccvn.cc
+++ b/gcc/tree-ssa-sccvn.cc
@@ -1219,7 +1219,7 @@ ao_ref_init_from_vn_reference (ao_ref *ref,
offset = 0;
}
else
- offset += pop->off * BITS_PER_UNIT;
+ offset += poly_offset_int (pop->off) * BITS_PER_UNIT;
op0_p = NULL;
break;
}
@@ -1270,7 +1270,7 @@ ao_ref_init_from_vn_reference (ao_ref *ref,
if (maybe_eq (op->off, -1))
max_size = -1;
else
- offset += op->off * BITS_PER_UNIT;
+ offset += poly_offset_int (op->off) * BITS_PER_UNIT;
break;
case REALPART_EXPR:
diff --git a/gcc/tree-vect-loop-manip.cc b/gcc/tree-vect-loop-manip.cc
index 7fcbc1a..6c1b26a 100644
--- a/gcc/tree-vect-loop-manip.cc
+++ b/gcc/tree-vect-loop-manip.cc
@@ -2857,7 +2857,7 @@ vect_gen_vector_loop_niters (loop_vec_info loop_vinfo, tree niters,
we set range information to make niters analyzer's life easier.
Note the number of latch iteration value can be TYPE_MAX_VALUE so
we have to represent the vector niter TYPE_MAX_VALUE + 1 / vf. */
- if (stmts != NULL && const_vf > 0)
+ if (stmts != NULL && const_vf > 0 && !LOOP_VINFO_EPILOGUE_P (loop_vinfo))
{
if (niters_no_overflow
&& LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 80b5a0a..460de57 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -11292,11 +11292,9 @@ update_epilogue_loop_vinfo (class loop *epilogue, tree advance)
updated offset we set using ADVANCE. Instead we have to make sure the
reference in the data references point to the corresponding copy of
the original in the epilogue. Make sure to update both
- gather/scatters recognized by dataref analysis and also other
- refs that get_load_store_type classified as VMAT_GATHER_SCATTER. */
+ gather/scatters recognized by dataref analysis. */
auto vstmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
- if (STMT_VINFO_MEMORY_ACCESS_TYPE (vstmt_vinfo) == VMAT_GATHER_SCATTER
- || STMT_VINFO_STRIDED_P (vstmt_vinfo)
+ if (STMT_VINFO_STRIDED_P (vstmt_vinfo)
|| STMT_VINFO_GATHER_SCATTER_P (vstmt_vinfo))
{
/* ??? As we copy epilogues from the main loop incremental
@@ -11318,9 +11316,6 @@ update_epilogue_loop_vinfo (class loop *epilogue, tree advance)
/* Remember the advancement made. */
LOOP_VINFO_DRS_ADVANCED_BY (epilogue_vinfo) = advance;
-
- epilogue_vinfo->shared->datarefs_copy.release ();
- epilogue_vinfo->shared->save_datarefs ();
}
/* When vectorizing early break statements instructions that happen before
@@ -11426,7 +11421,8 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
DUMP_VECT_SCOPE ("vec_transform_loop");
- loop_vinfo->shared->check_datarefs ();
+ if (! LOOP_VINFO_EPILOGUE_P (loop_vinfo))
+ loop_vinfo->shared->check_datarefs ();
/* Use the more conservative vectorization threshold. If the number
of iterations is constant assume the cost check has been performed
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index a9c7105..ca14a2d 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -118,20 +118,19 @@ _slp_tree::_slp_tree ()
SLP_TREE_CHILDREN (this) = vNULL;
SLP_TREE_LOAD_PERMUTATION (this) = vNULL;
SLP_TREE_LANE_PERMUTATION (this) = vNULL;
- SLP_TREE_SIMD_CLONE_INFO (this) = vNULL;
SLP_TREE_DEF_TYPE (this) = vect_uninitialized_def;
SLP_TREE_CODE (this) = ERROR_MARK;
this->ldst_lanes = false;
this->avoid_stlf_fail = false;
SLP_TREE_VECTYPE (this) = NULL_TREE;
SLP_TREE_REPRESENTATIVE (this) = NULL;
- SLP_TREE_MEMORY_ACCESS_TYPE (this) = VMAT_INVARIANT;
+ SLP_TREE_MEMORY_ACCESS_TYPE (this) = VMAT_UNINITIALIZED;
SLP_TREE_REF_COUNT (this) = 1;
this->failed = NULL;
this->max_nunits = 1;
this->lanes = 0;
SLP_TREE_TYPE (this) = undef_vec_info_type;
- this->u.undef = NULL;
+ this->data = NULL;
}
/* Tear down a SLP node. */
@@ -150,9 +149,10 @@ _slp_tree::~_slp_tree ()
SLP_TREE_VEC_DEFS (this).release ();
SLP_TREE_LOAD_PERMUTATION (this).release ();
SLP_TREE_LANE_PERMUTATION (this).release ();
- SLP_TREE_SIMD_CLONE_INFO (this).release ();
if (this->failed)
free (failed);
+ if (this->data)
+ delete this->data;
}
/* Push the single SSA definition in DEF to the vector of vector defs. */
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 88a12a1..97222f6 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1423,13 +1423,30 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
vect_memory_access_type
memory_access_type,
const gather_scatter_info *gs_info,
- tree scalar_mask,
+ slp_tree mask_node,
vec<int> *elsvals = nullptr)
{
/* Invariant loads need no special support. */
if (memory_access_type == VMAT_INVARIANT)
return;
+  /* Figure out whether the mask is uniform.  scalar_mask is used to
+ populate the scalar_cond_masked_set. */
+ tree scalar_mask = NULL_TREE;
+ if (mask_node)
+ for (unsigned i = 0; i < SLP_TREE_LANES (mask_node); ++i)
+ {
+ tree def = vect_get_slp_scalar_def (mask_node, i);
+ if (!def
+ || (scalar_mask && def != scalar_mask))
+ {
+ scalar_mask = NULL;
+ break;
+ }
+ else
+ scalar_mask = def;
+ }
+
unsigned int nvectors = vect_get_num_copies (loop_vinfo, slp_node, vectype);
vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
@@ -1515,7 +1532,7 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
}
/* We might load more scalars than we need for permuting SLP loads.
- We checked in get_group_load_store_type that the extra elements
+ We checked in get_load_store_type that the extra elements
don't leak into a new vector. */
auto group_memory_nvectors = [](poly_uint64 size, poly_uint64 nunits)
{
@@ -1915,38 +1932,45 @@ vector_vector_composition_type (tree vtype, poly_uint64 nelts, tree *ptype)
return NULL_TREE;
}
-/* A subroutine of get_load_store_type, with a subset of the same
- arguments. Handle the case where STMT_INFO is part of a grouped load
- or store.
+/* Analyze load or store SLP_NODE of type VLS_TYPE. Return true
+ if there is a memory access type that the vectorized form can use,
+ storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
+ or scatters, fill in GS_INFO accordingly. In addition
+ *ALIGNMENT_SUPPORT_SCHEME is filled out and false is returned if
+ the target does not support the alignment scheme. *MISALIGNMENT
+ is set according to the alignment of the access (including
+ DR_MISALIGNMENT_UNKNOWN when it is unknown).
- For stores, the statements in the group are all consecutive
- and there is no gap at the end. For loads, the statements in the
- group might not be consecutive; there can be gaps between statements
- as well as at the end.
+ MASKED_P is true if the statement is conditional on a vectorized mask.
+ VECTYPE is the vector type that the vectorized statements will use.
- If we can use gather/scatter and ELSVALS is nonzero the supported
- else values will be stored in the vector ELSVALS points to.
-*/
+ If ELSVALS is nonzero the supported else values will be stored in the
+ vector ELSVALS points to. */
static bool
-get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
- tree vectype, slp_tree slp_node,
- bool masked_p, vec_load_store_type vls_type,
- vect_memory_access_type *memory_access_type,
- poly_int64 *poffset,
- dr_alignment_support *alignment_support_scheme,
- int *misalignment,
- gather_scatter_info *gs_info,
- internal_fn *lanes_ifn,
- vec<int> *elsvals)
+get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
+ tree vectype, slp_tree slp_node,
+ bool masked_p, vec_load_store_type vls_type,
+ vect_memory_access_type *memory_access_type,
+ poly_int64 *poffset,
+ dr_alignment_support *alignment_support_scheme,
+ int *misalignment,
+ gather_scatter_info *gs_info,
+ internal_fn *lanes_ifn,
+ vec<int> *elsvals = nullptr)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
stmt_vec_info first_stmt_info;
unsigned int group_size;
unsigned HOST_WIDE_INT gap;
bool single_element_p;
poly_int64 neg_ldst_offset = 0;
+
+ *misalignment = DR_MISALIGNMENT_UNKNOWN;
+ *poffset = 0;
+
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
@@ -1963,7 +1987,6 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
single_element_p = true;
}
dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
- poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* True if the vectorized statements would access beyond the last
statement in the group. */
@@ -2007,7 +2030,7 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
if (GATHER_SCATTER_IFN_P (*gs_info)
&& !is_gimple_call (stmt_info->stmt)
&& !tree_nop_conversion_p (TREE_TYPE (gs_info->offset),
- offset_vectype))
+ TREE_TYPE (offset_vectype)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -2308,26 +2331,6 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
*memory_access_type == VMAT_GATHER_SCATTER ? gs_info : nullptr);
}
- if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
- {
- /* STMT is the leader of the group. Check the operands of all the
- stmts of the group. */
- stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
- while (next_stmt_info)
- {
- tree op = vect_get_store_rhs (next_stmt_info);
- enum vect_def_type dt;
- if (!vect_is_simple_use (op, vinfo, &dt))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "use not simple.\n");
- return false;
- }
- next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
- }
- }
-
if (overrun_p)
{
gcc_assert (can_overrun_p);
@@ -2338,51 +2341,6 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
}
- return true;
-}
-
-/* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
- if there is a memory access type that the vectorized form can use,
- storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
- or scatters, fill in GS_INFO accordingly. In addition
- *ALIGNMENT_SUPPORT_SCHEME is filled out and false is returned if
- the target does not support the alignment scheme. *MISALIGNMENT
- is set according to the alignment of the access (including
- DR_MISALIGNMENT_UNKNOWN when it is unknown).
-
- SLP says whether we're performing SLP rather than loop vectorization.
- MASKED_P is true if the statement is conditional on a vectorized mask.
- VECTYPE is the vector type that the vectorized statements will use.
- NCOPIES is the number of vector statements that will be needed.
-
- If ELSVALS is nonzero the supported else values will be stored in the
- vector ELSVALS points to. */
-
-static bool
-get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
- tree vectype, slp_tree slp_node,
- bool masked_p, vec_load_store_type vls_type,
- unsigned int,
- vect_memory_access_type *memory_access_type,
- poly_int64 *poffset,
- dr_alignment_support *alignment_support_scheme,
- int *misalignment,
- gather_scatter_info *gs_info,
- internal_fn *lanes_ifn,
- vec<int> *elsvals = nullptr)
-{
- loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
- poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- *misalignment = DR_MISALIGNMENT_UNKNOWN;
- *poffset = 0;
- if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node,
- masked_p,
- vls_type, memory_access_type, poffset,
- alignment_support_scheme,
- misalignment, gs_info, lanes_ifn,
- elsvals))
- return false;
-
if ((*memory_access_type == VMAT_ELEMENTWISE
|| *memory_access_type == VMAT_STRIDED_SLP)
&& !nunits.is_constant ())
@@ -2394,7 +2352,6 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
return false;
}
-
/* Checks if all scalar iterations are known to be inbounds. */
bool inbounds = DR_SCALAR_KNOWN_BOUNDS (STMT_VINFO_DR_INFO (stmt_info));
@@ -2528,9 +2485,6 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
/* FIXME: At the moment the cost model seems to underestimate the
cost of using elementwise accesses. This check preserves the
traditional behavior until that can be fixed. */
- stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
- if (!first_stmt_info)
- first_stmt_info = stmt_info;
if (*memory_access_type == VMAT_ELEMENTWISE
&& !STMT_VINFO_STRIDED_P (first_stmt_info)
&& !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
@@ -2547,21 +2501,21 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
/* Return true if boolean argument at MASK_INDEX is suitable for vectorizing
conditional operation STMT_INFO. When returning true, store the mask
- in *MASK, the type of its definition in *MASK_DT_OUT, the type of the
- vectorized mask in *MASK_VECTYPE_OUT and the SLP node corresponding
- to the mask in *MASK_NODE if MASK_NODE is not NULL. */
+ in *MASK_NODE, the type of its definition in *MASK_DT_OUT and the type of
+ the vectorized mask in *MASK_VECTYPE_OUT. */
static bool
vect_check_scalar_mask (vec_info *vinfo,
slp_tree slp_node, unsigned mask_index,
- tree *mask, slp_tree *mask_node,
+ slp_tree *mask_node,
vect_def_type *mask_dt_out, tree *mask_vectype_out)
{
enum vect_def_type mask_dt;
tree mask_vectype;
slp_tree mask_node_1;
+ tree mask_;
if (!vect_is_simple_use (vinfo, slp_node, mask_index,
- mask, &mask_node_1, &mask_dt, &mask_vectype))
+ &mask_, &mask_node_1, &mask_dt, &mask_vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -2570,7 +2524,7 @@ vect_check_scalar_mask (vec_info *vinfo,
}
if ((mask_dt == vect_constant_def || mask_dt == vect_external_def)
- && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (*mask)))
+ && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask_)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -2578,17 +2532,6 @@ vect_check_scalar_mask (vec_info *vinfo,
return false;
}
- /* If the caller is not prepared for adjusting an external/constant
- SLP mask vector type fail. */
- if (!mask_node
- && SLP_TREE_DEF_TYPE (mask_node_1) != vect_internal_def)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "SLP mask argument is not vectorized.\n");
- return false;
- }
-
tree vectype = SLP_TREE_VECTYPE (slp_node);
if (!mask_vectype)
mask_vectype = get_mask_type_for_scalar_type (vinfo, TREE_TYPE (vectype),
@@ -2616,11 +2559,11 @@ vect_check_scalar_mask (vec_info *vinfo,
*mask_dt_out = mask_dt;
*mask_vectype_out = mask_vectype;
- if (mask_node)
- *mask_node = mask_node_1;
+ *mask_node = mask_node_1;
return true;
}
+
/* Return true if stored value is suitable for vectorizing store
statement STMT_INFO. When returning true, store the scalar stored
in *RHS and *RHS_NODE, the type of the definition in *RHS_DT_OUT,
@@ -2629,7 +2572,7 @@ vect_check_scalar_mask (vec_info *vinfo,
static bool
vect_check_store_rhs (vec_info *vinfo, stmt_vec_info stmt_info,
- slp_tree slp_node, tree *rhs, slp_tree *rhs_node,
+ slp_tree slp_node, slp_tree *rhs_node,
vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
vec_load_store_type *vls_type_out)
{
@@ -2645,8 +2588,9 @@ vect_check_store_rhs (vec_info *vinfo, stmt_vec_info stmt_info,
enum vect_def_type rhs_dt;
tree rhs_vectype;
+ tree rhs;
if (!vect_is_simple_use (vinfo, slp_node, op_no,
- rhs, rhs_node, &rhs_dt, &rhs_vectype))
+ &rhs, rhs_node, &rhs_dt, &rhs_vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -2657,7 +2601,7 @@ vect_check_store_rhs (vec_info *vinfo, stmt_vec_info stmt_info,
/* In the case this is a store from a constant make sure
native_encode_expr can handle it. */
if (rhs_dt == vect_constant_def
- && CONSTANT_CLASS_P (*rhs) && native_encode_expr (*rhs, NULL, 64) == 0)
+ && CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -3327,7 +3271,7 @@ vectorizable_call (vec_info *vinfo,
if ((int) i == mask_opno)
{
if (!vect_check_scalar_mask (vinfo, slp_node, mask_opno,
- &op, &slp_op[i], &dt[i], &vectypes[i]))
+ &slp_op[i], &dt[i], &vectypes[i]))
return false;
continue;
}
@@ -3892,9 +3836,9 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
if (nargs == 0)
return false;
- vec<tree>& simd_clone_info = SLP_TREE_SIMD_CLONE_INFO (slp_node);
- if (cost_vec)
- simd_clone_info.truncate (0);
+ vect_simd_clone_data _data;
+ vect_simd_clone_data &data = slp_node->get_data (_data);
+ vec<tree>& simd_clone_info = data.simd_clone_info;
arginfo.reserve (nargs, true);
auto_vec<slp_tree> slp_op;
slp_op.safe_grow_cleared (nargs);
@@ -4291,6 +4235,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
}
SLP_TREE_TYPE (slp_node) = call_simd_clone_vec_info_type;
+ slp_node->data = new vect_simd_clone_data (std::move (_data));
DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
/* vect_model_simple_cost (vinfo, 1, slp_node, cost_vec); */
return true;
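Instead of the old SLP_TREE_SIMD_CLONE_INFO vector embedded in every node, the clone-specific analysis result now lives in a vect_simd_clone_data object: analysis works on a local instance obtained through get_data, and once the node is committed to call_simd_clone_vec_info_type the object is moved to the heap and attached to slp_node->data (which the node destructor deletes, as in the tree-vect-slp.cc hunk above). A much-reduced sketch of the pattern, using stand-in types rather than the GCC ones:

#include <utility>
#include <vector>

struct node_data { virtual ~node_data () = default; };

struct simd_clone_data : node_data
{
  std::vector<int> simd_clone_info;   /* stand-in for auto_vec<tree> */
};

struct slp_node
{
  node_data *data = nullptr;
  template <class T> T &get_data (T &else_)
  { return data ? *static_cast<T *> (data) : else_; }
  ~slp_node () { delete data; }
};

static bool
analyze_simd_clone (slp_node &node)
{
  simd_clone_data local;
  simd_clone_data &d = node.get_data (local);  /* reuse stored data if present */
  d.simd_clone_info.push_back (1);             /* record analysis results */
  if (!node.data)                              /* commit on success */
    node.data = new simd_clone_data (std::move (local));
  return true;
}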
@@ -7090,7 +7035,8 @@ scan_store_can_perm_p (tree vectype, tree init,
static bool
check_scan_store (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype,
- enum vect_def_type rhs_dt, slp_tree slp_node, tree mask,
+ enum vect_def_type rhs_dt, slp_tree slp_node,
+ slp_tree mask_node,
vect_memory_access_type memory_access_type)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
@@ -7098,13 +7044,14 @@ check_scan_store (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype,
tree ref_type;
gcc_assert (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) > 1);
- if ((slp_node && SLP_TREE_LANES (slp_node) > 1)
- || mask
+ if (SLP_TREE_LANES (slp_node) > 1
+ || mask_node
|| memory_access_type != VMAT_CONTIGUOUS
|| TREE_CODE (DR_BASE_ADDRESS (dr_info->dr)) != ADDR_EXPR
|| !VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (dr_info->dr), 0))
|| loop_vinfo == NULL
|| LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
+ || LOOP_VINFO_EPILOGUE_P (loop_vinfo)
|| STMT_VINFO_GROUPED_ACCESS (stmt_info)
|| !integer_zerop (get_dr_vinfo_offset (vinfo, dr_info))
|| !integer_zerop (DR_INIT (dr_info->dr))
@@ -7795,7 +7742,7 @@ vectorizable_store (vec_info *vinfo,
/* Is vectorizable store? */
- tree mask = NULL_TREE, mask_vectype = NULL_TREE;
+ tree mask_vectype = NULL_TREE;
slp_tree mask_node = NULL;
if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
{
@@ -7828,7 +7775,7 @@ vectorizable_store (vec_info *vinfo,
(call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
if (mask_index >= 0
&& !vect_check_scalar_mask (vinfo, slp_node, mask_index,
- &mask, &mask_node, &mask_dt,
+ &mask_node, &mask_dt,
&mask_vectype))
return false;
}
@@ -7859,10 +7806,9 @@ vectorizable_store (vec_info *vinfo,
return false;
}
- tree op;
slp_tree op_node;
if (!vect_check_store_rhs (vinfo, stmt_info, slp_node,
- &op, &op_node, &rhs_dt, &rhs_vectype, &vls_type))
+ &op_node, &rhs_dt, &rhs_vectype, &vls_type))
return false;
elem_type = TREE_TYPE (vectype);
@@ -7876,8 +7822,8 @@ vectorizable_store (vec_info *vinfo,
int misalignment;
poly_int64 poffset;
internal_fn lanes_ifn;
- if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, vls_type,
- 1, &memory_access_type, &poffset,
+ if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask_node,
+ vls_type, &memory_access_type, &poffset,
&alignment_support_scheme, &misalignment, &gs_info,
&lanes_ifn))
return false;
@@ -7891,7 +7837,7 @@ vectorizable_store (vec_info *vinfo,
return false;
}
- if (mask)
+ if (mask_node)
{
if (memory_access_type == VMAT_CONTIGUOUS)
{
@@ -7945,15 +7891,14 @@ vectorizable_store (vec_info *vinfo,
if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) > 1 && cost_vec)
{
- if (!check_scan_store (vinfo, stmt_info, vectype, rhs_dt, slp_node, mask,
- memory_access_type))
+ if (!check_scan_store (vinfo, stmt_info, vectype, rhs_dt, slp_node,
+ mask_node, memory_access_type))
return false;
}
bool costing_p = cost_vec;
if (costing_p) /* transformation not required. */
{
- STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
if (loop_vinfo
@@ -7961,10 +7906,10 @@ vectorizable_store (vec_info *vinfo,
check_load_store_for_partial_vectors (loop_vinfo, vectype, slp_node,
vls_type, group_size,
memory_access_type, &gs_info,
- mask);
+ mask_node);
if (!vect_maybe_update_slp_op_vectype (op_node, vectype)
- || (mask
+ || (mask_node
&& !vect_maybe_update_slp_op_vectype (mask_node,
mask_vectype)))
{
@@ -7984,7 +7929,7 @@ vectorizable_store (vec_info *vinfo,
SLP_TREE_TYPE (slp_node) = store_vec_info_type;
}
- gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
+ gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (slp_node));
/* Transform. */
@@ -8028,7 +7973,6 @@ vectorizable_store (vec_info *vinfo,
gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info)
|| (DR_GROUP_FIRST_ELEMENT (first_stmt_info) == first_stmt_info));
first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
- op = vect_get_store_rhs (first_stmt_info);
ref_type = get_group_alias_ptr_type (first_stmt_info);
@@ -8214,7 +8158,7 @@ vectorizable_store (vec_info *vinfo,
unsigned int n_adjacent_stores = 0;
running_off = offvar;
if (!costing_p)
- vect_get_vec_defs (vinfo, slp_node, op, &vec_oprnds);
+ vect_get_slp_defs (op_node, &vec_oprnds);
unsigned int group_el = 0;
unsigned HOST_WIDE_INT elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
@@ -8289,7 +8233,7 @@ vectorizable_store (vec_info *vinfo,
else
inside_cost
+= record_stmt_cost (cost_vec, n_adjacent_stores,
- scalar_store, stmt_info, 0, vect_body);
+ scalar_store, slp_node, 0, vect_body);
/* Only need vector extracting when there are more
than one stores. */
if (nstores > 1)
@@ -8329,7 +8273,7 @@ vectorizable_store (vec_info *vinfo,
realignment. vect_supportable_dr_alignment always returns either
dr_aligned or dr_unaligned_supported for masked operations. */
gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
- && !mask
+ && !mask_node
&& !loop_masks)
|| alignment_support_scheme == dr_aligned
|| alignment_support_scheme == dr_unaligned_supported);
@@ -8364,7 +8308,7 @@ vectorizable_store (vec_info *vinfo,
memory_access_type, loop_lens);
}
- if (mask && !costing_p)
+ if (mask_node && !costing_p)
LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
/* In case the vectorization factor (VF) is bigger than the number
@@ -8404,7 +8348,7 @@ vectorizable_store (vec_info *vinfo,
{
if (!costing_p)
{
- if (mask)
+ if (mask_node)
{
vect_get_slp_defs (mask_node, &vec_masks);
vec_mask = vec_masks[0];
@@ -8418,7 +8362,7 @@ vectorizable_store (vec_info *vinfo,
else if (!costing_p)
{
gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
- if (mask)
+ if (mask_node)
vec_mask = vec_masks[j];
dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, bump);
@@ -8553,7 +8497,7 @@ vectorizable_store (vec_info *vinfo,
DR_CHAIN is of size 1. */
gcc_assert (group_size == 1);
vect_get_slp_defs (op_node, gvec_oprnds[0]);
- if (mask)
+ if (mask_node)
vect_get_slp_defs (mask_node, &vec_masks);
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
@@ -8579,7 +8523,7 @@ vectorizable_store (vec_info *vinfo,
if (!costing_p)
{
vec_oprnd = (*gvec_oprnds[0])[j];
- if (mask)
+ if (mask_node)
vec_mask = vec_masks[j];
/* We should have caught mismatched types earlier. */
gcc_assert (useless_type_conversion_p (vectype,
@@ -8879,10 +8823,13 @@ vectorizable_store (vec_info *vinfo,
if (!costing_p)
{
/* Get vectorized arguments for SLP_NODE. */
- vect_get_vec_defs (vinfo, slp_node, op, &vec_oprnds, mask, &vec_masks);
+ vect_get_slp_defs (op_node, &vec_oprnds);
vec_oprnd = vec_oprnds[0];
- if (mask)
- vec_mask = vec_masks[0];
+ if (mask_node)
+ {
+ vect_get_slp_defs (mask_node, &vec_masks);
+ vec_mask = vec_masks[0];
+ }
}
/* We should have caught mismatched types earlier. */
@@ -8924,10 +8871,7 @@ vectorizable_store (vec_info *vinfo,
else
{
tree perm_mask = perm_mask_for_reverse (vectype);
- tree perm_dest
- = vect_create_destination_var (vect_get_store_rhs (stmt_info),
- vectype);
- tree new_temp = make_ssa_name (perm_dest);
+ tree new_temp = make_ssa_name (vectype);
/* Generate the permute statement. */
gimple *perm_stmt
@@ -9313,12 +9257,12 @@ vectorizable_load (vec_info *vinfo,
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
- tree mask = NULL_TREE, mask_vectype = NULL_TREE;
+ tree mask_vectype = NULL_TREE;
tree els = NULL_TREE; tree els_vectype = NULL_TREE;
int mask_index = -1;
int els_index = -1;
- slp_tree slp_op = NULL;
+ slp_tree mask_node = NULL;
slp_tree els_op = NULL;
if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
{
@@ -9357,7 +9301,7 @@ vectorizable_load (vec_info *vinfo,
(call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
if (mask_index >= 0
&& !vect_check_scalar_mask (vinfo, slp_node, mask_index,
- &mask, &slp_op, &mask_dt, &mask_vectype))
+ &mask_node, &mask_dt, &mask_vectype))
return false;
els_index = internal_fn_else_index (ifn);
@@ -9440,8 +9384,8 @@ vectorizable_load (vec_info *vinfo,
auto_vec<int> elsvals;
int maskload_elsval = 0;
bool need_zeroing = false;
- if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, VLS_LOAD,
- 1, &memory_access_type, &poffset,
+ if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask_node,
+ VLS_LOAD, &memory_access_type, &poffset,
&alignment_support_scheme, &misalignment, &gs_info,
&lanes_ifn, &elsvals))
return false;
@@ -9455,7 +9399,7 @@ vectorizable_load (vec_info *vinfo,
= TYPE_PRECISION (scalar_type) < GET_MODE_PRECISION (GET_MODE_INNER (mode));
/* ??? The following checks should really be part of
- get_group_load_store_type. */
+ get_load_store_type. */
if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
&& !((memory_access_type == VMAT_ELEMENTWISE
|| memory_access_type == VMAT_GATHER_SCATTER)
@@ -9508,7 +9452,7 @@ vectorizable_load (vec_info *vinfo,
return false;
}
- if (mask)
+ if (mask_node)
{
if (memory_access_type == VMAT_CONTIGUOUS)
{
@@ -9549,8 +9493,8 @@ vectorizable_load (vec_info *vinfo,
if (costing_p) /* transformation not required. */
{
- if (mask
- && !vect_maybe_update_slp_op_vectype (slp_op,
+ if (mask_node
+ && !vect_maybe_update_slp_op_vectype (mask_node,
mask_vectype))
{
if (dump_enabled_p ())
@@ -9566,7 +9510,7 @@ vectorizable_load (vec_info *vinfo,
check_load_store_for_partial_vectors (loop_vinfo, vectype, slp_node,
VLS_LOAD, group_size,
memory_access_type, &gs_info,
- mask, &elsvals);
+ mask_node, &elsvals);
if (dump_enabled_p ()
&& memory_access_type != VMAT_ELEMENTWISE
@@ -9590,7 +9534,7 @@ vectorizable_load (vec_info *vinfo,
check_load_store_for_partial_vectors (loop_vinfo, vectype, slp_node,
VLS_LOAD, group_size,
memory_access_type, &gs_info,
- mask, &elsvals);
+ mask_node, &elsvals);
}
/* If the type needs padding we must zero inactive elements.
@@ -9625,7 +9569,7 @@ vectorizable_load (vec_info *vinfo,
if (memory_access_type == VMAT_INVARIANT)
{
- gcc_assert (!grouped_load && !mask && !bb_vinfo);
+ gcc_assert (!grouped_load && !mask_node && !bb_vinfo);
/* If we have versioned for aliasing or the loop doesn't
have any data dependencies that would preclude this,
then we are sure this is a loop invariant load and
@@ -10100,7 +10044,7 @@ vectorizable_load (vec_info *vinfo,
dr_aligned or dr_unaligned_supported for (non-length) masked
operations. */
gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
- && !mask
+ && !mask_node
&& !loop_masks)
|| memory_access_type == VMAT_GATHER_SCATTER
|| alignment_support_scheme == dr_aligned
@@ -10222,7 +10166,7 @@ vectorizable_load (vec_info *vinfo,
auto_vec<tree> vec_offsets;
auto_vec<tree> vec_masks;
- if (mask && !costing_p)
+ if (mask_node && !costing_p)
vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[mask_index],
&vec_masks);
@@ -10290,7 +10234,7 @@ vectorizable_load (vec_info *vinfo,
dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, bump);
}
- if (mask)
+ if (mask_node)
vec_mask = vec_masks[j];
tree vec_array = create_vector_array (vectype, group_size);
@@ -10433,7 +10377,7 @@ vectorizable_load (vec_info *vinfo,
tree bias = NULL_TREE;
if (!costing_p)
{
- if (mask)
+ if (mask_node)
vec_mask = vec_masks[i];
if (loop_masks)
final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
@@ -10829,7 +10773,7 @@ vectorizable_load (vec_info *vinfo,
if (!costing_p)
{
- if (mask)
+ if (mask_node)
vec_mask = vec_masks[i];
if (loop_masks)
final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index e1900279..0a75ee1 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -173,6 +173,8 @@ struct vect_scalar_ops_slice_hash : typed_noop_remove<vect_scalar_ops_slice>
/* Describes how we're going to vectorize an individual load or store,
or a group of loads or stores. */
enum vect_memory_access_type {
+ VMAT_UNINITIALIZED,
+
/* An access to an invariant address. This is used only for loads. */
VMAT_INVARIANT,
@@ -239,6 +241,23 @@ typedef auto_vec<std::pair<unsigned, unsigned>, 16> auto_lane_permutation_t;
typedef vec<unsigned> load_permutation_t;
typedef auto_vec<unsigned, 16> auto_load_permutation_t;
+struct vect_data {
+ virtual ~vect_data () = default;
+};
+
+/* Analysis data from vectorizable_simd_clone_call for
+ call_simd_clone_vec_info_type. */
+struct vect_simd_clone_data : vect_data {
+ virtual ~vect_simd_clone_data () = default;
+ vect_simd_clone_data () = default;
+ vect_simd_clone_data (vect_simd_clone_data &&other) = default;
+
+ /* Selected SIMD clone's function info. First vector element
+ is SIMD clone's function decl, followed by a pair of trees (base + step)
+ for linear arguments (pair of NULLs for other arguments). */
+ auto_vec<tree> simd_clone_info;
+};
+
/* A computation tree of an SLP instance. Each node corresponds to a group of
stmts to be packed in a SIMD stmt. */
struct _slp_tree {
@@ -267,11 +286,6 @@ struct _slp_tree {
denotes the number of output lanes. */
lane_permutation_t lane_permutation;
- /* Selected SIMD clone's function info. First vector element
- is SIMD clone's function decl, followed by a pair of trees (base + step)
- for linear arguments (pair of NULLs for other arguments). */
- vec<tree> simd_clone_info;
-
tree vectype;
/* Vectorized defs. */
vec<tree> vec_defs;
@@ -305,12 +319,13 @@ struct _slp_tree {
for loop vectorization. */
vect_memory_access_type memory_access_type;
- /* The kind of operation as determined by analysis and a tagged
- union with kind specific data. */
+ /* The kind of operation as determined by analysis and optional
+ kind specific data. */
enum stmt_vec_info_type type;
- union {
- void *undef;
- } u;
+ vect_data *data;
+
+ template <class T>
+ T& get_data (T& else_) { return data ? *static_cast <T *> (data) : else_; }
/* If not NULL this is a cached failed SLP discovery attempt with
the lanes that failed during SLP discovery as 'false'. This is
@@ -390,7 +405,6 @@ public:
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
#define SLP_TREE_LANE_PERMUTATION(S) (S)->lane_permutation
-#define SLP_TREE_SIMD_CLONE_INFO(S) (S)->simd_clone_info
#define SLP_TREE_DEF_TYPE(S) (S)->def_type
#define SLP_TREE_VECTYPE(S) (S)->vectype
#define SLP_TREE_REPRESENTATIVE(S) (S)->representative
@@ -1438,10 +1452,6 @@ public:
/* For both loads and stores. */
unsigned simd_lane_access_p : 3;
- /* Classifies how the load or store is going to be implemented
- for loop vectorization. */
- vect_memory_access_type memory_access_type;
-
/* For INTEGER_INDUC_COND_REDUCTION, the initial value to be used. */
tree induc_cond_initial_val;
@@ -1584,7 +1594,6 @@ struct gather_scatter_info {
#define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S) (S)->strided_p
-#define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL(S) (S)->induc_cond_initial_val
#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT(S) (S)->reduc_epilogue_adjustment
@@ -1718,6 +1727,7 @@ public:
unsigned int total_cost () const;
unsigned int suggested_unroll_factor () const;
machine_mode suggested_epilogue_mode (int &masked) const;
+ bool costing_for_scalar () const { return m_costing_for_scalar; }
protected:
unsigned int record_stmt_cost (stmt_vec_info, vect_cost_model_location,
@@ -2000,6 +2010,13 @@ add_stmt_cost (vector_costs *costs, int count,
tree vectype, int misalign,
enum vect_cost_model_location where)
{
+ /* Even though a vector type might be set on stmt do not pass that on when
+ costing the scalar IL. A SLP node shouldn't have been recorded. */
+ if (costs->costing_for_scalar ())
+ {
+ vectype = NULL_TREE;
+ gcc_checking_assert (node == NULL);
+ }
unsigned cost = costs->add_stmt_cost (count, kind, stmt_info, node, vectype,
misalign, where);
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2825,18 +2842,6 @@ vect_is_reduction (stmt_vec_info stmt_info)
return STMT_VINFO_REDUC_IDX (stmt_info) >= 0;
}
-/* Returns the memory acccess type being used to vectorize the statement. If
- SLP this is read from NODE, otherwise it's read from the STMT_VINFO. */
-
-inline vect_memory_access_type
-vect_mem_access_type (stmt_vec_info stmt_info, slp_tree node)
-{
- if (node)
- return SLP_TREE_MEMORY_ACCESS_TYPE (node);
- else
- return STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info);
-}
-
/* If STMT_INFO describes a reduction, return the vect_reduction_type
of the reduction it describes, otherwise return -1. */
inline int