-rw-r--r--  gcc/ChangeLog | 111
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/common/config/riscv/riscv-common.cc | 2
-rw-r--r--  gcc/config/aarch64/aarch64-protos.h | 3
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md | 10
-rw-r--r--  gcc/config/aarch64/aarch64-sve.md | 38
-rw-r--r--  gcc/config/aarch64/aarch64.cc | 108
-rw-r--r--  gcc/config/arm/arm.cc | 6
-rw-r--r--  gcc/config/arm/arm.h | 6
-rw-r--r--  gcc/config/i386/i386-features.cc | 39
-rw-r--r--  gcc/config/i386/i386.cc | 50
-rw-r--r--  gcc/config/riscv/riscv-vect-permconst.cc | 20
-rw-r--r--  gcc/config/riscv/riscv.opt | 2
-rw-r--r--  gcc/config/s390/s390-protos.h | 1
-rw-r--r--  gcc/config/s390/s390.cc | 82
-rw-r--r--  gcc/config/s390/s390.md | 4
-rw-r--r--  gcc/config/s390/vector.md | 30
-rw-r--r--  gcc/fortran/ChangeLog | 8
-rw-r--r--  gcc/gimple-fold.cc | 26
-rw-r--r--  gcc/gimple-fold.h | 2
-rw-r--r--  gcc/simplify-rtx.cc | 7
-rw-r--r--  gcc/testsuite/ChangeLog | 71
-rw-r--r--  gcc/testsuite/g++.target/i386/pr120036.C | 113
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-1.h | 6
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-2.h | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-1.h | 6
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-2.h | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061.c | 6
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/location_overflow_plugin.cc | 15
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/plugin.exp | 4
-rw-r--r--  gcc/testsuite/gcc.dg/vect/vect-early-break_134-pr120089.c | 66
-rw-r--r--  gcc/testsuite/gcc.dg/vect/vect-early-break_135-pr120143.c | 18
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/acle/general/whilelt_5.c | 24
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_pat_128_to_neon.c | 81
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/while_7.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/while_9.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/arm/armv8_2-fp16-arith-1.c | 3
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr117839-3a.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr117839-3b.c | 5
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr119919.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/arch-48.c | 5
-rw-r--r--  gcc/testsuite/gcc.target/riscv/pr120137.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/riscv/pr120154.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/s390/vector/cstoreti-1.c | 127
-rw-r--r--  gcc/testsuite/gcc.target/s390/vector/cstoreti-2.c | 25
-rw-r--r--  gcc/tree-if-conv.cc | 16
-rw-r--r--  gcc/tree-scalar-evolution.cc | 5
-rw-r--r--  gcc/tree-ssa-ifcombine.cc | 10
-rw-r--r--  gcc/tree-ssa-loop-im.cc | 28
-rw-r--r--  gcc/tree-ssa-loop-split.cc | 5
-rw-r--r--  gcc/tree-ssa-reassoc.cc | 40
-rw-r--r--  gcc/tree-vect-data-refs.cc | 1
-rw-r--r--  gcc/tree-vect-slp.cc | 17
-rw-r--r--  gcc/tree-vect-stmts.cc | 66
-rw-r--r--  libcpp/ChangeLog | 13
-rw-r--r--  libcpp/files.cc | 8
-rw-r--r--  libcpp/line-map.cc | 48
-rw-r--r--  libgfortran/ChangeLog | 22
-rw-r--r--  libgfortran/Makefile.am | 8
-rw-r--r--  libgfortran/Makefile.in | 35
-rw-r--r--  libgfortran/generated/maxloc1_16_m16.c | 591
-rw-r--r--  libgfortran/gfortran.map | 12
-rw-r--r--  libgfortran/intrinsics/random.c | 2
-rw-r--r--  libgomp/ChangeLog | 6
-rw-r--r--  libgomp/testsuite/libgomp.fortran/map-alloc-comp-9-usm.f90 | 11
-rw-r--r--  libgomp/testsuite/libgomp.fortran/map-alloc-comp-9.f90 | 19
-rw-r--r--  libstdc++-v3/ChangeLog | 51
-rw-r--r--  libstdc++-v3/acinclude.m4 | 6
-rwxr-xr-x  libstdc++-v3/configure | 20
-rw-r--r--  libstdc++-v3/doc/doxygen/user.cfg.in | 1
-rw-r--r--  libstdc++-v3/include/Makefile.am | 1
-rw-r--r--  libstdc++-v3/include/Makefile.in | 1
-rw-r--r--  libstdc++-v3/include/bits/chrono_io.h | 9
-rw-r--r--  libstdc++-v3/include/bits/version.def | 9
-rw-r--r--  libstdc++-v3/include/bits/version.h | 9
-rw-r--r--  libstdc++-v3/include/precompiled/stdc++.h | 1
-rw-r--r--  libstdc++-v3/include/std/mdspan | 309
-rw-r--r--  libstdc++-v3/src/c++23/std.cc.in | 10
-rw-r--r--  libstdc++-v3/testsuite/23_containers/mdspan/extents/class_mandates_neg.cc | 8
-rw-r--r--  libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_copy.cc | 82
-rw-r--r--  libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_ints.cc | 62
-rw-r--r--  libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_shape.cc | 160
-rw-r--r--  libstdc++-v3/testsuite/23_containers/mdspan/extents/custom_integer.cc | 87
-rw-r--r--  libstdc++-v3/testsuite/std/time/format/pr120114.cc | 125
86 files changed, 3156 insertions, 185 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d239c35..cbce913 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,114 @@
+2025-05-07 Jeff Law <jlaw@ventanamicro.com>
+
+ PR target/120137
+ PR target/120154
+ * config/riscv/riscv-vect-permconst.cc (process_bb): Verify each
+ canonicalized element fits into the vector element mode.
+
+2025-05-07 Dongyan Chen <chendongyan@isrc.iscas.ac.cn>
+
+ * common/config/riscv/riscv-common.cc: New extension.
+ * config/riscv/riscv.opt: Ditto.
+
+2025-05-07 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/91323
+ * config/arm/arm.cc (arm_select_cc_mode): Use CCFPEmode for LTGT.
+
+2025-05-07 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/110796
+ PR target/118446
+ * config/arm/arm.h (REVERSIBLE_CC_MODE): FP modes are only
+ reversible if flag_finite_math_only.
+ * config/arm/arm.cc (arm_select_cc_mode): Return CCFPmode for all
+ FP comparisons if flag_finite_math_only.
+
+2025-05-07 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR tree-optimization/111276
+ * gimple-fold.cc (arith_code_with_undefined_signed_overflow): Make static.
+ (gimple_with_undefined_signed_overflow): New function.
+ * gimple-fold.h (arith_code_with_undefined_signed_overflow): Remove.
+ (gimple_with_undefined_signed_overflow): Add declaration.
+ * tree-if-conv.cc (if_convertible_gimple_assign_stmt_p): Use
+ gimple_with_undefined_signed_overflow instead of manually
+ checking lhs and the code of the stmt.
+ (predicate_statements): Likewise.
+ * tree-ssa-ifcombine.cc (ifcombine_rewrite_to_defined_overflow): Likewise.
+ * tree-ssa-loop-im.cc (move_computations_worker): Likewise.
+ * tree-ssa-reassoc.cc (update_range_test): Likewise. Reformat.
+ * tree-scalar-evolution.cc (final_value_replacement_loop): Use
+ gimple_with_undefined_signed_overflow instead of
+ arith_code_with_undefined_signed_overflow.
+ * tree-ssa-loop-split.cc (split_loop): Likewise.
+
+2025-05-07 Andrew Pinski <quic_apinski@quicinc.com>
+
+ * tree-ssa-loop-im.cc (compute_invariantness): Hoist to the always executed point
+ if ignoring the cost.
+
+2025-05-07 Jan Hubicka <hubicka@ucw.cz>
+
+ * config/i386/i386.cc (ix86_vector_costs::add_stmt_cost): Add FLOAT_EXPR;
+ FIX_TRUNC_EXPR and vec_promote_demote costs.
+
+2025-05-07 Jennifer Schmitz <jschmitz@nvidia.com>
+
+ PR target/117978
+ * config/aarch64/aarch64-protos.h: Declare
+ aarch64_emit_load_store_through_mode and aarch64_expand_maskloadstore.
+ * config/aarch64/aarch64-sve.md
+ (maskload<mode><vpred>): New define_expand folding maskloads with
+ certain predicate patterns to ASIMD loads.
+ (*aarch64_maskload<mode><vpred>): Renamed from maskload<mode><vpred>.
+ (maskstore<mode><vpred>): New define_expand folding maskstores with
+ certain predicate patterns to ASIMD stores.
+ (*aarch64_maskstore<mode><vpred>): Renamed from maskstore<mode><vpred>.
+ * config/aarch64/aarch64.cc
+ (aarch64_emit_load_store_through_mode): New function emitting a
+ load/store through subregs of a given mode.
+ (aarch64_emit_sve_pred_move): Refactor to use
+ aarch64_emit_load_store_through_mode.
+ (aarch64_expand_maskloadstore): New function to emit ASIMD loads/stores
+ for maskloads/stores with SVE predicates with VL1, VL2, VL4, VL8, or
+ VL16 patterns.
+ (aarch64_partial_ptrue_length): New function returning number of leading
+ set bits in a predicate.
+
+2025-05-07 Stefan Schulze Frielinghaus <stefansf@gcc.gnu.org>
+
+ * config/s390/s390-protos.h (s390_expand_cstoreti4): New
+ function.
+ * config/s390/s390.cc (s390_expand_cstoreti4): New function.
+ * config/s390/s390.md (CC_SUZ): New mode iterator.
+ (l): New mode attribute.
+ (cc_tolower): New mode attribute.
+ * config/s390/vector.md (cstoreti4): New expander.
+ (*vec_cmpv2di_lane0_<cc_tolower>): New insn.
+ (*vec_cmpti_<cc_tolower>): New insn.
+
+2025-05-07 H.J. Lu <hjl.tools@gmail.com>
+
+ PR target/120036
+ * config/i386/i386-features.cc (ix86_get_vector_load_mode):
+ Handle 8/4/2 bytes.
+ (remove_redundant_vector_load): If the mode size is smaller than
+ its natural size, first insert an extra move with a QI vector
+ SUBREG of the same size to avoid validate_subreg failure.
+
+2025-05-07 hongtao.liu <hongtao.liu@intel.com>
+
+ PR gcov-profile/118508
+ * auto-profile.cc
+ (autofdo_source_profile::get_callsite_total_count): Fix name
+ mismatch for Fortran.
+
+2025-05-07 Jeff Law <jlaw@ventanamicro.com>
+
+ * config/riscv/riscv.md (*branch<ANYI:mode>_shiftedarith_equals_zero):
+ Avoid generating unnecessary andi. Fix formatting.
+
2025-05-06 Dongyan Chen <chendongyan@isrc.iscas.ac.cn>
* common/config/riscv/riscv-common.cc: New extension.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 9687431..8cb3c2b 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20250507
+20250508
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index 0233e1a..ca14eb9 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -327,6 +327,7 @@ static const struct riscv_ext_version riscv_ext_version_table[] =
{"zalrsc", ISA_SPEC_CLASS_NONE, 1, 0},
{"zabha", ISA_SPEC_CLASS_NONE, 1, 0},
{"zacas", ISA_SPEC_CLASS_NONE, 1, 0},
+ {"zama16b", ISA_SPEC_CLASS_NONE, 1, 0},
{"zba", ISA_SPEC_CLASS_NONE, 1, 0},
{"zbb", ISA_SPEC_CLASS_NONE, 1, 0},
@@ -1657,6 +1658,7 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
RISCV_EXT_FLAG_ENTRY ("zalrsc", x_riscv_za_subext, MASK_ZALRSC),
RISCV_EXT_FLAG_ENTRY ("zabha", x_riscv_za_subext, MASK_ZABHA),
RISCV_EXT_FLAG_ENTRY ("zacas", x_riscv_za_subext, MASK_ZACAS),
+ RISCV_EXT_FLAG_ENTRY ("zama16b", x_riscv_za_subext, MASK_ZAMA16B),
RISCV_EXT_FLAG_ENTRY ("zba", x_riscv_zb_subext, MASK_ZBA),
RISCV_EXT_FLAG_ENTRY ("zbb", x_riscv_zb_subext, MASK_ZBB),
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 1ca86c9..c935e7b 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -1026,6 +1026,8 @@ rtx aarch64_ptrue_reg (machine_mode, unsigned int);
rtx aarch64_ptrue_reg (machine_mode, machine_mode);
rtx aarch64_pfalse_reg (machine_mode);
bool aarch64_sve_same_pred_for_ptest_p (rtx *, rtx *);
+void aarch64_emit_load_store_through_mode (rtx, rtx, machine_mode);
+bool aarch64_expand_maskloadstore (rtx *, machine_mode);
void aarch64_emit_sve_pred_move (rtx, rtx, rtx);
void aarch64_expand_sve_mem_move (rtx, rtx, machine_mode);
bool aarch64_maybe_expand_sve_subreg_move (rtx, rtx);
@@ -1053,6 +1055,7 @@ void aarch64_subvti_scratch_regs (rtx, rtx, rtx *,
rtx *, rtx *, rtx *);
void aarch64_expand_subvti (rtx, rtx, rtx,
rtx, rtx, rtx, rtx, bool);
+int aarch64_exact_log2_inverse (unsigned int, rtx);
/* Initialize builtins for SIMD intrinsics. */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e2afe87..1099e74 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1193,12 +1193,14 @@
(define_insn "aarch64_simd_vec_set_zero<mode>"
[(set (match_operand:VALL_F16 0 "register_operand" "=w")
(vec_merge:VALL_F16
- (match_operand:VALL_F16 1 "aarch64_simd_imm_zero" "")
- (match_operand:VALL_F16 3 "register_operand" "0")
+ (match_operand:VALL_F16 1 "register_operand" "0")
+ (match_operand:VALL_F16 3 "aarch64_simd_imm_zero" "")
(match_operand:SI 2 "immediate_operand" "i")))]
- "TARGET_SIMD && exact_log2 (INTVAL (operands[2])) >= 0"
+ "TARGET_SIMD && aarch64_exact_log2_inverse (<nunits>, operands[2]) >= 0"
{
- int elt = ENDIAN_LANE_N (<nunits>, exact_log2 (INTVAL (operands[2])));
+ int elt = ENDIAN_LANE_N (<nunits>,
+ aarch64_exact_log2_inverse (<nunits>,
+ operands[2]));
operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt);
return "ins\\t%0.<Vetype>[%p2], <vwcore>zr";
}
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 7bf12ff..f39af6e 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1286,7 +1286,24 @@
;; -------------------------------------------------------------------------
;; Predicated LD1 (single).
-(define_insn "maskload<mode><vpred>"
+(define_expand "maskload<mode><vpred>"
+ [(set (match_operand:SVE_ALL 0 "register_operand")
+ (unspec:SVE_ALL
+ [(match_operand:<VPRED> 2 "nonmemory_operand")
+ (match_operand:SVE_ALL 1 "memory_operand")
+ (match_operand:SVE_ALL 3 "aarch64_maskload_else_operand")]
+ UNSPEC_LD1_SVE))]
+ "TARGET_SVE"
+ {
+ if (aarch64_expand_maskloadstore (operands, <MODE>mode))
+ DONE;
+ if (CONSTANT_P (operands[2]))
+ operands[2] = force_reg (<VPRED>mode, operands[2]);
+ }
+)
+
+;; Predicated LD1 (single).
+(define_insn "*aarch64_maskload<mode><vpred>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w")
(unspec:SVE_ALL
[(match_operand:<VPRED> 2 "register_operand" "Upl")
@@ -2287,7 +2304,24 @@
;; -------------------------------------------------------------------------
;; Predicated ST1 (single).
-(define_insn "maskstore<mode><vpred>"
+(define_expand "maskstore<mode><vpred>"
+ [(set (match_operand:SVE_ALL 0 "memory_operand")
+ (unspec:SVE_ALL
+ [(match_operand:<VPRED> 2 "nonmemory_operand")
+ (match_operand:SVE_ALL 1 "register_operand")
+ (match_dup 0)]
+ UNSPEC_ST1_SVE))]
+ "TARGET_SVE"
+ {
+ if (aarch64_expand_maskloadstore (operands, <MODE>mode))
+ DONE;
+ if (CONSTANT_P (operands[2]))
+ operands[2] = force_reg (<VPRED>mode, operands[2]);
+ }
+)
+
+;; Predicated ST1 (single).
+(define_insn "*aarch64_maskstore<mode><vpred>"
[(set (match_operand:SVE_ALL 0 "memory_operand" "+m")
(unspec:SVE_ALL
[(match_operand:<VPRED> 2 "register_operand" "Upl")
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index fff8d9d..9e3f288 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -3667,6 +3667,14 @@ aarch64_partial_ptrue_length (rtx_vector_builder &builder,
if (builder.nelts_per_pattern () == 3)
return 0;
+ /* It is conservatively correct to drop the element size to a lower value,
+ and we must do so if the predicate consists of a leading "foreground"
+ sequence that is smaller than the element size. Without this,
+ we would test only one bit and so treat everything as either an
+ all-true or an all-false predicate. */
+ if (builder.nelts_per_pattern () == 2)
+ elt_size = MIN (elt_size, builder.npatterns ());
+
/* Skip over leading set bits. */
unsigned int nelts = builder.encoded_nelts ();
unsigned int i = 0;
@@ -3698,6 +3706,24 @@ aarch64_partial_ptrue_length (rtx_vector_builder &builder,
return vl;
}
+/* Return:
+
+ * -1 if all bits of PRED are set
+ * N if PRED has N leading set bits followed by all clear bits
+ * 0 if PRED does not have any of these forms. */
+
+int
+aarch64_partial_ptrue_length (rtx pred)
+{
+ rtx_vector_builder builder;
+ if (!aarch64_get_sve_pred_bits (builder, pred))
+ return 0;
+
+ auto elt_size = vector_element_size (GET_MODE_BITSIZE (GET_MODE (pred)),
+ GET_MODE_NUNITS (GET_MODE (pred)));
+ return aarch64_partial_ptrue_length (builder, elt_size);
+}
+
/* See if there is an svpattern that encodes an SVE predicate of mode
PRED_MODE in which the first VL bits are set and the rest are clear.
Return the pattern if so, otherwise return AARCH64_NUM_SVPATTERNS.
@@ -6410,8 +6436,32 @@ aarch64_stack_protect_canary_mem (machine_mode mode, rtx decl_rtl,
return gen_rtx_MEM (mode, force_reg (Pmode, addr));
}
-/* Emit an SVE predicated move from SRC to DEST. PRED is a predicate
- that is known to contain PTRUE. */
+/* Emit a load/store from a subreg of SRC to a subreg of DEST.
+ The subregs have mode NEW_MODE. Use only for reg<->mem moves. */
+void
+aarch64_emit_load_store_through_mode (rtx dest, rtx src, machine_mode new_mode)
+{
+ gcc_assert ((MEM_P (dest) && register_operand (src, VOIDmode))
+ || (MEM_P (src) && register_operand (dest, VOIDmode)));
+ auto mode = GET_MODE (dest);
+ auto int_mode = aarch64_sve_int_mode (mode);
+ if (MEM_P (src))
+ {
+ rtx tmp = force_reg (new_mode, adjust_address (src, new_mode, 0));
+ tmp = force_lowpart_subreg (int_mode, tmp, new_mode);
+ emit_move_insn (dest, force_lowpart_subreg (mode, tmp, int_mode));
+ }
+ else
+ {
+ src = force_lowpart_subreg (int_mode, src, mode);
+ emit_move_insn (adjust_address (dest, new_mode, 0),
+ force_lowpart_subreg (new_mode, src, int_mode));
+ }
+}
+
+/* PRED is a predicate that is known to contain PTRUE.
+ For 128-bit VLS loads/stores, emit LDR/STR.
+ Else, emit an SVE predicated move from SRC to DEST. */
void
aarch64_emit_sve_pred_move (rtx dest, rtx pred, rtx src)
@@ -6421,16 +6471,7 @@ aarch64_emit_sve_pred_move (rtx dest, rtx pred, rtx src)
&& known_eq (GET_MODE_SIZE (mode), 16)
&& aarch64_classify_vector_mode (mode) == VEC_SVE_DATA
&& !BYTES_BIG_ENDIAN)
- {
- if (MEM_P (src))
- {
- rtx tmp = force_reg (V16QImode, adjust_address (src, V16QImode, 0));
- emit_move_insn (dest, lowpart_subreg (mode, tmp, V16QImode));
- }
- else
- emit_move_insn (adjust_address (dest, V16QImode, 0),
- force_lowpart_subreg (V16QImode, src, mode));
- }
+ aarch64_emit_load_store_through_mode (dest, src, V16QImode);
else
{
expand_operand ops[3];
@@ -23526,6 +23567,39 @@ aarch64_simd_valid_imm (rtx op, simd_immediate_info *info,
return false;
}
+/* Try to optimize the expansion of a maskload or maskstore with
+ the operands in OPERANDS, given that the vector being loaded or
+ stored has mode MODE. Return true on success or false if the normal
+ expansion should be used. */
+
+bool
+aarch64_expand_maskloadstore (rtx *operands, machine_mode mode)
+{
+ /* If the predicate in operands[2] is a patterned SVE PTRUE predicate
+ with patterns VL1, VL2, VL4, VL8, or VL16 and at most the bottom
+ 128 bits are loaded/stored, emit an ASIMD load/store. */
+ int vl = aarch64_partial_ptrue_length (operands[2]);
+ int width = vl * GET_MODE_UNIT_BITSIZE (mode);
+ if (width <= 128
+ && pow2p_hwi (vl)
+ && (vl == 1
+ || (!BYTES_BIG_ENDIAN
+ && aarch64_classify_vector_mode (mode) == VEC_SVE_DATA)))
+ {
+ machine_mode new_mode;
+ if (known_eq (width, 128))
+ new_mode = V16QImode;
+ else if (known_eq (width, 64))
+ new_mode = V8QImode;
+ else
+ new_mode = int_mode_for_size (width, 0).require ();
+ aarch64_emit_load_store_through_mode (operands[0], operands[1],
+ new_mode);
+ return true;
+ }
+ return false;
+}
+
/* Return true if OP is a valid SIMD move immediate for SVE or AdvSIMD. */
bool
aarch64_simd_valid_mov_imm (rtx op)
@@ -23840,6 +23914,16 @@ aarch64_strided_registers_p (rtx *operands, unsigned int num_operands,
return true;
}
+/* Return the base 2 logarithm of the bit inverse of OP masked by the lowest
+ NELTS bits, if that masked inverse is a power of 2. Otherwise return -1. */
+
+int
+aarch64_exact_log2_inverse (unsigned int nelts, rtx op)
+{
+ return exact_log2 ((~INTVAL (op))
+ & ((HOST_WIDE_INT_1U << nelts) - 1));
+}
+
/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
HIGH (exclusive). */
void
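A minimal standalone sketch of the arithmetic that the new
aarch64_exact_log2_inverse above performs, with illustrative names
(exact_log2_model is a stand-in for GCC's exact_log2 helper, not the real
implementation):

#include <assert.h>
#include <stdint.h>

/* Stand-in for GCC's exact_log2: log2 of X if X is a power of two,
   otherwise -1.  Uses the GCC builtin __builtin_ctzll.  */
static int
exact_log2_model (uint64_t x)
{
  return (x != 0 && (x & (x - 1)) == 0) ? __builtin_ctzll (x) : -1;
}

/* Mirrors aarch64_exact_log2_inverse: complement the vec_merge mask,
   keep only the low NELTS bits, and test for a single remaining bit.  */
static int
exact_log2_inverse_model (unsigned nelts, uint64_t mask)
{
  return exact_log2_model (~mask & ((UINT64_C (1) << nelts) - 1));
}

int
main (void)
{
  /* For a 4-lane vector, mask 0xd (0b1101) keeps lanes 0, 2 and 3 of the
     register and zeros lane 1: the inverted mask is 0b0010, so lane 1.  */
  assert (exact_log2_inverse_model (4, 0xd) == 1);
  /* Mask 0x5 would zero two lanes; no single "ins ...zr" covers that.  */
  assert (exact_log2_inverse_model (4, 0x5) == -1);
  return 0;
}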
diff --git a/gcc/config/arm/arm.cc b/gcc/config/arm/arm.cc
index 670f487..6bdb68a 100644
--- a/gcc/config/arm/arm.cc
+++ b/gcc/config/arm/arm.cc
@@ -16211,14 +16211,16 @@ arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
case UNGT:
case UNGE:
case UNEQ:
- case LTGT:
return CCFPmode;
case LT:
case LE:
case GT:
case GE:
- return CCFPEmode;
+ case LTGT:
+ return (flag_finite_math_only
+ ? CCFPmode
+ : CCFPEmode);
default:
gcc_unreachable ();
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 8472b75..08d3f0d 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -2257,7 +2257,11 @@ extern int making_const_table;
#define SELECT_CC_MODE(OP, X, Y) arm_select_cc_mode (OP, X, Y)
-#define REVERSIBLE_CC_MODE(MODE) 1
+/* Floating-point modes cannot be reversed unless we don't care about
+ NaNs. */
+#define REVERSIBLE_CC_MODE(MODE) \
+ (flag_finite_math_only \
+ || !((MODE) == CCFPmode || (MODE) == CCFPEmode))
#define REVERSE_CONDITION(CODE,MODE) \
(((MODE) == CCFPmode || (MODE) == CCFPEmode) \
diff --git a/gcc/config/i386/i386-features.cc b/gcc/config/i386/i386-features.cc
index 31f3ee2..1ba5ac4 100644
--- a/gcc/config/i386/i386-features.cc
+++ b/gcc/config/i386/i386-features.cc
@@ -3309,8 +3309,16 @@ ix86_get_vector_load_mode (unsigned int size)
mode = V64QImode;
else if (size == 32)
mode = V32QImode;
- else
+ else if (size == 16)
mode = V16QImode;
+ else if (size == 8)
+ mode = V8QImode;
+ else if (size == 4)
+ mode = V4QImode;
+ else if (size == 2)
+ mode = V2QImode;
+ else
+ gcc_unreachable ();
return mode;
}
@@ -3338,13 +3346,36 @@ replace_vector_const (machine_mode vector_mode, rtx vector_const,
if (SUBREG_P (dest) || mode == vector_mode)
replace = vector_const;
else
- replace = gen_rtx_SUBREG (mode, vector_const, 0);
+ {
+ unsigned int size = GET_MODE_SIZE (mode);
+ if (size < ix86_regmode_natural_size (mode))
+ {
+ /* If the mode size is smaller than its natural size,
+ first insert an extra move with a QI vector SUBREG
+ of the same size to avoid validate_subreg failure. */
+ machine_mode vmode = ix86_get_vector_load_mode (size);
+ rtx vreg;
+ if (mode == vmode)
+ vreg = vector_const;
+ else
+ {
+ vreg = gen_reg_rtx (vmode);
+ rtx vsubreg = gen_rtx_SUBREG (vmode, vector_const, 0);
+ rtx pat = gen_rtx_SET (vreg, vsubreg);
+ rtx_insn *vinsn = emit_insn_before (pat, insn);
+ df_insn_rescan (vinsn);
+ }
+ replace = gen_rtx_SUBREG (mode, vreg, 0);
+ }
+ else
+ replace = gen_rtx_SUBREG (mode, vector_const, 0);
+ }
- /* NB: Don't run recog_memoized here since vector SUBREG may not
- be valid. Let LRA handle vector SUBREG. */
SET_SRC (set) = replace;
/* Drop possible dead definitions. */
PATTERN (insn) = set;
+ INSN_CODE (insn) = -1;
+ recog_memoized (insn);
df_insn_rescan (insn);
}
}
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index bef95ea..fd36ea8 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -25767,6 +25767,26 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
(ix86_tune_cost, GET_MODE_BITSIZE (mode));
break;
+ case FLOAT_EXPR:
+ if (SSE_FLOAT_MODE_SSEMATH_OR_HFBF_P (mode))
+ stmt_cost = ix86_cost->cvtsi2ss;
+ else if (X87_FLOAT_MODE_P (mode))
+ /* TODO: We do not have cost tables for x87. */
+ stmt_cost = ix86_cost->fadd;
+ else
+ stmt_cost = ix86_vec_cost (mode, ix86_cost->cvtpi2ps);
+ break;
+
+ case FIX_TRUNC_EXPR:
+ if (SSE_FLOAT_MODE_SSEMATH_OR_HFBF_P (mode))
+ stmt_cost = ix86_cost->cvtss2si;
+ else if (X87_FLOAT_MODE_P (mode))
+ /* TODO: We do not have cost tables for x87. */
+ stmt_cost = ix86_cost->fadd;
+ else
+ stmt_cost = ix86_vec_cost (mode, ix86_cost->cvtps2pi);
+ break;
+
case COND_EXPR:
{
/* SSE2 conditional move sequence is:
@@ -25930,8 +25950,7 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
break;
}
- if (kind == vec_promote_demote
- && fp && FLOAT_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt_info->stmt))))
+ if (kind == vec_promote_demote)
{
int outer_size
= tree_to_uhwi
@@ -25941,16 +25960,25 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
= tree_to_uhwi
(TYPE_SIZE
(TREE_TYPE (gimple_assign_rhs1 (stmt_info->stmt))));
- int stmt_cost = vec_fp_conversion_cost
- (ix86_tune_cost, GET_MODE_BITSIZE (mode));
- /* VEC_PACK_TRUNC_EXPR: If inner size is greater than outer size we will end
- up doing two conversions and packing them. */
+ bool inner_fp = FLOAT_TYPE_P
+ (TREE_TYPE (gimple_assign_rhs1 (stmt_info->stmt)));
+
+ if (fp && inner_fp)
+ stmt_cost = vec_fp_conversion_cost
+ (ix86_tune_cost, GET_MODE_BITSIZE (mode));
+ else if (fp && !inner_fp)
+ stmt_cost = ix86_vec_cost (mode, ix86_cost->cvtpi2ps);
+ else if (!fp && inner_fp)
+ stmt_cost = ix86_vec_cost (mode, ix86_cost->cvtps2pi);
+ else
+ stmt_cost = ix86_vec_cost (mode, ix86_cost->sse_op);
+ /* VEC_PACK_TRUNC_EXPR and similar demote operations: If inner size is
+ greater than outer size we will end up doing two conversions and
+ packing them. We always pack pairs; if the size difference is greater
+ it is split into multiple demote operations. */
if (inner_size > outer_size)
- {
- int n = inner_size / outer_size;
- stmt_cost = stmt_cost * n
- + (n - 1) * ix86_vec_cost (mode, ix86_cost->sse_op);
- }
+ stmt_cost = stmt_cost * 2
+ + ix86_vec_cost (mode, ix86_cost->sse_op);
}
/* If we do elementwise loads into a vector then we are bound by
diff --git a/gcc/config/riscv/riscv-vect-permconst.cc b/gcc/config/riscv/riscv-vect-permconst.cc
index feecc7e..8e13cf8 100644
--- a/gcc/config/riscv/riscv-vect-permconst.cc
+++ b/gcc/config/riscv/riscv-vect-permconst.cc
@@ -203,6 +203,24 @@ vector_permconst::process_bb (basic_block bb)
if (bias < 0 || bias > 16384 / 8)
continue;
+ /* We need to verify that each element would be a valid value
+ in the inner mode after applying the bias. */
+ machine_mode inner = GET_MODE_INNER (GET_MODE (cvec));
+ HOST_WIDE_INT precision = GET_MODE_PRECISION (inner).to_constant ();
+ int i;
+ for (i = 0; i < CONST_VECTOR_NUNITS (cvec).to_constant (); i++)
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (cvec, i)) - bias;
+ if (val != sext_hwi (val, precision))
+ break;
+ }
+
+ /* If the loop terminated early, then we found a case where the
+ adjusted constant would not fit, so we can't record the constant
+ for this case (it's unlikely to be useful anyway). */
+ if (i != CONST_VECTOR_NUNITS (cvec).to_constant ())
+ continue;
+
/* At this point we have a load of a constant integer vector from the
constant pool. That constant integer vector is hopefully a
permutation constant. We need to make a copy of the vector and
@@ -211,7 +229,7 @@ vector_permconst::process_bb (basic_block bb)
XXX This violates structure sharing conventions. */
rtvec_def *nvec = gen_rtvec (CONST_VECTOR_NUNITS (cvec).to_constant ());
- for (int i = 0; i < CONST_VECTOR_NUNITS (cvec).to_constant (); i++)
+ for (i = 0; i < CONST_VECTOR_NUNITS (cvec).to_constant (); i++)
nvec->elem[i] = GEN_INT (INTVAL (CONST_VECTOR_ELT (cvec, i)) - bias);
rtx copy = gen_rtx_CONST_VECTOR (GET_MODE (cvec), nvec);
diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt
index 9e471be..80593ee 100644
--- a/gcc/config/riscv/riscv.opt
+++ b/gcc/config/riscv/riscv.opt
@@ -274,6 +274,8 @@ Mask(ZA64RS) Var(riscv_za_subext)
Mask(ZA128RS) Var(riscv_za_subext)
+Mask(ZAMA16B) Var(riscv_za_subext)
+
TargetVariable
int riscv_zb_subext
diff --git a/gcc/config/s390/s390-protos.h b/gcc/config/s390/s390-protos.h
index e8c7f83..d760a7e 100644
--- a/gcc/config/s390/s390-protos.h
+++ b/gcc/config/s390/s390-protos.h
@@ -114,6 +114,7 @@ extern bool s390_expand_cmpmem (rtx, rtx, rtx, rtx);
extern void s390_expand_vec_strlen (rtx, rtx, rtx);
extern void s390_expand_vec_movstr (rtx, rtx, rtx);
extern bool s390_expand_addcc (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
+extern void s390_expand_cstoreti4 (rtx, rtx, rtx, rtx);
extern bool s390_expand_insv (rtx, rtx, rtx, rtx);
extern void s390_expand_cs (machine_mode, rtx, rtx, rtx, rtx, rtx, bool);
extern void s390_expand_atomic_exchange_tdsi (rtx, rtx, rtx);
diff --git a/gcc/config/s390/s390.cc b/gcc/config/s390/s390.cc
index e3edf85..2d44cec 100644
--- a/gcc/config/s390/s390.cc
+++ b/gcc/config/s390/s390.cc
@@ -7210,6 +7210,82 @@ s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
NULL_RTX, 1, OPTAB_DIRECT);
}
+/* Expand optab cstoreti4. */
+
+void
+s390_expand_cstoreti4 (rtx dst, rtx cmp, rtx op1, rtx op2)
+{
+ rtx_code code = GET_CODE (cmp);
+
+ if (TARGET_VXE3)
+ {
+ rtx cond = s390_emit_compare (GET_MODE (cmp), code, op1, op2);
+ emit_insn (gen_movsicc (dst, cond, const1_rtx, const0_rtx));
+ return;
+ }
+
+ /* Prior to VXE3, emulate the comparison. For an (in)equality test exploit
+ VECTOR COMPARE EQUAL. For a relational test, first compare the high parts
+ via VECTOR ELEMENT COMPARE (LOGICAL). If the high parts are not equal,
+ then consume the CC immediately by a subsequent LOAD ON CONDITION.
+ Otherwise, if the high parts are equal, then perform a subsequent VECTOR
+ COMPARE HIGH LOGICAL followed by a LOAD ON CONDITION. */
+
+ op1 = force_reg (V2DImode, simplify_gen_subreg (V2DImode, op1, TImode, 0));
+ op2 = force_reg (V2DImode, simplify_gen_subreg (V2DImode, op2, TImode, 0));
+
+ if (code == EQ || code == NE)
+ {
+ s390_expand_vec_compare_cc (dst, code, op1, op2, code == EQ);
+ return;
+ }
+
+ /* Normalize code into either GE(U) or GT(U). */
+ if (code == LT || code == LE || code == LTU || code == LEU)
+ {
+ std::swap (op1, op2);
+ code = swap_condition (code);
+ }
+
+ /* For (un)signed comparisons
+ - high(op1) >= high(op2) instruction VECG op1, op2 sets CC1
+ if the relation does _not_ hold.
+ - high(op1) > high(op2) instruction VECG op2, op1 sets CC1
+ if the relation holds. */
+ if (code == GT || code == GTU)
+ std::swap (op1, op2);
+ machine_mode cc_mode = (code == GEU || code == GTU) ? CCUmode : CCSmode;
+ rtx lane0 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
+ emit_insn (
+ gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
+ gen_rtx_COMPARE (cc_mode,
+ gen_rtx_VEC_SELECT (DImode, op1, lane0),
+ gen_rtx_VEC_SELECT (DImode, op2, lane0))));
+ rtx ccs_reg = gen_rtx_REG (CCSmode, CC_REGNUM);
+ rtx lab = gen_label_rtx ();
+ s390_emit_jump (lab, gen_rtx_NE (VOIDmode, ccs_reg, const0_rtx));
+ /* At this point we have that high(op1) == high(op2). Thus, test the low
+ part, now. For unsigned comparisons
+ - low(op1) >= low(op2) instruction VCHLGS op2, op1 sets CC1
+ if the relation does _not_ hold.
+ - low(op1) > low(op2) instruction VCHLGS op1, op2 sets CC1
+ if the relation holds. */
+ std::swap (op1, op2);
+ emit_insn (gen_rtx_PARALLEL (
+ VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (gen_rtx_REG (CCVIHUmode, CC_REGNUM),
+ gen_rtx_COMPARE (CCVIHUmode, op1, op2)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode)))));
+ emit_label (lab);
+ /* For (un)signed comparison >= any CC except CC1 means that the relation
+ holds. For (un)signed comparison > only CC1 means that the relation
+ holds. */
+ rtx_code cmp_code = (code == GE || code == GEU) ? UNGE : LT;
+ rtx cond = gen_rtx_fmt_ee (cmp_code, CCSmode, ccs_reg, const0_rtx);
+ emit_insn (gen_movsicc (dst, cond, const1_rtx, const0_rtx));
+}
+
/* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
the result in TARGET. */
@@ -7310,9 +7386,9 @@ s390_expand_vec_compare (rtx target, enum rtx_code cond,
/* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
elements in CMP1 and CMP2 fulfill the comparison.
- This function is only used to emit patterns for the vx builtins and
- therefore only handles comparison codes required by the
- builtins. */
+ This function is only used in s390_expand_cstoreti4 and to emit patterns for
+ the vx builtins and therefore only handles comparison codes required by
+ those. */
void
s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
rtx cmp1, rtx cmp2, bool all_p)
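A scalar sketch of the pre-VXE3 emulation described in the comment inside
s390_expand_cstoreti4 above, assuming the 128-bit operands are already split
into 64-bit halves (the function name is illustrative, not part of GCC):

#include <stdint.h>

/* Model of the emulated unsigned "x >= y" on 128-bit values: compare the
   high doublewords first (VECG/VECLG plus LOAD ON CONDITION); only when
   they are equal do the low doublewords decide the result (VCHLGS plus
   LOAD ON CONDITION).  */
static int
cstore_geu128_model (uint64_t x_hi, uint64_t x_lo,
                     uint64_t y_hi, uint64_t y_lo)
{
  if (x_hi != y_hi)
    return x_hi > y_hi;
  return x_lo >= y_lo;
}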
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index 05b9da6..97a4bdf 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -993,6 +993,10 @@
(define_mode_attr asm_fcmp [(CCVEQ "e") (CCVFH "h") (CCVFHE "he")])
(define_mode_attr insn_cmp [(CCVEQ "eq") (CCVIH "h") (CCVIHU "hl") (CCVFH "h") (CCVFHE "he")])
+(define_mode_iterator CC_SUZ [CCS CCU CCZ])
+(define_mode_attr l [(CCS "") (CCU "l") (CCZ "")])
+(define_mode_attr cc_tolower [(CCS "ccs") (CCU "ccu") (CCZ "ccz")])
+
; Analogue to TOINTVEC / tointvec
(define_mode_attr TOINT [(TF "TI") (DF "DI") (SF "SI")])
(define_mode_attr toint [(TF "ti") (DF "di") (SF "si")])
diff --git a/gcc/config/s390/vector.md b/gcc/config/s390/vector.md
index e29255f..160e42a 100644
--- a/gcc/config/s390/vector.md
+++ b/gcc/config/s390/vector.md
@@ -538,6 +538,14 @@
"vlvg<bhfgq>\t%v0,%1,%Y4(%2)"
[(set_attr "op_type" "VRS")])
+(define_expand "cstoreti4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:TI 2 "register_operand")
+ (match_operand:TI 3 "register_operand")]))]
+ "TARGET_VX"
+ "s390_expand_cstoreti4 (operands[0], operands[1], operands[2], operands[3]); DONE;")
+
;; FIXME: Support also vector mode operands for 0
;; This is used via RTL standard name as well as for expanding the builtin
@@ -2209,6 +2217,28 @@
operands[5] = gen_reg_rtx (V2DImode);
})
+(define_insn "*vec_cmpv2di_lane0_<cc_tolower>"
+ [(set (reg:CC_SUZ CC_REGNUM)
+ (compare:CC_SUZ
+ (vec_select:DI
+ (match_operand:V2DI 0 "register_operand" "v")
+ (parallel [(const_int 0)]))
+ (vec_select:DI
+ (match_operand:V2DI 1 "register_operand" "v")
+ (parallel [(const_int 0)]))))]
+ "TARGET_VX"
+ "vec<l>g\t%v0,%v1"
+ [(set_attr "op_type" "VRR")])
+
+(define_insn "*vec_cmpti_<cc_tolower>"
+ [(set (reg:CC_SUZ CC_REGNUM)
+ (compare:CC_SUZ
+ (match_operand:TI 0 "register_operand" "v")
+ (match_operand:TI 1 "register_operand" "v")))]
+ "TARGET_VXE3"
+ "vec<l>q\t%v0,%v1"
+ [(set_attr "op_type" "VRR")])
+
;;
;; Floating point compares
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index e6d9fa6..d92b9d6 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,11 @@
+2025-05-07 Paul Thomas <pault@gcc.gnu.org>
+ and Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/119948
+ * primary.cc (match_variable): Module procedures with sym the
+ same as result can be treated as variables, although marked
+ external.
+
2025-05-06 Jerry DeLisle <jvdelisle@gcc.gnu.org>
PR fortran/120049
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 5884b79..7721795 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -10573,7 +10573,7 @@ gimple_fold_indirect_ref (tree t)
integer types involves undefined behavior on overflow and the
operation can be expressed with unsigned arithmetic. */
-bool
+static bool
arith_code_with_undefined_signed_overflow (tree_code code)
{
switch (code)
@@ -10590,6 +10590,30 @@ arith_code_with_undefined_signed_overflow (tree_code code)
}
}
+/* Return true if STMT is an assignment whose operation works on signed
+ integer types, involves undefined behavior on overflow, and can be
+ expressed with unsigned arithmetic. */
+
+bool
+gimple_with_undefined_signed_overflow (gimple *stmt)
+{
+ if (!is_gimple_assign (stmt))
+ return false;
+ tree lhs = gimple_assign_lhs (stmt);
+ if (!lhs)
+ return false;
+ tree lhs_type = TREE_TYPE (lhs);
+ if (!INTEGRAL_TYPE_P (lhs_type)
+ && !POINTER_TYPE_P (lhs_type))
+ return false;
+ if (!TYPE_OVERFLOW_UNDEFINED (lhs_type))
+ return false;
+ if (!arith_code_with_undefined_signed_overflow
+ (gimple_assign_rhs_code (stmt)))
+ return false;
+ return true;
+}
+
/* Rewrite STMT, an assignment with a signed integer or pointer arithmetic
operation that can be transformed to unsigned arithmetic by converting
its operand, carrying out the operation in the corresponding unsigned
diff --git a/gcc/gimple-fold.h b/gcc/gimple-fold.h
index 2790d0f..5fcfdcd 100644
--- a/gcc/gimple-fold.h
+++ b/gcc/gimple-fold.h
@@ -59,7 +59,7 @@ extern tree gimple_get_virt_method_for_vtable (HOST_WIDE_INT, tree,
extern tree gimple_fold_indirect_ref (tree);
extern bool gimple_fold_builtin_sprintf (gimple_stmt_iterator *);
extern bool gimple_fold_builtin_snprintf (gimple_stmt_iterator *);
-extern bool arith_code_with_undefined_signed_overflow (tree_code);
+extern bool gimple_with_undefined_signed_overflow (gimple *);
extern void rewrite_to_defined_overflow (gimple_stmt_iterator *);
extern gimple_seq rewrite_to_defined_overflow (gimple *);
extern void replace_call_with_value (gimple_stmt_iterator *, tree);
diff --git a/gcc/simplify-rtx.cc b/gcc/simplify-rtx.cc
index 7bcbe11..b34fd2f 100644
--- a/gcc/simplify-rtx.cc
+++ b/gcc/simplify-rtx.cc
@@ -7387,6 +7387,13 @@ simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
return gen_rtx_CONST_VECTOR (mode, v);
}
+ if (swap_commutative_operands_p (op0, op1)
+ /* If the two operands have the same precedence, canonicalize so
+ that the first bit of the mask selects the first operand. */
+ || (!swap_commutative_operands_p (op1, op0) && !(sel & 1)))
+ return simplify_gen_ternary (code, mode, mode, op1, op0,
+ GEN_INT (~sel & mask));
+
/* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
if no element from a appears in the result. */
if (GET_CODE (op0) == VEC_MERGE)
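The new canonicalization above swaps the two vec_merge operands; a one-line
sketch of the accompanying selector adjustment (an illustration, not GCC's
API):

/* Swapping the vec_merge operands complements the selector within the
   lane mask: lanes previously taken from the first operand must now be
   taken from the second.  */
static unsigned
swapped_vec_merge_sel (unsigned sel, unsigned mask)
{
  return ~sel & mask;
}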
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index b2ebcef..73e8f7c 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,74 @@
+2025-05-07 Jeff Law <jlaw@ventanamicro.com>
+
+ PR target/120137
+ PR target/120154
+ * gcc.target/riscv/pr120137.c: New test.
+ * gcc.target/riscv/pr120154.c: New test.
+
+2025-05-07 Dongyan Chen <chendongyan@isrc.iscas.ac.cn>
+
+ * gcc.target/riscv/arch-48.c: New test.
+
+2025-05-07 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/110796
+ PR target/118446
+ * gcc.target/arm/armv8_2-fp16-arith-1.c: Adjust due to no longer
+ emitting VCMPE when -ffast-math.
+
+2025-05-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR preprocessor/108900
+ PR preprocessor/116047
+ PR preprocessor/120061
+ * gcc.dg/plugin/plugin.exp: Add location-overflow-test-pr116047.c
+ and location-overflow-test-pr120061.c.
+ * gcc.dg/plugin/location_overflow_plugin.cc (plugin_init): Don't error
+ on unknown values, instead just break. Handle 0x4fHHHHHH arguments
+ differently.
+ * gcc.dg/plugin/location-overflow-test-pr116047.c: New test.
+ * gcc.dg/plugin/location-overflow-test-pr116047-1.h: New test.
+ * gcc.dg/plugin/location-overflow-test-pr116047-2.h: New test.
+ * gcc.dg/plugin/location-overflow-test-pr120061.c: New test.
+ * gcc.dg/plugin/location-overflow-test-pr120061-1.h: New test.
+ * gcc.dg/plugin/location-overflow-test-pr120061-2.h: New test.
+
+2025-05-07 Jan Hubicka <hubicka@ucw.cz>
+
+ * gcc.target/i386/pr119919.c: Add -mtune=znver1
+
+2025-05-07 Jennifer Schmitz <jschmitz@nvidia.com>
+
+ PR target/117978
+ * gcc.target/aarch64/sve/acle/general/whilelt_5.c: Adjust expected
+ outcome.
+ * gcc.target/aarch64/sve/ldst_ptrue_pat_128_to_neon.c: New test.
+ * gcc.target/aarch64/sve/while_7.c: Adjust expected outcome.
+ * gcc.target/aarch64/sve/while_9.c: Adjust expected outcome.
+
+2025-05-07 Stefan Schulze Frielinghaus <stefansf@gcc.gnu.org>
+
+ * gcc.target/s390/vector/cstoreti-1.c: New test.
+ * gcc.target/s390/vector/cstoreti-2.c: New test.
+
+2025-05-07 H.J. Lu <hjl.tools@gmail.com>
+
+ PR target/120036
+ * g++.target/i386/pr120036.C: New test.
+ * gcc.target/i386/pr117839-3a.c: Likewise.
+ * gcc.target/i386/pr117839-3b.c: Likewise.
+
+2025-05-07 Paul Thomas <pault@gcc.gnu.org>
+ and Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/119948
+ * gfortran.dg/pr119948.f90: Update to incorporate failing test,
+ where module procedure is the result. Test submodule cases.
+
+2025-05-07 Jeff Law <jlaw@ventanamicro.com>
+
+ * g++.target/riscv/redundant-andi.C: New test.
+
2025-05-06 Dongyan Chen <chendongyan@isrc.iscas.ac.cn>
* gcc.target/riscv/arch-47.c: New test.
diff --git a/gcc/testsuite/g++.target/i386/pr120036.C b/gcc/testsuite/g++.target/i386/pr120036.C
new file mode 100644
index 0000000..a2fc24f
--- /dev/null
+++ b/gcc/testsuite/g++.target/i386/pr120036.C
@@ -0,0 +1,113 @@
+/* { dg-do compile { target fpic } } */
+/* { dg-options "-O2 -std=c++11 -march=sapphirerapids -fPIC" } */
+
+typedef _Float16 Native;
+struct float16_t
+{
+ Native native;
+ float16_t ();
+ float16_t (Native arg) : native (arg) {}
+ operator Native ();
+ float16_t
+ operator+ (float16_t rhs)
+ {
+ return native + rhs.native;
+ }
+ float16_t
+ operator* (float16_t)
+ {
+ return native * native;
+ }
+};
+template <int N> struct Simd
+{
+ static constexpr int kPrivateLanes = N;
+};
+template <int N> struct ClampNAndPow2
+{
+ using type = Simd<N>;
+};
+template <int kLimit> struct CappedTagChecker
+{
+ static constexpr int N = sizeof (int) ? kLimit : 0;
+ using type = typename ClampNAndPow2<N>::type;
+};
+template <typename, int kLimit, int>
+using CappedTag = typename CappedTagChecker<kLimit>::type;
+template <class D>
+int
+Lanes (D)
+{
+ return D::kPrivateLanes;
+}
+template <class D> int Zero (D);
+template <class D> using VFromD = decltype (Zero (D ()));
+struct Vec512
+{
+ __attribute__ ((__vector_size__ (16))) _Float16 raw;
+};
+Vec512 Zero (Simd<2>);
+template <class D> void ReduceSum (D, VFromD<D>);
+struct Dot
+{
+ template <int, class D, typename T>
+ static T
+ Compute (D d, T *pa, int num_elements)
+ {
+ T *pb;
+ int N = Lanes (d), i = 0;
+ if (__builtin_expect (num_elements < N, 0))
+ {
+ T sum0 = 0, sum1 = 0;
+ for (; i + 2 <= num_elements; i += 2)
+ {
+ float16_t __trans_tmp_6 = pa[i] * pb[i],
+ __trans_tmp_5 = sum0 + __trans_tmp_6,
+ __trans_tmp_8 = pa[i + 1] * pb[1],
+ __trans_tmp_7 = sum1 + __trans_tmp_8;
+ sum0 = __trans_tmp_5;
+ sum1 = __trans_tmp_7;
+ }
+ float16_t __trans_tmp_9 = sum0 + sum1;
+ return __trans_tmp_9;
+ }
+ decltype (Zero (d)) sum0;
+ ReduceSum (d, sum0);
+ __builtin_trap ();
+ }
+};
+template <int kMul, class Test, int kPow2> struct ForeachCappedR
+{
+ static void
+ Do (int min_lanes, int max_lanes)
+ {
+ CappedTag<int, kMul, kPow2> d;
+ Test () (int (), d);
+ ForeachCappedR<kMul / 2, Test, kPow2>::Do (min_lanes, max_lanes);
+ }
+};
+template <class Test, int kPow2> struct ForeachCappedR<0, Test, kPow2>
+{
+ static void Do (int, int);
+};
+struct TestDot
+{
+ template <class T, class D>
+ void
+ operator() (T, D d)
+ {
+ int counts[]{ 1, 3 };
+ for (int num : counts)
+ {
+ float16_t a;
+ T __trans_tmp_4 = Dot::Compute<0> (d, &a, num);
+ }
+ }
+};
+int DotTest_TestAllDot_TestTestBody_max_lanes;
+void
+DotTest_TestAllDot_TestTestBody ()
+{
+ ForeachCappedR<64, TestDot, 0>::Do (
+ 1, DotTest_TestAllDot_TestTestBody_max_lanes);
+}
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-1.h b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-1.h
new file mode 100644
index 0000000..3dd6434
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-1.h
@@ -0,0 +1,6 @@
+
+
+
+
+#include "location-overflow-test-pr116047-2.h"
+static_assert (__LINE__ == 6, "");
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-2.h b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-2.h
new file mode 100644
index 0000000..048f715
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047-2.h
@@ -0,0 +1 @@
+int i;
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047.c b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047.c
new file mode 100644
index 0000000..75161fa
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr116047.c
@@ -0,0 +1,5 @@
+/* PR preprocessor/116047 */
+/* { dg-do preprocess } */
+/* { dg-options "-nostdinc -std=c23 -fplugin-arg-location_overflow_plugin-value=0x4ffe0180" } */
+#include "location-overflow-test-pr116047-1.h"
+/* { dg-final { scan-file location-overflow-test-pr116047.i "static_assert\[^\n\r]\*6\[^\n\r]\*== 6" } } */
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-1.h b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-1.h
new file mode 100644
index 0000000..ebf7704
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-1.h
@@ -0,0 +1,6 @@
+
+
+
+
+#include "location-overflow-test-pr120061-2.h"
+
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-2.h b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-2.h
new file mode 100644
index 0000000..048f715
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061-2.h
@@ -0,0 +1 @@
+int i;
diff --git a/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061.c b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061.c
new file mode 100644
index 0000000..e8e8038
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/plugin/location-overflow-test-pr120061.c
@@ -0,0 +1,6 @@
+/* PR preprocessor/120061 */
+/* { dg-do preprocess } */
+/* { dg-options "-nostdinc -std=c23 -fplugin-arg-location_overflow_plugin-value=0x61000000" } */
+#include "location-overflow-test-pr120061-1.h"
+static_assert (__LINE__ == 5, "");
+/* { dg-final { scan-file location-overflow-test-pr120061.i "static_assert\[^\n\r]\*5\[^\n\r]\*== 5" } } */
diff --git a/gcc/testsuite/gcc.dg/plugin/location_overflow_plugin.cc b/gcc/testsuite/gcc.dg/plugin/location_overflow_plugin.cc
index f731b14..f770d35 100644
--- a/gcc/testsuite/gcc.dg/plugin/location_overflow_plugin.cc
+++ b/gcc/testsuite/gcc.dg/plugin/location_overflow_plugin.cc
@@ -85,9 +85,18 @@ plugin_init (struct plugin_name_args *plugin_info,
error_at (UNKNOWN_LOCATION, "missing plugin argument");
/* With 64-bit locations, the thresholds are larger, so shift the base
- location argument accordingly. */
+ location argument accordingly, basically remap the GCC 14 32-bit
+ location_t argument values to 64-bit location_t counterparts. There
+ is one exception for values slightly before the 32-bit location_t
+ LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES (0x50000000). In that case
+ remap them to the same amount before the 64-bit location_t
+ LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES -
+ ((location_t) 0x50000000) << 31. */
gcc_assert (sizeof (location_t) == sizeof (uint64_t));
- base_location = 1 + ((base_location - 1) << 31);
+ if (base_location >= 0x4f000000 && base_location <= 0x4fffffff)
+ base_location += (((location_t) 0x50000000) << 31) - 0x50000000;
+ else
+ base_location = 1 + ((base_location - 1) << 31);
register_callback (plugin_info->base_name,
PLUGIN_PRAGMAS,
@@ -107,7 +116,7 @@ plugin_init (struct plugin_name_args *plugin_info,
break;
default:
- error_at (UNKNOWN_LOCATION, "unrecognized value for plugin argument");
+ break;
}
return 0;
diff --git a/gcc/testsuite/gcc.dg/plugin/plugin.exp b/gcc/testsuite/gcc.dg/plugin/plugin.exp
index 90c9162..96e76d2 100644
--- a/gcc/testsuite/gcc.dg/plugin/plugin.exp
+++ b/gcc/testsuite/gcc.dg/plugin/plugin.exp
@@ -138,7 +138,9 @@ set plugin_test_list [list \
{ location_overflow_plugin.cc \
location-overflow-test-1.c \
location-overflow-test-2.c \
- location-overflow-test-pr83173.c } \
+ location-overflow-test-pr83173.c \
+ location-overflow-test-pr116047.c \
+ location-overflow-test-pr120061.c } \
{ must_tail_call_plugin.cc \
must-tail-call-1.c \
must-tail-call-2.c } \
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_134-pr120089.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_134-pr120089.c
new file mode 100644
index 0000000..4d8199c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_134-pr120089.c
@@ -0,0 +1,66 @@
+/* { dg-add-options vect_early_break } */
+/* { dg-additional-options "-funswitch-loops" } */
+
+#include "tree-vect.h"
+
+typedef int type;
+typedef type Vec2[2];
+
+struct BytesVec {
+ type d[100];
+};
+
+__attribute__((noipa)) struct BytesVec
+buildVertexBufferData(const Vec2 *origVertices, bool needsZW,
+ unsigned paddingSize, unsigned long t) {
+ const unsigned vertexCount = t;
+ struct BytesVec data = (struct BytesVec){.d = {0}};
+ type *nextVertexPtr = data.d;
+
+ for (unsigned vertexIdx = 0u; vertexIdx < vertexCount; ++vertexIdx) {
+
+ if (vertexIdx > t)
+ __builtin_trap();
+ __builtin_memcpy(nextVertexPtr, &origVertices[vertexIdx],
+ 2 * sizeof(type));
+ nextVertexPtr += 2;
+
+ if (needsZW) {
+ nextVertexPtr += 2;
+ }
+
+ nextVertexPtr += paddingSize;
+ }
+
+ return data;
+}
+Vec2 origVertices[] = {
+ {0, 1}, {2, 3}, {4, 5}, {6, 7},
+ {8, 9}, {10, 11}, {12, 13}, {14, 15},
+ {16, 17}, {18, 19}, {20, 21}, {22, 23},
+ {24, 25}, {26, 27}, {27, 28}, {29, 30},
+};
+
+int main()
+{
+ check_vect ();
+ struct BytesVec vec
+ = buildVertexBufferData(origVertices, false, 0,
+ sizeof(origVertices) / sizeof(origVertices[0]));
+
+ int errors = 0;
+ for (unsigned i = 0; i < 100; i++) {
+ if (i / 2 < sizeof(origVertices) / sizeof(origVertices[0])) {
+ int ii = i;
+ int e = origVertices[ii / 2][ii % 2];
+ if (vec.d[i] != e)
+ errors++;
+ } else {
+ if (vec.d[i] != 0)
+ errors++;
+ }
+ }
+ if (errors)
+ __builtin_abort();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_135-pr120143.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_135-pr120143.c
new file mode 100644
index 0000000..1ee30a8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_135-pr120143.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-add-options vect_early_break } */
+/* { dg-additional-options "-O3 -fwhole-program" } */
+
+short a;
+extern _Bool b[][23];
+short g = 6;
+int v[4];
+int x[3];
+void c(short g, int v[], int x[]) {
+ for (;;)
+ for (unsigned y = 0; y < 023; y++) {
+ b[y][y] = v[y];
+ for (_Bool aa = 0; aa < (_Bool)g; aa = x[y])
+ a = a > 0;
+ }
+}
+int main() { c(g, v, x); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/whilelt_5.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/whilelt_5.c
index f06a74a..05e266a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/whilelt_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/whilelt_5.c
@@ -11,8 +11,7 @@ extern "C" {
/*
** load_vl1:
-** ptrue (p[0-7])\.[bhsd], vl1
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr h0, \[x0\]
** ret
*/
svint16_t
@@ -22,7 +21,12 @@ load_vl1 (int16_t *ptr)
}
/*
-** load_vl2:
+** load_vl2: { target aarch64_little_endian }
+** ldr s0, \[x0\]
+** ret
+*/
+/*
+** load_vl2: { target aarch64_big_endian }
** ptrue (p[0-7])\.h, vl2
** ld1h z0\.h, \1/z, \[x0\]
** ret
@@ -46,7 +50,12 @@ load_vl3 (int16_t *ptr)
}
/*
-** load_vl4:
+** load_vl4: { target aarch64_little_endian }
+** ldr d0, \[x0\]
+** ret
+*/
+/*
+** load_vl4: { target aarch64_big_endian }
** ptrue (p[0-7])\.h, vl4
** ld1h z0\.h, \1/z, \[x0\]
** ret
@@ -94,7 +103,12 @@ load_vl7 (int16_t *ptr)
}
/*
-** load_vl8:
+** load_vl8: { target aarch64_little_endian }
+** ldr q0, \[x0\]
+** ret
+*/
+/*
+** load_vl8: { target aarch64_big_endian }
** ptrue (p[0-7])\.h, vl8
** ld1h z0\.h, \1/z, \[x0\]
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_pat_128_to_neon.c b/gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_pat_128_to_neon.c
new file mode 100644
index 0000000..2d47c1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_pat_128_to_neon.c
@@ -0,0 +1,81 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-require-effective-target aarch64_little_endian } */
+
+#include <arm_sve.h>
+
+#define TEST(TYPE, TY, W, B) \
+ sv##TYPE \
+ ld1_##TY##W##B##_1 (TYPE *x) \
+ { \
+ svbool_t pg = svwhilelt_b##B (0, W); \
+ return svld1_##TY##B (pg, x); \
+ } \
+ sv##TYPE \
+ ld1_##TY##W##B##_2 (TYPE *x) \
+ { \
+ svbool_t pg = svptrue_pat_b##B ((enum svpattern) (W > 8 ? 9 : W)); \
+ return svld1_##TY##B (pg, x); \
+ } \
+ void \
+ st1_##TY##W##B##_1 (TYPE *x, sv##TYPE data) \
+ { \
+ svbool_t pg = svwhilelt_b##B (0, W); \
+ return svst1_##TY##B (pg, x, data); \
+ } \
+ void \
+ st1_##TY##W##B##_2 (TYPE *x, sv##TYPE data) \
+ { \
+ svbool_t pg = svptrue_pat_b##B ((enum svpattern) (W > 8 ? 9 : W)); \
+ return svst1_##TY##B (pg, x, data); \
+ } \
+
+#define TEST64(TYPE, TY, B) \
+ TEST (TYPE, TY, 1, B) \
+ TEST (TYPE, TY, 2, B) \
+
+#define TEST32(TYPE, TY, B) \
+ TEST64 (TYPE, TY, B) \
+ TEST (TYPE, TY, 4, B) \
+
+#define TEST16(TYPE, TY, B) \
+ TEST32 (TYPE, TY, B) \
+ TEST (TYPE, TY, 8, B) \
+
+#define TEST8(TYPE, TY, B) \
+ TEST16 (TYPE, TY, B) \
+ TEST (TYPE, TY, 16, B)
+
+#define T(TYPE, TY, B) \
+ TEST##B (TYPE, TY, B)
+
+T (bfloat16_t, bf, 16)
+T (float16_t, f, 16)
+T (float32_t, f, 32)
+T (float64_t, f, 64)
+T (int8_t, s, 8)
+T (int16_t, s, 16)
+T (int32_t, s, 32)
+T (int64_t, s, 64)
+T (uint8_t, u, 8)
+T (uint16_t, u, 16)
+T (uint32_t, u, 32)
+T (uint64_t, u, 64)
+
+/* { dg-final { scan-assembler-times {\tldr\tq0, \[x0\]} 24 } } */
+/* { dg-final { scan-assembler-times {\tldr\td0, \[x0\]} 24 } } */
+/* { dg-final { scan-assembler-times {\tldr\ts0, \[x0\]} 18 } } */
+/* { dg-final { scan-assembler-times {\tldr\th0, \[x0\]} 12 } } */
+/* { dg-final { scan-assembler-times {\tldr\tb0, \[x0\]} 4 } } */
+
+/* { dg-final { scan-assembler-times {\tstr\tq0, \[x0\]} 24 } } */
+/* { dg-final { scan-assembler-times {\tstr\td0, \[x0\]} 24 } } */
+/* { dg-final { scan-assembler-times {\tstr\ts0, \[x0\]} 18 } } */
+/* { dg-final { scan-assembler-times {\tstr\th0, \[x0\]} 12 } } */
+/* { dg-final { scan-assembler-times {\tstr\tb0, \[x0\]} 4 } } */
+
+svint8_t foo (int8_t *x)
+{
+ return svld1_s8 (svptrue_b16 (), x);
+}
+/* { dg-final { scan-assembler-times {\tptrue\tp[0-7]\.h, all\n\tld1b} 1 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/while_7.c b/gcc/testsuite/gcc.target/aarch64/sve/while_7.c
index a66a20d..ab2fa36 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/while_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/while_7.c
@@ -19,7 +19,7 @@
TEST_ALL (ADD_LOOP)
-/* { dg-final { scan-assembler-times {\tptrue\tp[0-7]\.b, vl8\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tptrue\tp[0-7]\.h, vl8\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tldr\td[0-9]+, \[x0\]} 1 } } */
+/* { dg-final { scan-assembler-times {\tldr\tq[0-9]+, \[x0\]} 1 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s,} 2 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d,} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/while_9.c b/gcc/testsuite/gcc.target/aarch64/sve/while_9.c
index dd3f404..99940dd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/while_9.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/while_9.c
@@ -19,7 +19,7 @@
TEST_ALL (ADD_LOOP)
-/* { dg-final { scan-assembler-times {\tptrue\tp[0-7]\.b, vl16\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tldr\tq[0-9]+\, \[x0\]} 1 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.h,} 2 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.s,} 2 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7]\.d,} 2 } } */
diff --git a/gcc/testsuite/gcc.target/arm/armv8_2-fp16-arith-1.c b/gcc/testsuite/gcc.target/arm/armv8_2-fp16-arith-1.c
index 52b8737..f3fea52 100644
--- a/gcc/testsuite/gcc.target/arm/armv8_2-fp16-arith-1.c
+++ b/gcc/testsuite/gcc.target/arm/armv8_2-fp16-arith-1.c
@@ -106,8 +106,7 @@ TEST_CMP (greaterthanqual, >=, int16x8_t, float16x8_t)
/* { dg-final { scan-assembler-times {vdiv\.f16\ts[0-9]+, s[0-9]+, s[0-9]+} 13 } } */
/* For float16_t. */
-/* { dg-final { scan-assembler-times {vcmp\.f32\ts[0-9]+, s[0-9]+} 2 } } */
-/* { dg-final { scan-assembler-times {vcmpe\.f32\ts[0-9]+, s[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vcmp\.f32\ts[0-9]+, s[0-9]+} 6 } } */
/* For float16x4_t. */
/* { dg-final { scan-assembler-times {vceq\.f16\td[0-9]+, d[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr117839-3a.c b/gcc/testsuite/gcc.target/i386/pr117839-3a.c
new file mode 100644
index 0000000..81afa9d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr117839-3a.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mno-avx -msse2 -mtune=generic" } */
+/* { dg-final { scan-assembler-times "xor\[a-z\]*\[\t \]*%xmm\[0-9\]\+,\[^,\]*" 1 } } */
+
+typedef char v4qi __attribute__((vector_size(4)));
+typedef char v16qi __attribute__((vector_size(16)));
+
+v4qi a;
+v16qi b;
+void
+foo (v4qi* c, v16qi* d)
+{
+ v4qi sum = __extension__(v4qi){0, 0, 0, 0};
+ v16qi sum2 = __extension__(v16qi){0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0};
+ for (int i = 0; i != 100; i++)
+ sum += c[i];
+ for (int i = 0 ; i != 100; i++)
+ sum2 += d[i];
+ a = sum;
+ b = sum2;
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr117839-3b.c b/gcc/testsuite/gcc.target/i386/pr117839-3b.c
new file mode 100644
index 0000000..a599c28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr117839-3b.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=x86-64-v3" } */
+/* { dg-final { scan-assembler-times "xor\[a-z\]*\[\t \]*%xmm\[0-9\]\+,\[^,\]*" 1 } } */
+
+#include "pr117839-3a.c"
diff --git a/gcc/testsuite/gcc.target/i386/pr119919.c b/gcc/testsuite/gcc.target/i386/pr119919.c
index ed64656..e39819f 100644
--- a/gcc/testsuite/gcc.target/i386/pr119919.c
+++ b/gcc/testsuite/gcc.target/i386/pr119919.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -msse2 -fdump-tree-vect-details" } */
+/* { dg-options "-O2 -msse2 -fdump-tree-vect-details -mtune=znver1" } */
int a[9*9];
bool b[9];
void test()
diff --git a/gcc/testsuite/gcc.target/riscv/arch-48.c b/gcc/testsuite/gcc.target/riscv/arch-48.c
new file mode 100644
index 0000000..58a558e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/arch-48.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zama16b -mabi=lp64" } */
+int foo()
+{
+}
diff --git a/gcc/testsuite/gcc.target/riscv/pr120137.c b/gcc/testsuite/gcc.target/riscv/pr120137.c
new file mode 100644
index 0000000..c55a1c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/pr120137.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl256b -mrvv-vector-bits=zvl -mabi=lp64" } */
+
+char b[13][13];
+void c() {
+ for (int d = 0; d < 13; ++d)
+ for (int e = 0; e < 13; ++e)
+ b[d][e] = e == 0 ? -98 : 38;
+}
+
+
+
diff --git a/gcc/testsuite/gcc.target/riscv/pr120154.c b/gcc/testsuite/gcc.target/riscv/pr120154.c
new file mode 100644
index 0000000..fd849ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/pr120154.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gv -mabi=lp64" } */
+
+
+
+typedef __attribute__((__vector_size__(4))) char V;
+
+V g;
+
+V
+bar(V a, V b)
+{
+ V s = a + b + g;
+ return s;
+}
+
+V
+foo()
+{
+ return bar((V){20}, (V){23, 150});
+}
+
diff --git a/gcc/testsuite/gcc.target/s390/vector/cstoreti-1.c b/gcc/testsuite/gcc.target/s390/vector/cstoreti-1.c
new file mode 100644
index 0000000..f2a131b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/vector/cstoreti-1.c
@@ -0,0 +1,127 @@
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2 -march=z13" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/*
+** test_le:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** vecg \2,\1
+** jne \.L.+
+** vchlgs %v.,\1,\2
+** lghi %r2,0
+** locghinl %r2,1
+** br %r14
+*/
+
+int test_le (__int128 x, __int128 y) { return x <= y; }
+
+/*
+** test_leu:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** veclg \2,\1
+** jne \.L.+
+** vchlgs %v.,\1,\2
+** lghi %r2,0
+** locghinl %r2,1
+** br %r14
+*/
+
+int test_leu (unsigned __int128 x, unsigned __int128 y) { return x <= y; }
+
+/*
+** test_lt:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** vecg \1,\2
+** jne \.L.+
+** vchlgs %v.,\2,\1
+** lghi %r2,0
+** locghil %r2,1
+** br %r14
+*/
+
+int test_lt (__int128 x, __int128 y) { return x < y; }
+
+/*
+** test_ltu:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** veclg \1,\2
+** jne \.L.+
+** vchlgs %v.,\2,\1
+** lghi %r2,0
+** locghil %r2,1
+** br %r14
+*/
+
+int test_ltu (unsigned __int128 x, unsigned __int128 y) { return x < y; }
+
+/*
+** test_ge:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** vecg \1,\2
+** jne \.L.+
+** vchlgs %v.,\2,\1
+** lghi %r2,0
+** locghinl %r2,1
+** br %r14
+*/
+
+int test_ge (__int128 x, __int128 y) { return x >= y; }
+
+/*
+** test_geu:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** veclg \1,\2
+** jne \.L.+
+** vchlgs %v.,\2,\1
+** lghi %r2,0
+** locghinl %r2,1
+** br %r14
+*/
+
+int test_geu (unsigned __int128 x, unsigned __int128 y) { return x >= y; }
+
+/*
+** test_gt:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** vecg \2,\1
+** jne \.L.+
+** vchlgs %v.,\1,\2
+** lghi %r2,0
+** locghil %r2,1
+** br %r14
+*/
+
+int test_gt (__int128 x, __int128 y) { return x > y; }
+
+/*
+** test_gtu:
+** vl (%v.),0\(%r2\),3
+** vl (%v.),0\(%r3\),3
+** veclg \2,\1
+** jne \.L.+
+** vchlgs %v.,\1,\2
+** lghi %r2,0
+** locghil %r2,1
+** br %r14
+*/
+
+int test_gtu (unsigned __int128 x, unsigned __int128 y) { return x > y; }
+
+/* { dg-final { scan-assembler-times {vceqgs\t} 4 } } */
+/* { dg-final { scan-assembler-times {locghie\t} 2 } } */
+/* { dg-final { scan-assembler-times {locghine\t} 2 } } */
+
+int test_eq (__int128 x, __int128 y) { return x == y; }
+
+int test_equ (unsigned __int128 x, unsigned __int128 y) { return x == y; }
+
+int test_ne (__int128 x, __int128 y) { return x != y; }
+
+int test_neu (unsigned __int128 x, unsigned __int128 y) { return x != y; }
diff --git a/gcc/testsuite/gcc.target/s390/vector/cstoreti-2.c b/gcc/testsuite/gcc.target/s390/vector/cstoreti-2.c
new file mode 100644
index 0000000..d7b0382
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/vector/cstoreti-2.c
@@ -0,0 +1,25 @@
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2 -march=z17" } */
+/* { dg-final { scan-assembler-times {vecq\t} 8 } } */
+/* { dg-final { scan-assembler-times {veclq\t} 4 } } */
+/* { dg-final { scan-assembler-times {locghile\t} 1 } } LE */
+/* { dg-final { scan-assembler-times {slbgr\t} 1 } } LEU */
+/* { dg-final { scan-assembler-times {locghil\t} 2 } } LT LTU */
+/* { dg-final { scan-assembler-times {locghihe\t} 2 } } GE GEU */
+/* { dg-final { scan-assembler-times {locghih\t} 1 } } GT */
+/* { dg-final { scan-assembler-times {alcgr\t} 1 } } GTU */
+/* { dg-final { scan-assembler-times {locghie\t} 2 } } EQ EQU */
+/* { dg-final { scan-assembler-times {locghine\t} 2 } } NE NEU */
+
+int test_le (__int128 x, __int128 y) { return x <= y; }
+int test_leu (unsigned __int128 x, unsigned __int128 y) { return x <= y; }
+int test_lt (__int128 x, __int128 y) { return x < y; }
+int test_ltu (unsigned __int128 x, unsigned __int128 y) { return x < y; }
+int test_ge (__int128 x, __int128 y) { return x >= y; }
+int test_geu (unsigned __int128 x, unsigned __int128 y) { return x >= y; }
+int test_gt (__int128 x, __int128 y) { return x > y; }
+int test_gtu (unsigned __int128 x, unsigned __int128 y) { return x > y; }
+int test_eq (__int128 x, __int128 y) { return x == y; }
+int test_equ (unsigned __int128 x, unsigned __int128 y) { return x == y; }
+int test_ne (__int128 x, __int128 y) { return x != y; }
+int test_neu (unsigned __int128 x, unsigned __int128 y) { return x != y; }
diff --git a/gcc/tree-if-conv.cc b/gcc/tree-if-conv.cc
index 5b63bf6..fe8aee0 100644
--- a/gcc/tree-if-conv.cc
+++ b/gcc/tree-if-conv.cc
@@ -1066,11 +1066,7 @@ if_convertible_gimple_assign_stmt_p (gimple *stmt,
fprintf (dump_file, "tree could trap...\n");
return false;
}
- else if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (lhs))
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (stmt)))
+ else if (gimple_with_undefined_signed_overflow (stmt))
/* We have to rewrite stmts with undefined overflow. */
need_to_rewrite_undefined = true;
@@ -2830,7 +2826,6 @@ predicate_statements (loop_p loop)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gassign *stmt = dyn_cast <gassign *> (gsi_stmt (gsi));
- tree lhs;
if (!stmt)
;
else if (is_false_predicate (cond)
@@ -2886,12 +2881,7 @@ predicate_statements (loop_p loop)
gsi_replace (&gsi, new_stmt, true);
}
- else if (((lhs = gimple_assign_lhs (stmt)), true)
- && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (lhs))
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (stmt)))
+ else if (gimple_with_undefined_signed_overflow (stmt))
rewrite_to_defined_overflow (&gsi);
else if (gimple_vdef (stmt))
{
@@ -2946,7 +2936,7 @@ predicate_statements (loop_p loop)
gsi_replace (&gsi, new_call, true);
}
- lhs = gimple_get_lhs (gsi_stmt (gsi));
+ tree lhs = gimple_get_lhs (gsi_stmt (gsi));
if (lhs && TREE_CODE (lhs) == SSA_NAME)
ssa_names.add (lhs);
gsi_next (&gsi);
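
The open-coded predicate deleted in the hunks above tested three
things: the statement is a gassign, its lhs type is integral or
pointer with undefined overflow, and its rhs code is arithmetic with
undefined signed overflow.  A minimal sketch of what the consolidated
gimple_with_undefined_signed_overflow helper must check, reconstructed
from those deleted conditions (the real implementation may differ):

    /* Sketch only; reconstructed from the checks this patch removes.  */
    static bool
    gimple_with_undefined_signed_overflow_sketch (gimple *stmt)
    {
      gassign *ass = dyn_cast <gassign *> (stmt);
      if (!ass)
        return false;
      tree type = TREE_TYPE (gimple_assign_lhs (ass));
      return ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
              && TYPE_OVERFLOW_UNDEFINED (type)
              && arith_code_with_undefined_signed_overflow
                   (gimple_assign_rhs_code (ass)));
    }

rewrite_to_defined_overflow then turns such statements into unsigned
arithmetic, which has defined wrap-around; in source terms the effect
resembles rewriting "r = a + b" on signed int as
"r = (int) ((unsigned) a + (unsigned) b)".
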
diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc
index 4ca0875..9d64d3a 100644
--- a/gcc/tree-scalar-evolution.cc
+++ b/gcc/tree-scalar-evolution.cc
@@ -3932,10 +3932,7 @@ final_value_replacement_loop (class loop *loop)
gsi2 = gsi_start (stmts);
while (!gsi_end_p (gsi2))
{
- gimple *stmt = gsi_stmt (gsi2);
- if (is_gimple_assign (stmt)
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (stmt)))
+ if (gimple_with_undefined_signed_overflow (gsi_stmt (gsi2)))
rewrite_to_defined_overflow (&gsi2);
gsi_next (&gsi2);
}
diff --git a/gcc/tree-ssa-ifcombine.cc b/gcc/tree-ssa-ifcombine.cc
index f791994..19990d6 100644
--- a/gcc/tree-ssa-ifcombine.cc
+++ b/gcc/tree-ssa-ifcombine.cc
@@ -514,15 +514,9 @@ ifcombine_mark_ssa_name_walk (tree *t, int *, void *data_)
static inline void
ifcombine_rewrite_to_defined_overflow (gimple_stmt_iterator gsi)
{
- gassign *ass = dyn_cast <gassign *> (gsi_stmt (gsi));
- if (!ass)
+ if (!gimple_with_undefined_signed_overflow (gsi_stmt (gsi)))
return;
- tree lhs = gimple_assign_lhs (ass);
- if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (ass)))
- rewrite_to_defined_overflow (&gsi);
+ rewrite_to_defined_overflow (&gsi);
}
diff --git a/gcc/tree-ssa-loop-im.cc b/gcc/tree-ssa-loop-im.cc
index a3ca5af..ae2fd87 100644
--- a/gcc/tree-ssa-loop-im.cc
+++ b/gcc/tree-ssa-loop-im.cc
@@ -1241,12 +1241,24 @@ compute_invariantness (basic_block bb)
lim_data->cost);
}
- if (lim_data->cost >= LIM_EXPENSIVE
- /* When we run before PRE and PRE is active hoist all expressions
- since PRE would do so anyway and we can preserve range info
- but PRE cannot. */
- || (flag_tree_pre && !in_loop_pipeline))
+ if (lim_data->cost >= LIM_EXPENSIVE)
set_profitable_level (stmt);
+ /* When we run before PRE and PRE is active, hoist all expressions
+ to the always-executed loop, since PRE would do so anyway and
+ we can preserve range info while PRE cannot. */
+ else if (flag_tree_pre && !in_loop_pipeline
+ && outermost)
+ {
+ class loop *mloop = lim_data->max_loop;
+ if (loop_depth (outermost) > loop_depth (mloop))
+ {
+ mloop = outermost;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " constraining to loop depth %d\n\n\n",
+ loop_depth (mloop));
+ }
+ set_level (stmt, bb->loop_father, mloop);
+ }
}
}
@@ -1407,11 +1419,7 @@ move_computations_worker (basic_block bb)
when the target loop header is executed and the stmt may
invoke undefined integer or pointer overflow rewrite it to
unsigned arithmetic. */
- if (is_gimple_assign (stmt)
- && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (stmt))
+ if (gimple_with_undefined_signed_overflow (stmt)
&& (!ALWAYS_EXECUTED_IN (bb)
|| !(ALWAYS_EXECUTED_IN (bb) == level
|| flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
diff --git a/gcc/tree-ssa-loop-split.cc b/gcc/tree-ssa-loop-split.cc
index 5f78c0b..80f488a 100644
--- a/gcc/tree-ssa-loop-split.cc
+++ b/gcc/tree-ssa-loop-split.cc
@@ -663,10 +663,7 @@ split_loop (class loop *loop1)
gsi = gsi_start (stmts2);
while (!gsi_end_p (gsi))
{
- gimple *stmt = gsi_stmt (gsi);
- if (is_gimple_assign (stmt)
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (stmt)))
+ if (gimple_with_undefined_signed_overflow (gsi_stmt (gsi)))
rewrite_to_defined_overflow (&gsi);
gsi_next (&gsi);
}
diff --git a/gcc/tree-ssa-reassoc.cc b/gcc/tree-ssa-reassoc.cc
index 4017eea..13bb85c 100644
--- a/gcc/tree-ssa-reassoc.cc
+++ b/gcc/tree-ssa-reassoc.cc
@@ -2925,30 +2925,22 @@ update_range_test (struct range_entry *range, struct range_entry *otherrange,
!gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
- if (is_gimple_assign (stmt))
- if (tree lhs = gimple_assign_lhs (stmt))
- if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (lhs)))
- {
- enum tree_code code = gimple_assign_rhs_code (stmt);
- if (arith_code_with_undefined_signed_overflow (code))
- {
- gimple_stmt_iterator gsip = gsi;
- gimple_stmt_iterator gsin = gsi;
- gsi_prev (&gsip);
- gsi_next (&gsin);
- rewrite_to_defined_overflow (&gsi);
- unsigned uid = gimple_uid (stmt);
- if (gsi_end_p (gsip))
- gsip = gsi_after_labels (bb);
- else
- gsi_next (&gsip);
- for (; gsi_stmt (gsip) != gsi_stmt (gsin);
- gsi_next (&gsip))
- gimple_set_uid (gsi_stmt (gsip), uid);
- }
- }
+ if (gimple_with_undefined_signed_overflow (stmt))
+ {
+ gimple_stmt_iterator gsip = gsi;
+ gimple_stmt_iterator gsin = gsi;
+ gsi_prev (&gsip);
+ gsi_next (&gsin);
+ rewrite_to_defined_overflow (&gsi);
+ unsigned uid = gimple_uid (stmt);
+ if (gsi_end_p (gsip))
+ gsip = gsi_after_labels (bb);
+ else
+ gsi_next (&gsip);
+ for (; gsi_stmt (gsip) != gsi_stmt (gsin);
+ gsi_next (&gsip))
+ gimple_set_uid (gsi_stmt (gsip), uid);
+ }
}
if (opcode == BIT_IOR_EXPR
diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
index 231a3ca..9fd1ef2 100644
--- a/gcc/tree-vect-data-refs.cc
+++ b/gcc/tree-vect-data-refs.cc
@@ -734,7 +734,6 @@ vect_analyze_early_break_dependences (loop_vec_info loop_vinfo)
stmt_vec_info stmt_vinfo
= vect_stmt_to_vectorize (loop_vinfo->lookup_stmt (stmt));
- stmt = STMT_VINFO_STMT (stmt_vinfo);
auto dr_ref = STMT_VINFO_DATA_REF (stmt_vinfo);
if (!dr_ref)
continue;
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 562e222..80e9c01 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -5042,14 +5042,17 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size,
vec<stmt_vec_info> roots = vNULL;
vec<tree> remain = vNULL;
gphi *phi = as_a<gphi *> (STMT_VINFO_STMT (stmt_info));
- stmts.create (1);
tree def = gimple_phi_arg_def_from_edge (phi, latch_e);
stmt_vec_info lc_info = loop_vinfo->lookup_def (def);
- stmts.quick_push (vect_stmt_to_vectorize (lc_info));
- vect_build_slp_instance (vinfo, slp_inst_kind_reduc_group,
- stmts, roots, remain,
- max_tree_size, &limit,
- bst_map, NULL, force_single_lane);
+ if (lc_info)
+ {
+ stmts.create (1);
+ stmts.quick_push (vect_stmt_to_vectorize (lc_info));
+ vect_build_slp_instance (vinfo, slp_inst_kind_reduc_group,
+ stmts, roots, remain,
+ max_tree_size, &limit,
+ bst_map, NULL, force_single_lane);
+ }
/* When the latch def is from a different cycle this can only
be an induction. Build a simple instance for this.
??? We should be able to start discovery from the PHI
@@ -5059,8 +5062,6 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size,
tem.quick_push (stmt_info);
if (!bst_map->get (tem))
{
- gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info)
- == vect_induction_def);
stmts.create (1);
stmts.quick_push (stmt_info);
vect_build_slp_instance (vinfo, slp_inst_kind_reduc_group,
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index ea0b426..a8762ba 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -419,18 +419,21 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
}
}
- /* Check if it's an induction and multiple exits. In this case there will be
- a usage later on after peeling which is needed for the alternate exit. */
+ /* Check if it's a non-live PHI in a loop with multiple exits. In
+ this case there will be a use later on after peeling, which is
+ needed for the alternate exit. */
if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
- && STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
+ && is_a <gphi *> (stmt)
+ && ((! VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))
+ && ! *live_p)
+ || STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def))
{
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "vec_stmt_relevant_p: induction forced for "
- "early break.\n");
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vec_stmt_relevant_p: PHI forced live for "
+ "early break.\n");
LOOP_VINFO_EARLY_BREAKS_LIVE_IVS (loop_vinfo).safe_push (stmt_info);
*live_p = true;
-
}
if (*live_p && *relevant == vect_unused_in_scope
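
The hunk above widens the early-break handling from inductions to any
not-otherwise-live PHI.  A minimal illustration of the kind of loop
involved (illustrative only): the PHI value must stay live so that the
alternate (early) exit can hand a correct value to the epilogue.

    /* The final value of i is needed on the early exit even though
       nothing else in the loop body keeps the PHI live.  */
    int first_match (const int *a, int n, int key)
    {
      int i;
      for (i = 0; i < n; i++)
        if (a[i] == key)
          break;
      return i;
    }
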
@@ -714,6 +717,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
bb = bbs[i];
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
+ if (virtual_operand_p (gimple_phi_result (gsi_stmt (si))))
+ continue;
stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? %G",
@@ -8786,6 +8791,15 @@ vectorizable_store (vec_info *vinfo,
if (n == const_nunits)
{
int mis_align = dr_misalignment (first_dr_info, vectype);
+ /* With VF > 1 we advance the DR by step; if that step is constant
+ and only aligned when performed VF times, DR alignment
+ analysis can classify the access as aligned since it assumes
+ contiguous accesses. But that is not how we generate code
+ here, so adjust for this. */
+ if (maybe_gt (vf, 1u)
+ && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+ DR_TARGET_ALIGNMENT (first_dr_info)))
+ mis_align = -1;
dr_alignment_support dr_align
= vect_supportable_dr_alignment (vinfo, dr_info, vectype,
mis_align);
@@ -8807,6 +8821,10 @@ vectorizable_store (vec_info *vinfo,
ltype = build_vector_type (elem_type, n);
lvectype = vectype;
int mis_align = dr_misalignment (first_dr_info, ltype);
+ if (maybe_gt (vf, 1u)
+ && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+ DR_TARGET_ALIGNMENT (first_dr_info)))
+ mis_align = -1;
dr_alignment_support dr_align
= vect_supportable_dr_alignment (vinfo, dr_info, ltype,
mis_align);
@@ -8867,17 +8885,10 @@ vectorizable_store (vec_info *vinfo,
}
}
unsigned align;
- /* ??? We'd want to use
- if (alignment_support_scheme == dr_aligned)
- align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
- since doing that is what we assume we can in the above checks.
- But this interferes with groups with gaps where for example
- VF == 2 makes the group in the unrolled loop aligned but the
- fact that we advance with step between the two subgroups
- makes the access to the second unaligned. See PR119586.
- We have to anticipate that here or adjust code generation to
- avoid the misaligned loads by means of permutations. */
- align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+ if (alignment_support_scheme == dr_aligned)
+ align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+ else
+ align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
/* Alignment is at most the access size if we do multiple stores. */
if (nstores > 1)
align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
@@ -10805,6 +10816,15 @@ vectorizable_load (vec_info *vinfo,
if (n == const_nunits)
{
int mis_align = dr_misalignment (first_dr_info, vectype);
+ /* With VF > 1 we advance the DR by step; if that step is constant
+ and only aligned when performed VF times, DR alignment
+ analysis can classify the access as aligned since it assumes
+ contiguous accesses. But that is not how we generate code
+ here, so adjust for this. */
+ if (maybe_gt (vf, 1u)
+ && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+ DR_TARGET_ALIGNMENT (first_dr_info)))
+ mis_align = -1;
dr_alignment_support dr_align
= vect_supportable_dr_alignment (vinfo, dr_info, vectype,
mis_align);
@@ -10833,6 +10853,10 @@ vectorizable_load (vec_info *vinfo,
if (VECTOR_TYPE_P (ptype))
{
mis_align = dr_misalignment (first_dr_info, ptype);
+ if (maybe_gt (vf, 1u)
+ && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+ DR_TARGET_ALIGNMENT (first_dr_info)))
+ mis_align = -1;
dr_align
= vect_supportable_dr_alignment (vinfo, dr_info, ptype,
mis_align);
@@ -10852,8 +10876,10 @@ vectorizable_load (vec_info *vinfo,
}
}
unsigned align;
- /* ??? The above is still wrong, see vectorizable_store. */
- align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+ if (alignment_support_scheme == dr_aligned)
+ align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+ else
+ align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
/* Alignment is at most the access size if we do multiple loads. */
if (nloads > 1)
align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
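
The new maybe_gt (vf, 1u) checks in this file implement what the
deleted "???" comment anticipated: the DR is advanced by its scalar
step between vector iterations, so an access that is aligned for the
whole unrolled group can still be misaligned at each individual step.
A sketch of the kind of access pattern where step alignment and target
alignment can disagree (illustrative; see PR119586 for the original
report):

    /* A grouped access using only part of each stride: the region
       covered after VF iterations may be aligned while the
       per-iteration step is not a multiple of the target alignment.  */
    double a[1024], b[1024];
    void f (void)
    {
      for (int i = 0; i < 256; i++)
        a[i] = b[4 * i] + b[4 * i + 1];
    }
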
diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog
index 9a5208c..eef6ec7 100644
--- a/libcpp/ChangeLog
+++ b/libcpp/ChangeLog
@@ -1,3 +1,16 @@
+2025-05-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR preprocessor/108900
+ PR preprocessor/116047
+ PR preprocessor/120061
+ * files.cc (_cpp_stack_file): Revert 2025-03-28 change.
+ * line-map.cc (linemap_add): Use
+ SOURCE_LINE (from, linemap_included_from (map - 1)) + 1; instead of
+ SOURCE_LINE (from, from[1].start_location); to compute to_line
+ for LC_LEAVE. For LC_ENTER included_from computation, look at
+ map[-2] or even lower if map[-1] has the same start_location as
+ map[0].
+
2025-04-28 Lewis Hyatt <lhyatt@gmail.com>
PR c/118838
diff --git a/libcpp/files.cc b/libcpp/files.cc
index c1abde6..d80c4bf 100644
--- a/libcpp/files.cc
+++ b/libcpp/files.cc
@@ -1047,14 +1047,6 @@ _cpp_stack_file (cpp_reader *pfile, _cpp_file *file, include_type type,
&& (pfile->line_table->highest_location
!= LINE_MAP_MAX_LOCATION - 1));
- if (decrement && LINEMAPS_ORDINARY_USED (pfile->line_table))
- {
- const line_map_ordinary *map
- = LINEMAPS_LAST_ORDINARY_MAP (pfile->line_table);
- if (map && map->start_location == pfile->line_table->highest_location)
- decrement = false;
- }
-
if (decrement)
pfile->line_table->highest_location--;
diff --git a/libcpp/line-map.cc b/libcpp/line-map.cc
index 17e7f12..cf65571 100644
--- a/libcpp/line-map.cc
+++ b/libcpp/line-map.cc
@@ -621,8 +621,8 @@ linemap_add (line_maps *set, enum lc_reason reason,
#include "included", inside the same "includer" file. */
linemap_assert (!MAIN_FILE_P (map - 1));
- /* (MAP - 1) points to the map we are leaving. The
- map from which (MAP - 1) got included should be the map
+ /* (MAP - 1) points to the map we are leaving. The
+ map from which (MAP - 1) got included should usually be the map
that comes right before MAP in the same file. */
from = linemap_included_from_linemap (set, map - 1);
@@ -630,7 +630,24 @@ linemap_add (line_maps *set, enum lc_reason reason,
if (to_file == NULL)
{
to_file = ORDINARY_MAP_FILE_NAME (from);
- to_line = SOURCE_LINE (from, from[1].start_location);
+ /* Compute the line on which the map resumes; for #include this
+ should be the line after the #include line. Usually FROM is
+ the map right before the LC_ENTER map - the first map of the
+ included file - and in that case SOURCE_LINE (from,
+ from[1].start_location) computes the right line (and even
+ handles some special cases, e.g. where for returning from
+ <command line> we still want to be at line 0, or some
+ -traditional-cpp cases). In rare cases FROM can be followed
+ by an LC_RENAME map created by linemap_line_start for the line
+ right after the #include line. If that happens, start_location
+ of the FROM[1] map will be the same as start_location of the
+ FROM[2] LC_ENTER map, but FROM[1] start_location might not have
+ advanced enough to move to a full next line. In that case
+ compute the line of the #include line and add 1 to it to
+ advance to the next line. See PR120061. */
+ if (from[1].reason == LC_RENAME)
+ to_line = SOURCE_LINE (from, linemap_included_from (map - 1)) + 1;
+ else
+ to_line = SOURCE_LINE (from, from[1].start_location);
sysp = ORDINARY_MAP_IN_SYSTEM_HEADER_P (from);
}
else
@@ -660,11 +677,26 @@ linemap_add (line_maps *set, enum lc_reason reason,
if (set->depth == 0)
map->included_from = 0;
else
- /* The location of the end of the just-closed map. */
- map->included_from
- = (((map[0].start_location - 1 - map[-1].start_location)
- & ~((loc_one << map[-1].m_column_and_range_bits) - 1))
- + map[-1].start_location);
+ {
+ /* Compute the location from whence this line map was included.
+ For #include this should preferably be column 0 of the
+ line on which the #include directive appears.
+ map[-1] is the just-closed map and usually included_from
+ falls within that map. In rare cases linemap_line_start
+ can insert a new LC_RENAME map for the line immediately
+ after the #include line; in that case map[-1] will have the
+ same start_location as the new map and so included_from
+ would come not from map[-1] but likely from map[-2]. If that
+ happens, mask off the m_column_and_range_bits bits of map[-2]
+ instead of map[-1]. See PR120061. */
+ int i = -1;
+ while (map[i].start_location == map[0].start_location)
+ --i;
+ map->included_from
+ = (((map[0].start_location - 1 - map[i].start_location)
+ & ~((loc_one << map[i].m_column_and_range_bits) - 1))
+ + map[i].start_location);
+ }
set->depth++;
if (set->trace_includes)
trace_include (set, map);
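
The masking arithmetic above, extracted into a standalone form for
clarity (a sketch; real location_t handling in libcpp is more
involved):

    /* Given the start of the new map and the start of the map the
       #include line falls in, clear the column-and-range bits to land
       on column 0 of the #include line.  */
    unsigned long
    included_from_sketch (unsigned long new_start,
                          unsigned long prev_start,
                          unsigned col_and_range_bits)
    {
      unsigned long ofs = new_start - 1 - prev_start;
      ofs &= ~((1UL << col_and_range_bits) - 1);
      return prev_start + ofs;
    }
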
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index aa92b02..dd177ab 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,25 @@
+2025-05-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR libfortran/120153
+ * Makefile.am (i_maxloc1_c): Add generated/maxloc1_16_m16.c.
+ * intrinsics/random.c (arandom_m16): Use #ifdef HAVE_GFC_UINTEGER_16
+ guard rather than #ifdef GFC_HAVE_GFC_UINTEGER_16.
+ * gfortran.map (GFORTRAN_15): Remove _gfortran_arandom_m16,
+ _gfortran_maxloc1_16_m16, _gfortran_mmaxloc1_16_m16 and
+ _gfortran_smaxloc1_16_m16.
+ (GFORTRAN_15.2): New symbol version, add those 4 symbols to it.
+ * generated/maxloc1_16_m16.c: New file.
+ * Makefile.in: Regenerate.
+
+2025-05-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR libfortran/120152
+ * Makefile.am (i_maxloc1_c): Readd generated/maxloc1_4_i8.c,
+ generated/maxloc1_8_i8.c, generated/maxloc1_16_i8.c,
+ generated/maxloc1_4_i16.c, generated/maxloc1_8_i16.c. Move
+ generated/maxloc1_16_i16.c entry earlier in the list.
+ * Makefile.in: Regenerated.
+
2025-04-22 Andre Vehreschild <vehre@gcc.gnu.org>
* caf/libcaf.h: Add mapping mode to coarray's register.
diff --git a/libgfortran/Makefile.am b/libgfortran/Makefile.am
index 21b35c7..60aa949 100644
--- a/libgfortran/Makefile.am
+++ b/libgfortran/Makefile.am
@@ -400,6 +400,12 @@ generated/maxloc1_16_i2.c \
generated/maxloc1_4_i4.c \
generated/maxloc1_8_i4.c \
generated/maxloc1_16_i4.c \
+generated/maxloc1_4_i8.c \
+generated/maxloc1_8_i8.c \
+generated/maxloc1_16_i8.c \
+generated/maxloc1_4_i16.c \
+generated/maxloc1_8_i16.c \
+generated/maxloc1_16_i16.c \
generated/maxloc1_4_m1.c \
generated/maxloc1_8_m1.c \
generated/maxloc1_16_m1.c \
@@ -414,7 +420,7 @@ generated/maxloc1_8_m8.c \
generated/maxloc1_16_m8.c \
generated/maxloc1_4_m16.c \
generated/maxloc1_8_m16.c \
-generated/maxloc1_16_i16.c \
+generated/maxloc1_16_m16.c \
generated/maxloc1_4_r4.c \
generated/maxloc1_8_r4.c \
generated/maxloc1_16_r4.c \
diff --git a/libgfortran/Makefile.in b/libgfortran/Makefile.in
index 6a63d88..c171b3d 100644
--- a/libgfortran/Makefile.in
+++ b/libgfortran/Makefile.in
@@ -265,14 +265,17 @@ am__objects_8 = generated/maxloc1_4_i1.lo generated/maxloc1_8_i1.lo \
generated/maxloc1_16_i1.lo generated/maxloc1_4_i2.lo \
generated/maxloc1_8_i2.lo generated/maxloc1_16_i2.lo \
generated/maxloc1_4_i4.lo generated/maxloc1_8_i4.lo \
- generated/maxloc1_16_i4.lo generated/maxloc1_4_m1.lo \
+ generated/maxloc1_16_i4.lo generated/maxloc1_4_i8.lo \
+ generated/maxloc1_8_i8.lo generated/maxloc1_16_i8.lo \
+ generated/maxloc1_4_i16.lo generated/maxloc1_8_i16.lo \
+ generated/maxloc1_16_i16.lo generated/maxloc1_4_m1.lo \
generated/maxloc1_8_m1.lo generated/maxloc1_16_m1.lo \
generated/maxloc1_4_m2.lo generated/maxloc1_8_m2.lo \
generated/maxloc1_16_m2.lo generated/maxloc1_4_m4.lo \
generated/maxloc1_8_m4.lo generated/maxloc1_16_m4.lo \
generated/maxloc1_4_m8.lo generated/maxloc1_8_m8.lo \
generated/maxloc1_16_m8.lo generated/maxloc1_4_m16.lo \
- generated/maxloc1_8_m16.lo generated/maxloc1_16_i16.lo \
+ generated/maxloc1_8_m16.lo generated/maxloc1_16_m16.lo \
generated/maxloc1_4_r4.lo generated/maxloc1_8_r4.lo \
generated/maxloc1_16_r4.lo generated/maxloc1_4_r8.lo \
generated/maxloc1_8_r8.lo generated/maxloc1_16_r8.lo \
@@ -1205,6 +1208,12 @@ generated/maxloc1_16_i2.c \
generated/maxloc1_4_i4.c \
generated/maxloc1_8_i4.c \
generated/maxloc1_16_i4.c \
+generated/maxloc1_4_i8.c \
+generated/maxloc1_8_i8.c \
+generated/maxloc1_16_i8.c \
+generated/maxloc1_4_i16.c \
+generated/maxloc1_8_i16.c \
+generated/maxloc1_16_i16.c \
generated/maxloc1_4_m1.c \
generated/maxloc1_8_m1.c \
generated/maxloc1_16_m1.c \
@@ -1219,7 +1228,7 @@ generated/maxloc1_8_m8.c \
generated/maxloc1_16_m8.c \
generated/maxloc1_4_m16.c \
generated/maxloc1_8_m16.c \
-generated/maxloc1_16_i16.c \
+generated/maxloc1_16_m16.c \
generated/maxloc1_4_r4.c \
generated/maxloc1_8_r4.c \
generated/maxloc1_16_r4.c \
@@ -2311,6 +2320,18 @@ generated/maxloc1_8_i4.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
generated/maxloc1_16_i4.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
+generated/maxloc1_4_i8.lo: generated/$(am__dirstamp) \
+ generated/$(DEPDIR)/$(am__dirstamp)
+generated/maxloc1_8_i8.lo: generated/$(am__dirstamp) \
+ generated/$(DEPDIR)/$(am__dirstamp)
+generated/maxloc1_16_i8.lo: generated/$(am__dirstamp) \
+ generated/$(DEPDIR)/$(am__dirstamp)
+generated/maxloc1_4_i16.lo: generated/$(am__dirstamp) \
+ generated/$(DEPDIR)/$(am__dirstamp)
+generated/maxloc1_8_i16.lo: generated/$(am__dirstamp) \
+ generated/$(DEPDIR)/$(am__dirstamp)
+generated/maxloc1_16_i16.lo: generated/$(am__dirstamp) \
+ generated/$(DEPDIR)/$(am__dirstamp)
generated/maxloc1_4_m1.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
generated/maxloc1_8_m1.lo: generated/$(am__dirstamp) \
@@ -2339,7 +2360,7 @@ generated/maxloc1_4_m16.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
generated/maxloc1_8_m16.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
-generated/maxloc1_16_i16.lo: generated/$(am__dirstamp) \
+generated/maxloc1_16_m16.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
generated/maxloc1_4_r4.lo: generated/$(am__dirstamp) \
generated/$(DEPDIR)/$(am__dirstamp)
@@ -4197,7 +4218,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_i16.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_i2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_i4.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_i8.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_m1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_m16.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_m2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_m4.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_m8.Plo@am__quote@
@@ -4209,8 +4232,10 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_s1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_16_s4.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_i1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_i16.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_i2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_i4.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_i8.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_m1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_m16.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_m2.Plo@am__quote@
@@ -4224,8 +4249,10 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_s1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_4_s4.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_i1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_i16.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_i2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_i4.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_i8.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_m1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_m16.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@generated/$(DEPDIR)/maxloc1_8_m2.Plo@am__quote@
diff --git a/libgfortran/generated/maxloc1_16_m16.c b/libgfortran/generated/maxloc1_16_m16.c
new file mode 100644
index 0000000..d97dbc0
--- /dev/null
+++ b/libgfortran/generated/maxloc1_16_m16.c
@@ -0,0 +1,591 @@
+/* Implementation of the MAXLOC intrinsic
+ Copyright (C) 2002-2025 Free Software Foundation, Inc.
+ Contributed by Paul Brook <paul@nowt.org>
+
+This file is part of the GNU Fortran runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "libgfortran.h"
+#include <assert.h>
+
+
+#if defined (HAVE_GFC_UINTEGER_16) && defined (HAVE_GFC_INTEGER_16)
+
+#define HAVE_BACK_ARG 1
+
+
+extern void maxloc1_16_m16 (gfc_array_i16 * const restrict,
+ gfc_array_m16 * const restrict, const index_type * const restrict, GFC_LOGICAL_4 back);
+export_proto(maxloc1_16_m16);
+
+void
+maxloc1_16_m16 (gfc_array_i16 * const restrict retarray,
+ gfc_array_m16 * const restrict array,
+ const index_type * const restrict pdim, GFC_LOGICAL_4 back)
+{
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type sstride[GFC_MAX_DIMENSIONS];
+ index_type dstride[GFC_MAX_DIMENSIONS];
+ const GFC_UINTEGER_16 * restrict base;
+ GFC_INTEGER_16 * restrict dest;
+ index_type rank;
+ index_type n;
+ index_type len;
+ index_type delta;
+ index_type dim;
+ int continue_loop;
+
+ /* Make dim zero based to avoid confusion. */
+ rank = GFC_DESCRIPTOR_RANK (array) - 1;
+ dim = (*pdim) - 1;
+
+ if (unlikely (dim < 0 || dim > rank))
+ {
+ runtime_error ("Dim argument incorrect in MAXLOC intrinsic: "
+ "is %ld, should be between 1 and %ld",
+ (long int) dim + 1, (long int) rank + 1);
+ }
+
+ len = GFC_DESCRIPTOR_EXTENT(array,dim);
+ if (len < 0)
+ len = 0;
+ delta = GFC_DESCRIPTOR_STRIDE(array,dim);
+
+ for (n = 0; n < dim; n++)
+ {
+ sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
+
+ if (extent[n] < 0)
+ extent[n] = 0;
+ }
+ for (n = dim; n < rank; n++)
+ {
+ sstride[n] = GFC_DESCRIPTOR_STRIDE(array, n + 1);
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array, n + 1);
+
+ if (extent[n] < 0)
+ extent[n] = 0;
+ }
+
+ if (retarray->base_addr == NULL)
+ {
+ size_t alloc_size, str;
+
+ for (n = 0; n < rank; n++)
+ {
+ if (n == 0)
+ str = 1;
+ else
+ str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
+
+ GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
+
+ }
+
+ retarray->offset = 0;
+ retarray->dtype.rank = rank;
+
+ alloc_size = GFC_DESCRIPTOR_STRIDE(retarray,rank-1) * extent[rank-1];
+
+ retarray->base_addr = xmallocarray (alloc_size, sizeof (GFC_INTEGER_16));
+ if (alloc_size == 0)
+ return;
+ }
+ else
+ {
+ if (rank != GFC_DESCRIPTOR_RANK (retarray))
+ runtime_error ("rank of return array incorrect in"
+ " MAXLOC intrinsic: is %ld, should be %ld",
+ (long int) (GFC_DESCRIPTOR_RANK (retarray)),
+ (long int) rank);
+
+ if (unlikely (compile_options.bounds_check))
+ bounds_ifunction_return ((array_t *) retarray, extent,
+ "return value", "MAXLOC");
+ }
+
+ for (n = 0; n < rank; n++)
+ {
+ count[n] = 0;
+ dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
+ if (extent[n] <= 0)
+ return;
+ }
+
+ base = array->base_addr;
+ dest = retarray->base_addr;
+
+ continue_loop = 1;
+ while (continue_loop)
+ {
+ const GFC_UINTEGER_16 * restrict src;
+ GFC_INTEGER_16 result;
+ src = base;
+ {
+
+ GFC_UINTEGER_16 maxval;
+#if defined (GFC_UINTEGER_16_INFINITY)
+ maxval = -GFC_UINTEGER_16_INFINITY;
+#else
+ maxval = -GFC_UINTEGER_16_HUGE;
+#endif
+ result = 1;
+ if (len <= 0)
+ *dest = 0;
+ else
+ {
+#if ! defined HAVE_BACK_ARG
+ for (n = 0; n < len; n++, src += delta)
+ {
+#endif
+
+#if defined (GFC_UINTEGER_16_QUIET_NAN)
+ for (n = 0; n < len; n++, src += delta)
+ {
+ if (*src >= maxval)
+ {
+ maxval = *src;
+ result = (GFC_INTEGER_16)n + 1;
+ break;
+ }
+ }
+#else
+ n = 0;
+#endif
+ for (; n < len; n++, src += delta)
+ {
+ if (back ? *src >= maxval : *src > maxval)
+ {
+ maxval = *src;
+ result = (GFC_INTEGER_16)n + 1;
+ }
+ }
+
+ *dest = result;
+ }
+ }
+ /* Advance to the next element. */
+ count[0]++;
+ base += sstride[0];
+ dest += dstride[0];
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ base -= sstride[n] * extent[n];
+ dest -= dstride[n] * extent[n];
+ n++;
+ if (n >= rank)
+ {
+ /* Break out of the loop. */
+ continue_loop = 0;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ base += sstride[n];
+ dest += dstride[n];
+ }
+ }
+ }
+}
+
+
+extern void mmaxloc1_16_m16 (gfc_array_i16 * const restrict,
+ gfc_array_m16 * const restrict, const index_type * const restrict,
+ gfc_array_l1 * const restrict, GFC_LOGICAL_4 back);
+export_proto(mmaxloc1_16_m16);
+
+void
+mmaxloc1_16_m16 (gfc_array_i16 * const restrict retarray,
+ gfc_array_m16 * const restrict array,
+ const index_type * const restrict pdim,
+ gfc_array_l1 * const restrict mask, GFC_LOGICAL_4 back)
+{
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type sstride[GFC_MAX_DIMENSIONS];
+ index_type dstride[GFC_MAX_DIMENSIONS];
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ GFC_INTEGER_16 * restrict dest;
+ const GFC_UINTEGER_16 * restrict base;
+ const GFC_LOGICAL_1 * restrict mbase;
+ index_type rank;
+ index_type dim;
+ index_type n;
+ index_type len;
+ index_type delta;
+ index_type mdelta;
+ int mask_kind;
+
+ if (mask == NULL)
+ {
+#ifdef HAVE_BACK_ARG
+ maxloc1_16_m16 (retarray, array, pdim, back);
+#else
+ maxloc1_16_m16 (retarray, array, pdim);
+#endif
+ return;
+ }
+
+ dim = (*pdim) - 1;
+ rank = GFC_DESCRIPTOR_RANK (array) - 1;
+
+
+ if (unlikely (dim < 0 || dim > rank))
+ {
+ runtime_error ("Dim argument incorrect in MAXLOC intrinsic: "
+ "is %ld, should be between 1 and %ld",
+ (long int) dim + 1, (long int) rank + 1);
+ }
+
+ len = GFC_DESCRIPTOR_EXTENT(array,dim);
+ if (len < 0)
+ len = 0;
+
+ mbase = mask->base_addr;
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ mbase = GFOR_POINTER_TO_L1 (mbase, mask_kind);
+ else
+ runtime_error ("Funny sized logical array");
+
+ delta = GFC_DESCRIPTOR_STRIDE(array,dim);
+ mdelta = GFC_DESCRIPTOR_STRIDE_BYTES(mask,dim);
+
+ for (n = 0; n < dim; n++)
+ {
+ sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
+ mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n);
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
+
+ if (extent[n] < 0)
+ extent[n] = 0;
+
+ }
+ for (n = dim; n < rank; n++)
+ {
+ sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n + 1);
+ mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask, n + 1);
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array, n + 1);
+
+ if (extent[n] < 0)
+ extent[n] = 0;
+ }
+
+ if (retarray->base_addr == NULL)
+ {
+ size_t alloc_size, str;
+
+ for (n = 0; n < rank; n++)
+ {
+ if (n == 0)
+ str = 1;
+ else
+ str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
+
+ GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
+
+ }
+
+ alloc_size = GFC_DESCRIPTOR_STRIDE(retarray,rank-1) * extent[rank-1];
+
+ retarray->offset = 0;
+ retarray->dtype.rank = rank;
+
+ retarray->base_addr = xmallocarray (alloc_size, sizeof (GFC_INTEGER_16));
+ if (alloc_size == 0)
+ return;
+ }
+ else
+ {
+ if (rank != GFC_DESCRIPTOR_RANK (retarray))
+ runtime_error ("rank of return array incorrect in MAXLOC intrinsic");
+
+ if (unlikely (compile_options.bounds_check))
+ {
+ bounds_ifunction_return ((array_t *) retarray, extent,
+ "return value", "MAXLOC");
+ bounds_equal_extents ((array_t *) mask, (array_t *) array,
+ "MASK argument", "MAXLOC");
+ }
+ }
+
+ for (n = 0; n < rank; n++)
+ {
+ count[n] = 0;
+ dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
+ if (extent[n] <= 0)
+ return;
+ }
+
+ dest = retarray->base_addr;
+ base = array->base_addr;
+
+ while (base)
+ {
+ const GFC_UINTEGER_16 * restrict src;
+ const GFC_LOGICAL_1 * restrict msrc;
+ GFC_INTEGER_16 result;
+ src = base;
+ msrc = mbase;
+ {
+
+ GFC_UINTEGER_16 maxval;
+#if defined (GFC_UINTEGER_16_INFINITY)
+ maxval = -GFC_UINTEGER_16_INFINITY;
+#else
+ maxval = -GFC_UINTEGER_16_HUGE;
+#endif
+#if defined (GFC_UINTEGER_16_QUIET_NAN)
+ GFC_INTEGER_16 result2 = 0;
+#endif
+ result = 0;
+ for (n = 0; n < len; n++, src += delta, msrc += mdelta)
+ {
+
+ if (*msrc)
+ {
+#if defined (GFC_UINTEGER_16_QUIET_NAN)
+ if (!result2)
+ result2 = (GFC_INTEGER_16)n + 1;
+ if (*src >= maxval)
+#endif
+ {
+ maxval = *src;
+ result = (GFC_INTEGER_16)n + 1;
+ break;
+ }
+ }
+ }
+#if defined (GFC_UINTEGER_16_QUIET_NAN)
+ if (unlikely (n >= len))
+ result = result2;
+ else
+#endif
+ if (back)
+ for (; n < len; n++, src += delta, msrc += mdelta)
+ {
+ if (*msrc && unlikely (*src >= maxval))
+ {
+ maxval = *src;
+ result = (GFC_INTEGER_16)n + 1;
+ }
+ }
+ else
+ for (; n < len; n++, src += delta, msrc += mdelta)
+ {
+ if (*msrc && unlikely (*src > maxval))
+ {
+ maxval = *src;
+ result = (GFC_INTEGER_16)n + 1;
+ }
+ }
+ *dest = result;
+ }
+ /* Advance to the next element. */
+ count[0]++;
+ base += sstride[0];
+ mbase += mstride[0];
+ dest += dstride[0];
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ base -= sstride[n] * extent[n];
+ mbase -= mstride[n] * extent[n];
+ dest -= dstride[n] * extent[n];
+ n++;
+ if (n >= rank)
+ {
+ /* Break out of the loop. */
+ base = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ base += sstride[n];
+ mbase += mstride[n];
+ dest += dstride[n];
+ }
+ }
+ }
+}
+
+
+extern void smaxloc1_16_m16 (gfc_array_i16 * const restrict,
+ gfc_array_m16 * const restrict, const index_type * const restrict,
+ GFC_LOGICAL_4 *, GFC_LOGICAL_4 back);
+export_proto(smaxloc1_16_m16);
+
+void
+smaxloc1_16_m16 (gfc_array_i16 * const restrict retarray,
+ gfc_array_m16 * const restrict array,
+ const index_type * const restrict pdim,
+ GFC_LOGICAL_4 * mask, GFC_LOGICAL_4 back)
+{
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type dstride[GFC_MAX_DIMENSIONS];
+ GFC_INTEGER_16 * restrict dest;
+ index_type rank;
+ index_type n;
+ index_type dim;
+
+
+ if (mask == NULL || *mask)
+ {
+#ifdef HAVE_BACK_ARG
+ maxloc1_16_m16 (retarray, array, pdim, back);
+#else
+ maxloc1_16_m16 (retarray, array, pdim);
+#endif
+ return;
+ }
+ /* Make dim zero based to avoid confusion. */
+ dim = (*pdim) - 1;
+ rank = GFC_DESCRIPTOR_RANK (array) - 1;
+
+ if (unlikely (dim < 0 || dim > rank))
+ {
+ runtime_error ("Dim argument incorrect in MAXLOC intrinsic: "
+ "is %ld, should be between 1 and %ld",
+ (long int) dim + 1, (long int) rank + 1);
+ }
+
+ for (n = 0; n < dim; n++)
+ {
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
+
+ if (extent[n] <= 0)
+ extent[n] = 0;
+ }
+
+ for (n = dim; n < rank; n++)
+ {
+ extent[n] =
+ GFC_DESCRIPTOR_EXTENT(array,n + 1);
+
+ if (extent[n] <= 0)
+ extent[n] = 0;
+ }
+
+ if (retarray->base_addr == NULL)
+ {
+ size_t alloc_size, str;
+
+ for (n = 0; n < rank; n++)
+ {
+ if (n == 0)
+ str = 1;
+ else
+ str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
+
+ GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
+
+ }
+
+ retarray->offset = 0;
+ retarray->dtype.rank = rank;
+
+ alloc_size = GFC_DESCRIPTOR_STRIDE(retarray,rank-1) * extent[rank-1];
+
+ retarray->base_addr = xmallocarray (alloc_size, sizeof (GFC_INTEGER_16));
+ if (alloc_size == 0)
+ return;
+ }
+ else
+ {
+ if (rank != GFC_DESCRIPTOR_RANK (retarray))
+ runtime_error ("rank of return array incorrect in"
+ " MAXLOC intrinsic: is %ld, should be %ld",
+ (long int) (GFC_DESCRIPTOR_RANK (retarray)),
+ (long int) rank);
+
+ if (unlikely (compile_options.bounds_check))
+ {
+ for (n=0; n < rank; n++)
+ {
+ index_type ret_extent;
+
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,n);
+ if (extent[n] != ret_extent)
+ runtime_error ("Incorrect extent in return value of"
+ " MAXLOC intrinsic in dimension %ld:"
+ " is %ld, should be %ld", (long int) n + 1,
+ (long int) ret_extent, (long int) extent[n]);
+ }
+ }
+ }
+
+ for (n = 0; n < rank; n++)
+ {
+ count[n] = 0;
+ dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
+ }
+
+ dest = retarray->base_addr;
+
+ while(1)
+ {
+ *dest = 0;
+ count[0]++;
+ dest += dstride[0];
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ dest -= dstride[n] * extent[n];
+ n++;
+ if (n >= rank)
+ return;
+ else
+ {
+ count[n]++;
+ dest += dstride[n];
+ }
+ }
+ }
+}
+
+#endif
diff --git a/libgfortran/gfortran.map b/libgfortran/gfortran.map
index 7725e12..742dddf 100644
--- a/libgfortran/gfortran.map
+++ b/libgfortran/gfortran.map
@@ -1786,7 +1786,6 @@ GFORTRAN_15 {
_gfortran_arandom_m2;
_gfortran_arandom_m4;
_gfortran_arandom_m8;
- _gfortran_arandom_m16;
_gfortran_minval_m16;
_gfortran_minval_m1;
_gfortran_minval_m2;
@@ -1832,7 +1831,6 @@ GFORTRAN_15 {
_gfortran_maxloc0_8_m2;
_gfortran_maxloc0_8_m4;
_gfortran_maxloc0_8_m8;
- _gfortran_maxloc1_16_m16;
_gfortran_maxloc1_16_m1;
_gfortran_maxloc1_16_m2;
_gfortran_maxloc1_16_m4;
@@ -1862,7 +1860,6 @@ GFORTRAN_15 {
_gfortran_mmaxloc0_8_m2;
_gfortran_mmaxloc0_8_m4;
_gfortran_mmaxloc0_8_m8;
- _gfortran_mmaxloc1_16_m16;
_gfortran_mmaxloc1_16_m1;
_gfortran_mmaxloc1_16_m2;
_gfortran_mmaxloc1_16_m4;
@@ -1892,7 +1889,6 @@ GFORTRAN_15 {
_gfortran_smaxloc0_8_m2;
_gfortran_smaxloc0_8_m4;
_gfortran_smaxloc0_8_m8;
- _gfortran_smaxloc1_16_m16;
_gfortran_smaxloc1_16_m1;
_gfortran_smaxloc1_16_m2;
_gfortran_smaxloc1_16_m4;
@@ -2028,3 +2024,11 @@ GFORTRAN_15 {
_gfortran_reduce_c;
_gfortran_reduce_scalar_c;
} GFORTRAN_14;
+
+GFORTRAN_15.2 {
+ global:
+ _gfortran_arandom_m16;
+ _gfortran_maxloc1_16_m16;
+ _gfortran_mmaxloc1_16_m16;
+ _gfortran_smaxloc1_16_m16;
+} GFORTRAN_15;
diff --git a/libgfortran/intrinsics/random.c b/libgfortran/intrinsics/random.c
index e0178bf..225eb60 100644
--- a/libgfortran/intrinsics/random.c
+++ b/libgfortran/intrinsics/random.c
@@ -1215,7 +1215,7 @@ arandom_m8 (gfc_array_m8 *x)
}
}
-#ifdef GFC_HAVE_GFC_UINTEGER_16
+#ifdef HAVE_GFC_UINTEGER_16
/* Fill an unsigned array with random bytes. */
diff --git a/libgomp/ChangeLog b/libgomp/ChangeLog
index b4032bc..a0d7c72 100644
--- a/libgomp/ChangeLog
+++ b/libgomp/ChangeLog
@@ -1,3 +1,9 @@
+2025-05-07 Tobias Burnus <tburnus@baylibre.com>
+
+ * testsuite/libgomp.fortran/map-alloc-comp-9.f90: Process differently
+ when USE_USM_REQUIREMENT is set.
+ * testsuite/libgomp.fortran/map-alloc-comp-9-usm.f90: New test.
+
2025-05-06 Tejas Belagod <tejas.belagod@arm.com>
* testsuite/libgomp.c-target/aarch64/udr-sve.c: Fix test.
diff --git a/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9-usm.f90 b/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9-usm.f90
new file mode 100644
index 0000000..90378c0
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9-usm.f90
@@ -0,0 +1,11 @@
+! { dg-additional-options "-cpp -DUSE_USM_REQUIREMENT=1 -Wno-openmp" }
+!
+! We silence the warning:
+! Mapping of polymorphic list item '...' is unspecified behavior [-Wopenmp]
+!
+! Ensure that polymorphic mapping is diagnosed as undefined behavior
+! Ensure that static access to polymorphic variables works
+
+! Run map-alloc-comp-9.f90 in unified-shared-memory mode
+
+#include "map-alloc-comp-9.f90"
diff --git a/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9.f90 b/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9.f90
index 3cec392..26c73d7 100644
--- a/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9.f90
+++ b/libgomp/testsuite/libgomp.fortran/map-alloc-comp-9.f90
@@ -1,8 +1,19 @@
+! { dg-additional-options "-cpp" }
+!
! Ensure that polymorphic mapping is diagnosed as undefined behavior
! Ensure that static access to polymorphic variables works
+! Some extended tests are only run with shared memory
+! To enforce this (where possible) on the device side:
+! #define USE_USM_REQUIREMENT
+! which is done in map-alloc-comp-9-usm.f90
+
subroutine test(case)
implicit none(type, external)
+#ifdef USE_USM_REQUIREMENT
+ !$omp requires unified_shared_memory
+#endif
+
type t
integer :: x(4)
end type t
@@ -73,10 +84,14 @@ var4%y2(2)%y%x%x = -7 * [1111,2222,3333,4444]
var4%y2(2)%y%x2(1)%x = -8 * [1111,2222,3333,4444]
var4%y2(2)%y%x2(2)%x = -9 * [1111,2222,3333,4444]
+#ifdef USE_USM_REQUIREMENT
+is_shared_mem = .true.
+#else
is_shared_mem = .false.
!$omp target map(to: is_shared_mem)
is_shared_mem = .true.
!$omp end target
+#endif
if (case == 1) then
! implicit mapping
@@ -532,6 +547,10 @@ end subroutine test
program main
use omp_lib
implicit none(type, external)
+#ifdef USE_USM_REQUIREMENT
+ !$omp requires unified_shared_memory
+#endif
+
interface
subroutine test(case)
integer, value :: case
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 8aa7ec8..b7cce6d 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,54 @@
+2025-05-07 Jonathan Wakely <jwakely@redhat.com>
+
+ PR libstdc++/120159
+ * src/c++23/std.cc.in (is_layout_compatible_v): Export.
+
+2025-05-07 Jonathan Wakely <jwakely@redhat.com>
+
+ * src/c++23/std.cc.in: Fix export for std::extents.
+
+2025-05-07 Luc Grosheintz <luc.grosheintz@gmail.com>
+
+ * testsuite/23_containers/mdspan/extents/class_mandates_neg.cc: New test.
+ * testsuite/23_containers/mdspan/extents/ctor_copy.cc: New test.
+ * testsuite/23_containers/mdspan/extents/ctor_ints.cc: New test.
+ * testsuite/23_containers/mdspan/extents/ctor_shape.cc: New test.
+ * testsuite/23_containers/mdspan/extents/custom_integer.cc: New test.
+ * testsuite/23_containers/mdspan/extents/misc.cc: New test.
+
+2025-05-07 Luc Grosheintz <luc.grosheintz@gmail.com>
+
+ PR libstdc++/107761
+ * include/std/mdspan (extents): New class.
+ * src/c++23/std.cc.in: Add 'using std::extents'.
+
+2025-05-07 Luc Grosheintz <luc.grosheintz@gmail.com>
+
+ * doc/doxygen/user.cfg.in: Add <mdspan>.
+ * include/Makefile.am: Ditto.
+ * include/Makefile.in: Ditto.
+ * include/precompiled/stdc++.h: Ditto.
+ * include/std/mdspan: New file.
+
+2025-05-07 Luc Grosheintz <luc.grosheintz@gmail.com>
+
+ * include/bits/version.def: Add internal feature testing macro
+ __glibcxx_mdspan.
+ * include/bits/version.h: Regenerate.
+
+2025-05-07 Tomasz Kamiński <tkaminsk@redhat.com>
+
+ PR libstdc++/120114
+ * include/bits/chrono_io.h (__formatter_chrono::_M_format): Use __field_width.
+ * testsuite/std/time/format/pr120114.cc: New test.
+
+2025-05-07 Jonathan Wakely <jwakely@redhat.com>
+
+ PR libstdc++/120147
+ * acinclude.m4 (GLIBCXX_ENABLE_BACKTRACE): Restore use of
+ AC_LANG_CPLUSPLUS.
+ * configure: Regenerate.
+
2025-05-06 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/70560
diff --git a/libstdc++-v3/acinclude.m4 b/libstdc++-v3/acinclude.m4
index 0fc74d0..204bed5b 100644
--- a/libstdc++-v3/acinclude.m4
+++ b/libstdc++-v3/acinclude.m4
@@ -5290,7 +5290,8 @@ AC_DEFUN([GLIBCXX_ENABLE_BACKTRACE], [
BACKTRACE_CPPFLAGS="-D_GNU_SOURCE"
- GLIBCXX_LANG_PUSH
+ AC_LANG_CPLUSPLUS
+ old_CXXFLAGS="$CXXFLAGS"
# libbacktrace's own configure.ac only tests atomics for int,
# but the code actually uses atomics for size_t and pointers as well.
@@ -5356,7 +5357,8 @@ EOF
rm -f conftest*
fi
- GLIBCXX_LANG_POP
+ CXXFLAGS="$old_CXXFLAGS"
+ AC_LANG_RESTORE
if test "$glibcxx_cv_libbacktrace_atomics" = yes; then
BACKTRACE_CPPFLAGS="$BACKTRACE_CPPFLAGS -DHAVE_ATOMIC_FUNCTIONS=1"
diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure
index 3fd03b8..0529ff5 100755
--- a/libstdc++-v3/configure
+++ b/libstdc++-v3/configure
@@ -53537,7 +53537,13 @@ fi
BACKTRACE_CPPFLAGS="-D_GNU_SOURCE"
- GLIBCXX_LANG_PUSH
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ old_CXXFLAGS="$CXXFLAGS"
# libbacktrace's own configure.ac only tests atomics for int,
# but the code actually uses atomics for size_t and pointers as well.
@@ -53578,7 +53584,7 @@ main ()
return 0;
}
_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
+if ac_fn_cxx_try_link "$LINENO"; then :
glibcxx_cv_libbacktrace_atomics=yes
else
glibcxx_cv_libbacktrace_atomics=no
@@ -53595,7 +53601,7 @@ $as_echo "$glibcxx_cv_libbacktrace_atomics" >&6; }
CXXFLAGS='-O0 -S'
cat > conftest.$ac_ext << EOF
-#line 53598 "configure"
+#line 53604 "configure"
#include <stddef.h>
int main()
{
@@ -53633,7 +53639,13 @@ $as_echo "$glibcxx_cv_libbacktrace_atomics" >&6; }
rm -f conftest*
fi
- GLIBCXX_LANG_POP
+ CXXFLAGS="$old_CXXFLAGS"
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
if test "$glibcxx_cv_libbacktrace_atomics" = yes; then
BACKTRACE_CPPFLAGS="$BACKTRACE_CPPFLAGS -DHAVE_ATOMIC_FUNCTIONS=1"
diff --git a/libstdc++-v3/doc/doxygen/user.cfg.in b/libstdc++-v3/doc/doxygen/user.cfg.in
index 19ae67a..e926c67 100644
--- a/libstdc++-v3/doc/doxygen/user.cfg.in
+++ b/libstdc++-v3/doc/doxygen/user.cfg.in
@@ -880,6 +880,7 @@ INPUT = @srcdir@/doc/doxygen/doxygroups.cc \
include/list \
include/locale \
include/map \
+ include/mdspan \
include/memory \
include/memory_resource \
include/mutex \
diff --git a/libstdc++-v3/include/Makefile.am b/libstdc++-v3/include/Makefile.am
index 537774c..1140fa0 100644
--- a/libstdc++-v3/include/Makefile.am
+++ b/libstdc++-v3/include/Makefile.am
@@ -38,6 +38,7 @@ std_freestanding = \
${std_srcdir}/generator \
${std_srcdir}/iterator \
${std_srcdir}/limits \
+ ${std_srcdir}/mdspan \
${std_srcdir}/memory \
${std_srcdir}/numbers \
${std_srcdir}/numeric \
diff --git a/libstdc++-v3/include/Makefile.in b/libstdc++-v3/include/Makefile.in
index 7b96b22..c96e981 100644
--- a/libstdc++-v3/include/Makefile.in
+++ b/libstdc++-v3/include/Makefile.in
@@ -396,6 +396,7 @@ std_freestanding = \
${std_srcdir}/generator \
${std_srcdir}/iterator \
${std_srcdir}/limits \
+ ${std_srcdir}/mdspan \
${std_srcdir}/memory \
${std_srcdir}/numbers \
${std_srcdir}/numeric \
diff --git a/libstdc++-v3/include/bits/chrono_io.h b/libstdc++-v3/include/bits/chrono_io.h
index b7f6f5f..620227a 100644
--- a/libstdc++-v3/include/bits/chrono_io.h
+++ b/libstdc++-v3/include/bits/chrono_io.h
@@ -705,8 +705,13 @@ namespace __format
if (__write_direct)
return __out;
- auto __str = std::move(__sink).get();
- return __format::__write_padded_as_spec(__str, __str.size(),
+ auto __str = __sink.view();
+ size_t __width;
+ if constexpr (__unicode::__literal_encoding_is_unicode<_CharT>())
+ __width = __unicode::__field_width(__str);
+ else
+ __width = __str.size();
+ return __format::__write_padded_as_spec(__str, __width,
__fc, _M_spec);
}
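
With this change the padded width of a formatted chrono string is
measured in display columns (__field_width) rather than in code units
whenever the literal encoding is Unicode.  A hypothetical illustration,
assuming a UTF-8 literal encoding and East Asian wide glyphs:

    #include <chrono>
    #include <format>
    #include <iostream>

    int main()
    {
      using namespace std::chrono;
      // "01時30" is 7 UTF-8 code units but 6 display columns (時 is
      // double width); ">8" should therefore insert two fill
      // characters, not one.
      std::cout << std::format("[{:>8%H時%M}]", 90min) << '\n';
    }
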
diff --git a/libstdc++-v3/include/bits/version.def b/libstdc++-v3/include/bits/version.def
index 282667e..f4d3de8 100644
--- a/libstdc++-v3/include/bits/version.def
+++ b/libstdc++-v3/include/bits/version.def
@@ -1000,6 +1000,15 @@ ftms = {
};
ftms = {
+ name = mdspan;
+ no_stdname = true; // FIXME: remove
+ values = {
+ v = 1; // FIXME: 202207
+ cxxmin = 23;
+ };
+};
+
+ftms = {
name = ssize;
values = {
v = 201902;
diff --git a/libstdc++-v3/include/bits/version.h b/libstdc++-v3/include/bits/version.h
index bb7c047..d5d75ce 100644
--- a/libstdc++-v3/include/bits/version.h
+++ b/libstdc++-v3/include/bits/version.h
@@ -1114,6 +1114,15 @@
#endif /* !defined(__cpp_lib_span) && defined(__glibcxx_want_span) */
#undef __glibcxx_want_span
+#if !defined(__cpp_lib_mdspan)
+# if (__cplusplus >= 202100L)
+# define __glibcxx_mdspan 1L
+# if defined(__glibcxx_want_all) || defined(__glibcxx_want_mdspan)
+# endif
+# endif
+#endif /* !defined(__cpp_lib_mdspan) && defined(__glibcxx_want_mdspan) */
+#undef __glibcxx_want_mdspan
+
#if !defined(__cpp_lib_ssize)
# if (__cplusplus >= 202002L)
# define __glibcxx_ssize 201902L
diff --git a/libstdc++-v3/include/precompiled/stdc++.h b/libstdc++-v3/include/precompiled/stdc++.h
index f4b312d..e7d89c9 100644
--- a/libstdc++-v3/include/precompiled/stdc++.h
+++ b/libstdc++-v3/include/precompiled/stdc++.h
@@ -228,6 +228,7 @@
#include <flat_map>
#include <flat_set>
#include <generator>
+#include <mdspan>
#include <print>
#include <spanstream>
#include <stacktrace>
diff --git a/libstdc++-v3/include/std/mdspan b/libstdc++-v3/include/std/mdspan
new file mode 100644
index 0000000..aee96dd
--- /dev/null
+++ b/libstdc++-v3/include/std/mdspan
@@ -0,0 +1,309 @@
+// <mdspan> -*- C++ -*-
+
+// Copyright The GNU Toolchain Authors.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file mdspan
+ * This is a Standard C++ Library header.
+ */
+
+#ifndef _GLIBCXX_MDSPAN
+#define _GLIBCXX_MDSPAN 1
+
+#ifdef _GLIBCXX_SYSHDR
+#pragma GCC system_header
+#endif
+
+#include <span>
+#include <array>
+#include <type_traits>
+#include <limits>
+#include <utility>
+
+#define __glibcxx_want_mdspan
+#include <bits/version.h>
+
+#ifdef __glibcxx_mdspan
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+ namespace __mdspan
+ {
+ template<typename _IndexType, array _Extents>
+ class _ExtentsStorage
+ {
+ public:
+ static consteval bool
+ _S_is_dyn(size_t __ext) noexcept
+ { return __ext == dynamic_extent; }
+
+ template<typename _OIndexType>
+ static constexpr _IndexType
+ _S_int_cast(const _OIndexType& __other) noexcept
+ { return _IndexType(__other); }
+
+ static constexpr size_t _S_rank = _Extents.size();
+
+ // For __r in [0, _S_rank], _S_dynamic_index[__r] is the number
+ // of dynamic extents up to (and not including) __r.
+ //
+ // If __r is the index of a dynamic extent, then
+ // _S_dynamic_index[__r] is the index of that extent in
+ // _M_dynamic_extents.
+ static constexpr auto _S_dynamic_index = [] consteval
+ {
+ array<size_t, _S_rank+1> __ret;
+ size_t __dyn = 0;
+ for (size_t __i = 0; __i < _S_rank; ++__i)
+ {
+ __ret[__i] = __dyn;
+ __dyn += _S_is_dyn(_Extents[__i]);
+ }
+ __ret[_S_rank] = __dyn;
+ return __ret;
+ }();
+
+ static constexpr size_t _S_rank_dynamic = _S_dynamic_index[_S_rank];
+
+ // For __r in [0, _S_rank_dynamic), _S_dynamic_index_inv[__r] is the
+ // index of the __r-th dynamic extent in _Extents.
+ static constexpr auto _S_dynamic_index_inv = [] consteval
+ {
+ array<size_t, _S_rank_dynamic> __ret;
+ for (size_t __i = 0, __r = 0; __i < _S_rank; ++__i)
+ if (_S_is_dyn(_Extents[__i]))
+ __ret[__r++] = __i;
+ return __ret;
+ }();
+
+ static constexpr size_t
+ _S_static_extent(size_t __r) noexcept
+ { return _Extents[__r]; }
+
+ constexpr _IndexType
+ _M_extent(size_t __r) const noexcept
+ {
+ auto __se = _Extents[__r];
+ if (__se == dynamic_extent)
+ return _M_dynamic_extents[_S_dynamic_index[__r]];
+ else
+ return __se;
+ }
+
+ template<size_t _OtherRank, typename _GetOtherExtent>
+ constexpr void
+ _M_init_dynamic_extents(_GetOtherExtent __get_extent) noexcept
+ {
+ for (size_t __i = 0; __i < _S_rank_dynamic; ++__i)
+ {
+ size_t __di = __i;
+ if constexpr (_OtherRank != _S_rank_dynamic)
+ __di = _S_dynamic_index_inv[__i];
+ _M_dynamic_extents[__i] = _S_int_cast(__get_extent(__di));
+ }
+ }
+
+ constexpr
+ _ExtentsStorage() noexcept = default;
+
+ template<typename _OIndexType, array _OExtents>
+ constexpr
+ _ExtentsStorage(const _ExtentsStorage<_OIndexType, _OExtents>&
+ __other) noexcept
+ {
+ _M_init_dynamic_extents<_S_rank>([&__other](size_t __i)
+ { return __other._M_extent(__i); });
+ }
+
+ template<typename _OIndexType, size_t _Nm>
+ constexpr
+ _ExtentsStorage(span<const _OIndexType, _Nm> __exts) noexcept
+ {
+ _M_init_dynamic_extents<_Nm>(
+ [&__exts](size_t __i) -> const _OIndexType&
+ { return __exts[__i]; });
+ }
+
+ private:
+ using _S_storage = __array_traits<_IndexType, _S_rank_dynamic>::_Type;
+ [[no_unique_address]] _S_storage _M_dynamic_extents;
+ };
+
+ template<typename _OIndexType, typename _SIndexType>
+ concept __valid_index_type =
+ is_convertible_v<_OIndexType, _SIndexType> &&
+ is_nothrow_constructible_v<_SIndexType, _OIndexType>;
+
+ template<size_t _Extent, typename _IndexType>
+ concept
+ __valid_static_extent = _Extent == dynamic_extent
+ || _Extent <= numeric_limits<_IndexType>::max();
+ }
+
+ template<typename _IndexType, size_t... _Extents>
+ class extents
+ {
+ static_assert(is_integral_v<_IndexType>, "_IndexType must be integral.");
+ static_assert(
+ (__mdspan::__valid_static_extent<_Extents, _IndexType> && ...),
+ "Extents must either be dynamic or representable as _IndexType");
+
+ public:
+ using index_type = _IndexType;
+ using size_type = make_unsigned_t<index_type>;
+ using rank_type = size_t;
+
+ static constexpr rank_type
+ rank() noexcept { return _S_storage::_S_rank; }
+
+ static constexpr rank_type
+ rank_dynamic() noexcept { return _S_storage::_S_rank_dynamic; }
+
+ static constexpr size_t
+ static_extent(rank_type __r) noexcept
+ {
+ __glibcxx_assert(__r < rank());
+ if constexpr (rank() == 0)
+ __builtin_trap();
+ else
+ return _S_storage::_S_static_extent(__r);
+ }
+
+ constexpr index_type
+ extent(rank_type __r) const noexcept
+ {
+ __glibcxx_assert(__r < rank());
+ if constexpr (rank() == 0)
+ __builtin_trap();
+ else
+ return _M_dynamic_extents._M_extent(__r);
+ }
+
+ constexpr
+ extents() noexcept = default;
+
+ private:
+ static consteval bool
+ _S_is_less_dynamic(size_t __ext, size_t __oext)
+ { return (__ext != dynamic_extent) && (__oext == dynamic_extent); }
+
+ template<typename _OIndexType, size_t... _OExtents>
+ static consteval bool
+ _S_ctor_explicit()
+ {
+ return (_S_is_less_dynamic(_Extents, _OExtents) || ...)
+ || (numeric_limits<index_type>::max()
+ < numeric_limits<_OIndexType>::max());
+ }
+
+ template<size_t... _OExtents>
+ static consteval bool
+ _S_is_compatible_extents()
+ {
+ if constexpr (sizeof...(_OExtents) != rank())
+ return false;
+ else
+ return ((_OExtents == dynamic_extent || _Extents == dynamic_extent
+ || _OExtents == _Extents) && ...);
+ }
+
+ public:
+ template<typename _OIndexType, size_t... _OExtents>
+ requires (_S_is_compatible_extents<_OExtents...>())
+ constexpr explicit(_S_ctor_explicit<_OIndexType, _OExtents...>())
+ extents(const extents<_OIndexType, _OExtents...>& __other) noexcept
+ : _M_dynamic_extents(__other._M_dynamic_extents)
+ { }
+
+ template<__mdspan::__valid_index_type<index_type>... _OIndexTypes>
+ requires (sizeof...(_OIndexTypes) == rank()
+ || sizeof...(_OIndexTypes) == rank_dynamic())
+ constexpr explicit extents(_OIndexTypes... __exts) noexcept
+ : _M_dynamic_extents(span<const _IndexType, sizeof...(_OIndexTypes)>(
+ initializer_list{_S_storage::_S_int_cast(__exts)...}))
+ { }
+
+ template<__mdspan::__valid_index_type<index_type> _OIndexType, size_t _Nm>
+ requires (_Nm == rank() || _Nm == rank_dynamic())
+ constexpr explicit(_Nm != rank_dynamic())
+ extents(span<_OIndexType, _Nm> __exts) noexcept
+ : _M_dynamic_extents(span<const _OIndexType, _Nm>(__exts))
+ { }
+
+ template<__mdspan::__valid_index_type<index_type> _OIndexType, size_t _Nm>
+ requires (_Nm == rank() || _Nm == rank_dynamic())
+ constexpr explicit(_Nm != rank_dynamic())
+ extents(const array<_OIndexType, _Nm>& __exts) noexcept
+ : _M_dynamic_extents(span<const _OIndexType, _Nm>(__exts))
+ { }
+
+ template<typename _OIndexType, size_t... _OExtents>
+ friend constexpr bool
+ operator==(const extents& __self,
+ const extents<_OIndexType, _OExtents...>& __other) noexcept
+ {
+ if constexpr (!_S_is_compatible_extents<_OExtents...>())
+ return false;
+ else
+ {
+ for (size_t __i = 0; __i < __self.rank(); ++__i)
+ if (!cmp_equal(__self.extent(__i), __other.extent(__i)))
+ return false;
+ return true;
+ }
+ }
+
+ private:
+ using _S_storage = __mdspan::_ExtentsStorage<
+ _IndexType, array<size_t, sizeof...(_Extents)>{_Extents...}>;
+ [[no_unique_address]] _S_storage _M_dynamic_extents;
+
+ template<typename _OIndexType, size_t... _OExtents>
+ friend class extents;
+ };
+
+ namespace __mdspan
+ {
+ template<typename _IndexType, size_t... _Counts>
+ auto __build_dextents_type(integer_sequence<size_t, _Counts...>)
+ -> extents<_IndexType, ((void) _Counts, dynamic_extent)...>;
+
+ template<typename _Tp>
+ consteval size_t
+ __dynamic_extent() { return dynamic_extent; }
+ }
+
+ template<typename _IndexType, size_t _Rank>
+ using dextents = decltype(__mdspan::__build_dextents_type<_IndexType>(
+ make_index_sequence<_Rank>()));
+
+ template<typename... _Integrals>
+ requires (is_convertible_v<_Integrals, size_t> && ...)
+ explicit extents(_Integrals...) ->
+ extents<size_t, __mdspan::__dynamic_extent<_Integrals>()...>;
+
+_GLIBCXX_END_NAMESPACE_VERSION
+}
+#endif
+#endif
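With the header in place, a short usage sketch (not from the patch) of what this first installment provides; layout_*, default_accessor and mdspan itself are still to come, per the FIXME in std.cc.in below:

#include <mdspan>
#include <cassert>
#include <type_traits>

int main()
{
  constexpr auto dyn = std::dynamic_extent;

  // One static and one dynamic extent; only the latter is stored.
  std::extents<int, 3, dyn> e(4);
  static_assert(decltype(e)::rank() == 2 && decltype(e)::rank_dynamic() == 1);
  static_assert(decltype(e)::static_extent(1) == dyn);
  assert(e.extent(0) == 3 && e.extent(1) == 4);
  static_assert(sizeof(e) == sizeof(int));  // _ExtentsStorage holds one int

  // dextents is the all-dynamic shorthand; CTAD also deduces it.
  std::dextents<int, 2> d(3, 4);
  std::extents g(3, 4);  // deduced as std::dextents<size_t, 2>
  static_assert(std::is_same_v<decltype(g), std::dextents<size_t, 2>>);
  assert(e == d && d == g);
}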
diff --git a/libstdc++-v3/src/c++23/std.cc.in b/libstdc++-v3/src/c++23/std.cc.in
index 930a489..d45ae63 100644
--- a/libstdc++-v3/src/c++23/std.cc.in
+++ b/libstdc++-v3/src/c++23/std.cc.in
@@ -1833,7 +1833,14 @@ export namespace std
}
}
-// FIXME <mdspan>
+// <mdspan>
+#if __glibcxx_mdspan
+export namespace std
+{
+ using std::extents;
+ // FIXME layout_*, default_accessor and mdspan
+}
+#endif
// 20.2 <memory>
export namespace std
@@ -3107,6 +3114,7 @@ export namespace std
#if __cpp_lib_is_layout_compatible
using std::is_corresponding_member;
using std::is_layout_compatible;
+ using std::is_layout_compatible_v;
#endif
#if __cpp_lib_is_pointer_interconvertible
using std::is_pointer_interconvertible_base_of;
diff --git a/libstdc++-v3/testsuite/23_containers/mdspan/extents/class_mandates_neg.cc b/libstdc++-v3/testsuite/23_containers/mdspan/extents/class_mandates_neg.cc
new file mode 100644
index 0000000..b654e39
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/mdspan/extents/class_mandates_neg.cc
@@ -0,0 +1,8 @@
+// { dg-do compile { target c++23 } }
+#include<mdspan>
+
+std::extents<char, size_t(1) << 9> e1; // { dg-error "from here" }
+std::extents<double, 1> e2; // { dg-error "from here" }
+// { dg-prune-output "dynamic or representable as _IndexType" }
+// { dg-prune-output "must be integral" }
+// { dg-prune-output "invalid use of incomplete type" }
diff --git a/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_copy.cc b/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_copy.cc
new file mode 100644
index 0000000..a7b3a169
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_copy.cc
@@ -0,0 +1,82 @@
+// { dg-do run { target c++23 } }
+#include <mdspan>
+
+#include <testsuite_hooks.h>
+
+// Test the copy ctor and the ctor from other extents.
+
+constexpr auto dyn = std::dynamic_extent;
+
+// Not constructible
+static_assert(!std::is_constructible_v<std::extents<int>,
+ std::extents<int, 1>>);
+
+static_assert(!std::is_constructible_v<std::extents<int, 1, 1>,
+ std::extents<int, 1>>);
+
+static_assert(!std::is_constructible_v<std::extents<int, dyn>,
+ std::extents<int, dyn, dyn>>);
+
+static_assert(!std::is_constructible_v<std::extents<int, 2, 2>,
+ std::extents<int, 1, 2>>);
+
+// Nothrow constructible
+static_assert(std::is_nothrow_constructible_v<std::extents<int, 1>,
+ std::extents<unsigned int, dyn>>);
+static_assert(std::is_nothrow_constructible_v<std::extents<unsigned int, dyn>,
+ std::extents<int, 1>>);
+
+// Implicit conversion
+static_assert(!std::is_convertible_v<std::extents<unsigned int>,
+ std::extents<int>>);
+static_assert(std::is_convertible_v<std::extents<int>,
+ std::extents<unsigned int>>);
+
+static_assert(!std::is_convertible_v<std::extents<unsigned int, 1>,
+ std::extents<int, 1>>);
+static_assert(std::is_convertible_v<std::extents<int, 1>,
+ std::extents<unsigned int, 1>>);
+
+static_assert(!std::is_convertible_v<std::extents<int, dyn>,
+ std::extents<int, 1>>);
+static_assert(std::is_convertible_v<std::extents<int, 1>,
+ std::extents<int, dyn>>);
+
+static_assert(!std::is_convertible_v<std::extents<unsigned int, 1>,
+ std::extents<int, dyn>>);
+static_assert(std::is_convertible_v<std::extents<int, 1>,
+ std::extents<unsigned int, dyn>>);
+
+template<typename T, size_t... Extents, typename Other>
+ constexpr void
+ test_ctor(const Other& other)
+ {
+ auto e = std::extents<T, Extents...>(other);
+ VERIFY(e == other);
+ }
+
+constexpr int
+test_all()
+{
+ auto e0 = std::extents<int>();
+ test_ctor<int>(e0);
+
+ auto e1 = std::extents<int, 1, 2, 3>();
+ test_ctor<int, 1, 2, 3>(e1);
+ test_ctor<int, 1, dyn, 3>(e1);
+ test_ctor<unsigned int, 1, dyn, 3>(e1);
+
+ auto e2 = std::extents<unsigned int, 1, dyn, 3>{1, 2, 3};
+ test_ctor<int, 1, 2, 3>(e2);
+ test_ctor<int, 1, dyn, 3>(e2);
+ test_ctor<int, 1, dyn, dyn>(e2);
+ return true;
+}
+
+int
+main()
+{
+ test_all();
+ static_assert(test_all());
+ return 0;
+}
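The pattern these static_asserts encode: conversion between extents types is implicit only when it cannot lose information, i.e. no dynamic extent becomes static and the index type does not narrow. A small sketch under those rules:

#include <mdspan>

int main()
{
  constexpr auto dyn = std::dynamic_extent;

  std::extents<int, 2, dyn> src(3);
  std::extents<int, 2, 3>   narrowed(src);   // OK, but only via the explicit ctor
  // std::extents<int, 2, 3> bad = src;      // ill-formed: dyn would bind to static
  std::extents<long, 2, dyn> widened = src;  // implicit: no narrowing, no dyn -> static
  (void) narrowed; (void) widened;
}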
diff --git a/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_ints.cc b/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_ints.cc
new file mode 100644
index 0000000..3a70efd
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_ints.cc
@@ -0,0 +1,62 @@
+// { dg-do run { target c++23 } }
+#include <mdspan>
+
+#include <testsuite_hooks.h>
+
+constexpr auto dyn = std::dynamic_extent;
+
+class A {};
+
+// Not constructible if the number of integer-like arguments isn't either
+// rank() or rank_dynamic().
+static_assert(!std::is_constructible_v<std::extents<int>, int>);
+static_assert(!std::is_constructible_v<std::extents<int, dyn, dyn>, int>);
+static_assert(!std::is_constructible_v<std::extents<int, 1, dyn, 3>, int, int>);
+
+// Not constructible from non integer-like objects.
+static_assert(!std::is_constructible_v<std::extents<int, 1>, int, A>);
+
+// No implicit conversion from integer-like objects.
+template<typename Extent, typename... OExtents>
+ constexpr bool
+ is_explicit()
+ {
+ return std::is_nothrow_constructible_v<Extent, OExtents...>
+ && !std::is_convertible_v<OExtents..., Extent>;
+ }
+
+static_assert(is_explicit<std::extents<int, 1>, int>());
+static_assert(is_explicit<std::extents<int, 1>, unsigned int>());
+static_assert(is_explicit<std::extents<unsigned int, 1>, int>());
+
+constexpr bool
+test_all()
+{
+ auto expected = std::extents<int, 1, 2, 3>(1, 2, 3);
+
+ // From all extents.
+ VERIFY((std::extents<int, 1, 2, 3>(1, 2, 3)) == expected);
+ VERIFY((std::extents<int, dyn, 2, 3>(1, 2, 3)) == expected);
+ VERIFY((std::extents<int, dyn, 2, dyn>(1, 2, 3)) == expected);
+
+ VERIFY((std::extents<int, 1, 2, 3>{1, 2, 3}) == expected);
+ VERIFY((std::extents<int, dyn, 2, 3>{1, 2, 3}) == expected);
+ VERIFY((std::extents<int, dyn, 2, dyn>{1, 2, 3}) == expected);
+
+ // From only dynamic extents.
+ VERIFY((std::extents<int, dyn, 2, 3>(1)) == expected);
+ VERIFY((std::extents<int, dyn, 2, dyn>(1, 3)) == expected);
+
+ VERIFY((std::extents<int, dyn, 2, 3>{1}) == expected);
+ VERIFY((std::extents<int, dyn, 2, dyn>{1, 3}) == expected);
+
+ return true;
+}
+
+int
+main()
+{
+ test_all();
+ static_assert(test_all());
+ return 0;
+}
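The is_explicit helper above pins down that construction from integers always requires direct-initialization; a two-line sketch of what that means for user code:

#include <mdspan>

int main()
{
  std::extents<int, std::dynamic_extent> ok(42);      // direct-init: fine
  // std::extents<int, std::dynamic_extent> bad = 42; // ill-formed: ctor is explicit
  (void) ok;
}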
diff --git a/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_shape.cc b/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_shape.cc
new file mode 100644
index 0000000..01624f2
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/mdspan/extents/ctor_shape.cc
@@ -0,0 +1,160 @@
+// { dg-do run { target c++23 } }
+#include <mdspan>
+
+#include <testsuite_hooks.h>
+
+constexpr auto dyn = std::dynamic_extent;
+
+template<typename Extent, typename T, size_t N>
+ constexpr bool
+ constructible()
+ {
+ return std::is_nothrow_constructible_v<Extent, std::array<T, N>>
+ && std::is_nothrow_constructible_v<Extent, std::span<T, N>>;
+ }
+
+template<typename Extent, typename T, size_t N>
+ constexpr bool
+ not_constructible()
+ {
+ return !std::is_constructible_v<Extent, std::array<T, N>>
+ && !std::is_constructible_v<Extent, std::span<T, N>>;
+ }
+
+template<typename Extent, typename T, size_t N>
+ constexpr bool
+ convertible()
+ {
+ return std::is_convertible_v<std::array<T, N>, Extent>
+ && std::is_convertible_v<std::span<T, N>, Extent>;
+ }
+
+template<typename Extent, typename T, size_t N>
+ constexpr bool
+ not_convertible()
+ {
+ return !std::is_convertible_v<std::array<T, N>, Extent>
+ && !std::is_convertible_v<std::span<T, N>, Extent>;
+ }
+
+static_assert(constructible<std::extents<int, 1, 2>, int, 2>());
+static_assert(not_constructible<std::extents<int, 1, 2>, int, 1>());
+
+static_assert(constructible<std::extents<int>, int, 0>());
+static_assert(convertible<std::extents<int>, int, 0>());
+static_assert(convertible<std::extents<unsigned int>, int, 0>());
+static_assert(convertible<std::extents<int>, unsigned int, 0>());
+
+static_assert(constructible<std::extents<int, 1, dyn>, int, 1>());
+static_assert(convertible<std::extents<int, 1, dyn>, int, 1>());
+static_assert(convertible<std::extents<unsigned int, 1, dyn>, int, 1>());
+static_assert(convertible<std::extents<int, 1, dyn>, unsigned int, 1>());
+
+static_assert(constructible<std::extents<int, 1, dyn>, int, 2>());
+static_assert(not_convertible<std::extents<int, 1, dyn>, int, 2>());
+static_assert(not_convertible<std::extents<unsigned int, 1, dyn>, int, 2>());
+static_assert(not_convertible<std::extents<int, 1, dyn>, unsigned int, 2>());
+
+// Non-integer, but convertible.
+static_assert(constructible<std::extents<int, dyn>, double, 1>());
+static_assert(convertible<std::extents<int, dyn>, double, 1>());
+
+namespace all_extents
+{
+ template<typename Shape>
+ constexpr void
+ test_ctor(Shape shape)
+ {
+ auto expected = std::extents<int, 1, 2, 3>();
+ VERIFY((std::extents<int, 1, dyn, 3>(shape)) == expected);
+ VERIFY((std::extents<int, dyn, dyn, dyn>(shape)) == expected);
+ VERIFY((std::extents<int, 1, 2, 3>(shape)) == expected);
+ }
+
+ constexpr void
+ test_common_shapes()
+ {
+ auto array = std::array<int, 3>{1, 2, 3};
+ auto span_const = std::span<const int, 3>(array);
+ auto span = std::span<int, 3>(array);
+
+ test_ctor(array);
+ test_ctor(span);
+ test_ctor(span_const);
+ }
+
+ constexpr void
+ test_empty_shapes()
+ {
+ auto shape = std::array<int, 0>();
+ auto span = std::span<int, 0>(shape);
+
+ auto expected = std::extents<int>();
+ VERIFY((std::extents<int>(shape)) == expected);
+ VERIFY((std::extents<int>(span)) == expected);
+ }
+
+ constexpr bool
+ test_all()
+ {
+ test_common_shapes();
+ test_empty_shapes();
+ return true;
+ }
+}
+
+namespace only_dynamic_extents
+{
+ template<typename Extents, typename Shape>
+ constexpr void
+ test_ctor(const Shape& shape)
+ {
+ Extents e = shape;
+
+ VERIFY(e.rank_dynamic() == shape.size());
+
+ size_t di = 0;
+ for(size_t i = 0; i < e.rank(); ++i)
+ if(e.static_extent(i) == dyn)
+ VERIFY(e.extent(i) == shape[di++]);
+ }
+
+ template<typename Extents, typename T, size_t N>
+ constexpr void
+ test_all_shape_types(std::array<T, N> shape)
+ {
+ test_ctor<Extents>(shape);
+ test_ctor<Extents>(std::span<T, N>(shape));
+ test_ctor<Extents>(std::span<const T, N>(shape));
+ }
+
+ constexpr void
+ test_common_shapes()
+ {
+ auto s = std::array<int, 0>{};
+ auto s2 = std::array<int, 1>{2};
+ auto s123 = std::array<int, 3>{1, 2, 3};
+
+ test_all_shape_types<std::extents<int, 1, dyn, 3>>(s2);
+ test_all_shape_types<std::extents<int, dyn, dyn, dyn>>(s123);
+ test_all_shape_types<std::extents<int, 1, 2, 3>>(s);
+ }
+
+ constexpr bool
+ test_all()
+ {
+ test_common_shapes();
+ return true;
+ }
+}
+
+int
+main()
+{
+ all_extents::test_all();
+ static_assert(all_extents::test_all());
+
+ only_dynamic_extents::test_all();
+ static_assert(only_dynamic_extents::test_all());
+ return 0;
+}
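A compact restatement of the rule these shape tests exercise: construction from an array or span of size N is implicit when N equals rank_dynamic(), and still available but explicit when N equals rank(). For example:

#include <mdspan>
#include <array>

int main()
{
  constexpr auto dyn = std::dynamic_extent;

  std::array<int, 1> just_dynamic{4};
  std::extents<int, 3, dyn> a = just_dynamic; // implicit: N == rank_dynamic()

  std::array<int, 2> full_shape{3, 4};
  std::extents<int, 3, dyn> b(full_shape);    // explicit required: N == rank()
  (void) a; (void) b;
}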
diff --git a/libstdc++-v3/testsuite/23_containers/mdspan/extents/custom_integer.cc b/libstdc++-v3/testsuite/23_containers/mdspan/extents/custom_integer.cc
new file mode 100644
index 0000000..2907ad1
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/mdspan/extents/custom_integer.cc
@@ -0,0 +1,87 @@
+// { dg-do run { target c++23 } }
+#include <mdspan>
+
+#include <testsuite_hooks.h>
+
+// Test construction from a custom integer-like object that has
+// no copy/move constructor and no copy/move assignment operator.
+
+constexpr size_t dyn = std::dynamic_extent;
+
+class IntLike
+{
+public:
+ explicit
+ IntLike(int i)
+ : _M_i(i)
+ { }
+
+ IntLike() = delete;
+ IntLike(const IntLike&) = delete;
+ IntLike(IntLike&&) = delete;
+
+ const IntLike&
+ operator=(const IntLike&) = delete;
+
+ const IntLike&
+ operator=(IntLike&&) = delete;
+
+ constexpr
+ operator int() const noexcept
+ { return _M_i; }
+
+private:
+ int _M_i;
+};
+
+static_assert(std::is_convertible_v<IntLike, int>);
+static_assert(std::is_nothrow_constructible_v<int, IntLike>);
+
+void
+test_shape(const auto& s2, const auto& s23)
+{
+ std::extents<int, 2, 3> expected;
+
+ std::extents<int, 2, 3> e1(s23);
+ VERIFY(e1 == expected);
+
+ std::extents<int, dyn, 3> e2(s2);
+ VERIFY(e2 == expected);
+
+ std::extents<int, dyn, 3> e3(s23);
+ VERIFY(e3 == expected);
+
+ std::extents<int, dyn, dyn> e4(s23);
+ VERIFY(e4 == expected);
+}
+
+void
+test_pack()
+{
+ std::extents<int, 2, 3> expected;
+
+ std::extents<int, dyn, 3> e1(IntLike(2));
+ VERIFY(e1 == expected);
+
+ std::extents<int, dyn, 3> e2(IntLike(2), IntLike(3));
+ VERIFY(e2 == expected);
+
+ std::extents<int, dyn, dyn> e3(IntLike(2), IntLike(3));
+ VERIFY(e3 == expected);
+}
+
+int
+main()
+{
+ auto a2 = std::array<IntLike, 1>{IntLike(2)};
+ auto s2 = std::span<IntLike, 1>(a2);
+
+ auto a23 = std::array<IntLike, 2>{IntLike(2), IntLike(3)};
+ auto s23 = std::span<IntLike, 2>(a23);
+
+ test_shape(a2, a23);
+ test_shape(s2, s23);
+ test_pack();
+
+ return 0;
+}
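Why a type with deleted copy and move constructors works here (a reading of the constructors in the new header, not a claim made by the patch): the variadic constructor takes its arguments by value, so prvalue arguments such as IntLike(2) initialize the parameters in place through guaranteed copy elision, while the span and array paths only ever bind references. A self-contained sketch with a hypothetical NoCopy type:

#include <mdspan>

struct NoCopy
{
  explicit NoCopy(int i) : i(i) { }
  NoCopy(const NoCopy&) = delete;
  constexpr operator int() const noexcept { return i; }
  int i;
};

int main()
{
  std::extents<int, std::dynamic_extent> ok(NoCopy(2)); // prvalue: elided in place
  // NoCopy n(2);
  // std::extents<int, std::dynamic_extent> bad(n);     // ill-formed: copy is deleted
  (void) ok;
}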
diff --git a/libstdc++-v3/testsuite/23_containers/mdspan/extents/misc.cc b/libstdc++-v3/testsuite/23_containers/mdspan/extents/misc.cc
new file mode 100644
index 0000000..16204aa
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/mdspan/extents/misc.cc
@@ -0,0 +1,224 @@
+// { dg-do run { target c++23 } }
+#include <mdspan>
+
+#include <testsuite_hooks.h>
+
+constexpr size_t dyn = std::dynamic_extent;
+
+// Check class traits.
+static_assert(std::regular<std::extents<int>>);
+static_assert(std::regular<std::extents<int, 1>>);
+static_assert(std::regular<std::extents<int, dyn>>);
+
+static_assert(std::is_trivially_copyable_v<std::extents<int>>);
+static_assert(std::is_trivially_copyable_v<std::extents<int, 1>>);
+static_assert(std::is_trivially_copyable_v<std::extents<int, dyn>>);
+
+// Check member typedefs.
+static_assert(std::is_same_v<std::extents<int, 1, 2>::rank_type, size_t>);
+
+static_assert(std::is_unsigned_v<std::extents<int, 2>::size_type>);
+static_assert(std::is_unsigned_v<std::extents<unsigned int, 2>::size_type>);
+
+static_assert(std::is_same_v<std::extents<char, 2>::index_type, char>);
+static_assert(std::is_same_v<std::extents<int, 2>::index_type, int>);
+static_assert(std::is_same_v<std::extents<unsigned int, 2>::index_type,
+ unsigned int>);
+
+// Check `rank`.
+static_assert(std::extents<int, 1>::rank() == 1);
+static_assert(std::extents<int, dyn>::rank() == 1);
+static_assert(std::extents<int, 2, dyn>::rank() == 2);
+
+// Check `rank_dynamic`.
+static_assert(std::extents<int, 1>::rank_dynamic() == 0);
+static_assert(std::extents<int, dyn>::rank_dynamic() == 1);
+static_assert(std::extents<int, 2, dyn>::rank_dynamic() == 1);
+static_assert(std::extents<int, dyn, dyn>::rank_dynamic() == 2);
+
+template<typename T, size_t... Extents>
+ constexpr bool
+ check_rank_return_types()
+ {
+ auto e = std::extents<T, Extents...>();
+ return std::is_same_v<decltype(e.rank()), size_t>
+ && std::is_same_v<decltype(e.rank_dynamic()), size_t>;
+ }
+
+static_assert(check_rank_return_types<int, 1>());
+
+// Check that the static extents don't take up space.
+static_assert(sizeof(std::extents<int, 1, dyn>) == sizeof(int));
+static_assert(sizeof(std::extents<char, 1, dyn>) == sizeof(char));
+
+template<typename Extents>
+class Container
+{
+ int dummy;
+ [[no_unique_address]] Extents b0;
+};
+
+static_assert(sizeof(Container<std::extents<char, 1, 2>>) == sizeof(int));
+static_assert(sizeof(Container<std::extents<size_t, 1, 2>>) == sizeof(int));
+
+// operator=
+static_assert(std::is_nothrow_assignable_v<std::extents<int, dyn, 2>,
+ std::extents<int, 1, 2>>);
+
+constexpr bool
+test_assign()
+{
+ auto e1 = std::extents<int, 1, 2>();
+ auto e2 = std::extents<int, 1, 2>();
+
+ e2 = e1;
+ VERIFY(e2 == e1);
+
+ auto e5 = std::extents<int, 1, dyn>();
+ e5 = e1;
+ VERIFY(e5 == e1);
+
+ auto e3 = std::extents<int, dyn, dyn>(1, 2);
+ auto e4 = std::extents<int, dyn, dyn>(3, 4);
+ e3 = e4;
+ VERIFY(e3 == e4);
+ return true;
+}
+
+// Deduction guide
+template<size_t Rank, typename... Extents>
+constexpr void
+test_deduction(Extents... exts)
+{
+ std::array<size_t, sizeof...(exts)> shape{static_cast<size_t>(exts)...};
+ std::dextents<size_t, Rank> expected(shape);
+ std::extents e(exts...);
+ static_assert(std::is_same_v<decltype(e), std::dextents<size_t, Rank>>);
+ VERIFY(e == expected);
+}
+
+constexpr bool
+test_deduction_all()
+{
+ test_deduction<0>();
+ test_deduction<1>(1);
+ test_deduction<2>(1.0, 2.0f);
+ test_deduction<3>(int(1), char(2), size_t(3));
+ return true;
+}
+
+class A {};
+
+template<typename... Extents>
+ concept deducible = requires
+ {
+ { std::extents(Extents{}...) }
+ -> std::convertible_to<std::dextents<size_t, sizeof...(Extents)>>;
+ };
+
+static_assert(deducible<int>);
+static_assert(!deducible<A, A>);
+
+// dextents
+static_assert(std::is_same_v<std::dextents<int, 0>, std::extents<int>>);
+static_assert(std::is_same_v<std::dextents<int, 1>, std::extents<int, dyn>>);
+static_assert(std::is_same_v<std::dextents<int, 5>,
+ std::extents<int, dyn, dyn, dyn, dyn, dyn>>);
+
+static_assert(std::dextents<int, 5>::rank() == 5);
+static_assert(std::dextents<int, 5>::rank_dynamic() == 5);
+static_assert(std::is_same_v<typename std::dextents<int, 5>::index_type, int>);
+
+// static_extent
+static_assert(std::extents<int, 1, 2>::static_extent(0) == 1);
+static_assert(std::extents<int, 1, 2>::static_extent(1) == 2);
+
+static_assert(std::extents<int, 1, dyn>::static_extent(0) == 1);
+static_assert(std::extents<int, 1, dyn>::static_extent(1) == dyn);
+
+static_assert(std::extents<int, dyn, dyn>::static_extent(0) == dyn);
+static_assert(std::extents<int, dyn, dyn>::static_extent(1) == dyn);
+
+// extent
+template<typename Extent>
+ constexpr void
+ test_extent(const Extent& e,
+ const std::array<typename Extent::index_type, Extent::rank()>& shape)
+ {
+ for(size_t i = 0; i < e.rank(); ++i)
+ VERIFY(e.extent(i) == shape[i]);
+ }
+
+constexpr bool
+test_extent_all()
+{
+ test_extent(std::extents<int, 1, 2>{}, {1, 2});
+ test_extent(std::extents<int, 1, dyn>{2}, {1, 2});
+ test_extent(std::extents<int, dyn, dyn>{1, 2}, {1, 2});
+ return true;
+}
+
+// operator==
+template<typename Lhs, typename Rhs>
+ constexpr void
+ test_ops_eq(const Lhs& lhs, const Rhs& rhs, bool expected)
+ {
+ VERIFY((lhs == rhs) == expected);
+ VERIFY((lhs != rhs) == !expected);
+ }
+
+constexpr void
+test_op_eq_rank_zero()
+{
+ auto e1 = std::extents<int>();
+ auto e2 = std::extents<int>();
+ auto e3 = std::extents<unsigned int>();
+
+ test_ops_eq(e1, e2, true);
+ test_ops_eq(e1, e3, true);
+}
+
+constexpr void
+test_op_eq_common()
+{
+ auto e1 = std::extents<int, 1, 2, 3>();
+ auto e2 = std::extents<int, 1, 2, 3>();
+ auto e3 = std::extents<int, 1, dyn, 3>(2);
+ auto e4 = std::extents<int, 1, dyn, 3>(3);
+
+ auto e5 = std::extents<int, 1>();
+ auto e6 = std::extents<int, 1, 3, 3>();
+
+ test_ops_eq(e1, e2, true);
+ test_ops_eq(e1, e3, true);
+ test_ops_eq(e1, e4, false);
+
+ test_ops_eq(e1, e5, false);
+ test_ops_eq(e1, e6, false);
+ test_ops_eq(e3, e6, false);
+}
+
+constexpr bool
+test_op_eq_all()
+{
+ test_op_eq_rank_zero();
+ test_op_eq_common();
+ return true;
+}
+
+int
+main()
+{
+ test_assign();
+ static_assert(test_assign());
+
+ test_deduction_all();
+ static_assert(test_deduction_all());
+
+ test_extent_all();
+ static_assert(test_extent_all());
+
+ test_op_eq_all();
+ static_assert(test_op_eq_all());
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/std/time/format/pr120114.cc b/libstdc++-v3/testsuite/std/time/format/pr120114.cc
new file mode 100644
index 0000000..c630bb3
--- /dev/null
+++ b/libstdc++-v3/testsuite/std/time/format/pr120114.cc
@@ -0,0 +1,125 @@
+// { dg-do run { target c++23 } }
+// { dg-options "-fexec-charset=UTF-8" }
+// { dg-timeout-factor 2 }
+
+#include <algorithm>
+#include <chrono>
+#include <testsuite_hooks.h>
+
+#define WIDEN_(C, S) ::std::__format::_Widen<C>(S, L##S)
+#define WIDEN(S) WIDEN_(_CharT, S)
+
+template<typename _CharT>
+void
+test_from_format_string()
+{
+ std::basic_string<_CharT> res;
+ using namespace std::chrono_literals;
+ auto date = 2025y/std::chrono::May/05d;
+
+ res = std::format(WIDEN("{:+<13%F\U0001f921}"), date);
+ VERIFY( res == WIDEN("2025-05-05\U0001f921+") );
+
+ res = std::format(WIDEN("{:->15%F\U0001f921}"), date);
+ VERIFY( res == WIDEN("---2025-05-05\U0001f921") );
+
+ res = std::format(WIDEN("{:=^20%F\U0001f921}"), date);
+ VERIFY( res == WIDEN("====2025-05-05\U0001f921====") );
+}
+
+template<typename _CharT>
+void
+test_formatted_value()
+{
+ // Custom time_put facet that returns the Ideographic Telegraph Symbol
+ // for the given month when the %Om format is used.
+ struct TimePut : std::time_put<_CharT>
+ {
+ using iter_type = std::time_put<_CharT>::iter_type;
+ using char_type = std::time_put<_CharT>::char_type;
+
+ iter_type
+ do_put(iter_type out, std::ios_base& io, char_type fill, const tm* t,
+ char format, char modifier) const override
+ {
+ if (format != 'm' || modifier != 'O')
+ return std::time_put<_CharT>::do_put(out, io, fill, t, format, modifier);
+ std::basic_string_view<_CharT> str;
+ switch (t->tm_mon)
+ {
+ case 0:
+ str = WIDEN("\u32C0");
+ break;
+ case 1:
+ str = WIDEN("\u32C1");
+ break;
+ case 2:
+ str = WIDEN("\u32C2");
+ break;
+ case 3:
+ str = WIDEN("\u32C3");
+ break;
+ case 4:
+ str = WIDEN("\u32C4");
+ break;
+ case 5:
+ str = WIDEN("\u32C5");
+ break;
+ case 6:
+ str = WIDEN("\u32C6");
+ break;
+ case 7:
+ str = WIDEN("\u32C7");
+ break;
+ case 8:
+ str = WIDEN("\u32C8");
+ break;
+ case 9:
+ str = WIDEN("\u32C9");
+ break;
+ case 10:
+ str = WIDEN("\u32CA");
+ break;
+ case 11:
+ str = WIDEN("\u32CB");
+ break;
+ };
+ return std::copy(str.begin(), str.end(), out);
+ }
+ };
+ const std::locale loc(std::locale::classic(), new TimePut);
+
+ std::basic_string<_CharT> res;
+
+ res = std::format(loc, WIDEN("{:<1L%Om}"), std::chrono::January);
+ VERIFY( res == WIDEN("\u32C0") );
+
+ res = std::format(loc, WIDEN("{:>2L%Om}"), std::chrono::February);
+ VERIFY( res == WIDEN("\u32C1") );
+
+ res = std::format(loc, WIDEN("{:<3L%Om}"), std::chrono::March);
+ VERIFY( res == WIDEN("\u32C2 ") );
+
+ res = std::format(loc, WIDEN("{:^4L%Om}"), std::chrono::April);
+ VERIFY( res == WIDEN(" \u32C3 ") );
+
+ res = std::format(loc, WIDEN("{:>5L%Om}"), std::chrono::May);
+ VERIFY( res == WIDEN(" \u32C4") );
+
+ res = std::format(loc, WIDEN("{:+<6L%Om}"), std::chrono::June);
+ VERIFY( res == WIDEN("\u32C5++++") );
+
+ res = std::format(loc, WIDEN("{:=^7L%Om}"), std::chrono::July);
+ VERIFY( res == WIDEN("==\u32C6===") );
+
+ res = std::format(loc, WIDEN("{:->8L%Om}"), std::chrono::August);
+ VERIFY( res == WIDEN("------\u32C7") );
+}
+
+int main()
+{
+ test_from_format_string<char>();
+ test_from_format_string<wchar_t>();
+ test_formatted_value<char>();
+ test_formatted_value<wchar_t>();
+}