-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  gcc/ChangeLog | 156
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/ada/ChangeLog | 6
-rw-r--r--  gcc/cfgexpand.cc | 13
-rw-r--r--  gcc/config/i386/i386-features.cc | 177
-rw-r--r--  gcc/config/i386/i386-features.h | 11
-rw-r--r--  gcc/config/s390/vector.md | 2
-rw-r--r--  gcc/cp/cp-gimplify.cc | 15
-rw-r--r--  gcc/cp/decl2.cc | 14
-rw-r--r--  gcc/cp/method.cc | 4
-rw-r--r--  gcc/cp/module.cc | 6
-rw-r--r--  gcc/cp/name-lookup.cc | 3
-rw-r--r--  gcc/cp/optimize.cc | 4
-rw-r--r--  gcc/fortran/ChangeLog | 20
-rw-r--r--  gcc/fortran/intrinsic.cc | 19
-rw-r--r--  gcc/fortran/intrinsic.texi | 13
-rw-r--r--  gcc/fortran/simplify.cc | 41
-rw-r--r--  gcc/gimple-fold.cc | 26
-rw-r--r--  gcc/m2/ChangeLog | 11
-rw-r--r--  gcc/m2/m2.flex | 25
-rw-r--r--  gcc/match.pd | 52
-rw-r--r--  gcc/testsuite/ChangeLog | 66
-rw-r--r--  gcc/testsuite/c-c++-common/pr118868-1.c | 9
-rw-r--r--  gcc/testsuite/g++.dg/modules/clone-4_a.C | 12
-rw-r--r--  gcc/testsuite/g++.dg/modules/clone-4_b.C | 12
-rw-r--r--  gcc/testsuite/g++.dg/modules/openmp-1.C | 9
-rw-r--r--  gcc/testsuite/g++.dg/modules/tpl-friend-19_a.C | 16
-rw-r--r--  gcc/testsuite/g++.dg/modules/tpl-friend-19_b.C | 6
-rw-r--r--  gcc/testsuite/g++.dg/opt/pr96780_cpp23.C | 16
-rw-r--r--  gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C | 24
-rw-r--r--  gcc/testsuite/g++.dg/warn/ignore-virtual-move-assign.C | 45
-rw-r--r--  gcc/testsuite/gcc.dg/ipa/pr120044-1.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/ipa/pr120044-2.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr114864.c | 15
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr103771-4.c | 82
-rw-r--r--  gcc/testsuite/gfortran.dg/dec_math.f90 | 69
-rw-r--r--  gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp | 2
-rw-r--r--  gcc/tree-cfg.cc | 12
-rw-r--r--  gcc/tree-sra.cc | 4
-rw-r--r--  gcc/tree-vect-loop.cc | 1020
-rw-r--r--  gcc/tree-vect-patterns.cc | 8
-rw-r--r--  gcc/tree-vect-slp.cc | 2
-rw-r--r--  gcc/tree-vect-stmts.cc | 35
-rw-r--r--  gcc/tree-vectorizer.h | 11
-rw-r--r--  include/libiberty.h | 4
-rw-r--r--  libgcobol/ChangeLog | 4
-rw-r--r--  libgfortran/ChangeLog | 45
-rw-r--r--  libiberty/ChangeLog | 5
-rw-r--r--  libstdc++-v3/doc/doxygen/stdheader.cc | 3
-rw-r--r--  libstdc++-v3/include/Makefile.am | 3
-rw-r--r--  libstdc++-v3/include/Makefile.in | 3
-rw-r--r--  libstdc++-v3/include/bits/cpyfunc_impl.h | 269
-rw-r--r--  libstdc++-v3/include/bits/funcwrap.h | 507
-rw-r--r--  libstdc++-v3/include/bits/mofunc_impl.h | 78
-rw-r--r--  libstdc++-v3/include/bits/move_only_function.h | 218
-rw-r--r--  libstdc++-v3/include/bits/version.def | 9
-rw-r--r--  libstdc++-v3/include/bits/version.h | 10
-rw-r--r--  libstdc++-v3/include/std/format | 217
-rw-r--r--  libstdc++-v3/include/std/functional | 31
-rw-r--r--  libstdc++-v3/include/std/utility | 2
-rw-r--r--  libstdc++-v3/src/c++23/std.cc.in | 3
-rw-r--r--  libstdc++-v3/testsuite/20_util/copyable_function/call.cc | 224
-rw-r--r--  libstdc++-v3/testsuite/20_util/copyable_function/cons.cc | 126
-rw-r--r--  libstdc++-v3/testsuite/20_util/copyable_function/conv.cc | 251
-rw-r--r--  libstdc++-v3/testsuite/20_util/copyable_function/copy.cc | 154
-rw-r--r--  libstdc++-v3/testsuite/20_util/copyable_function/move.cc | 120
-rw-r--r--  libstdc++-v3/testsuite/20_util/move_only_function/call.cc | 14
-rw-r--r--  libstdc++-v3/testsuite/20_util/move_only_function/conv.cc | 188
-rw-r--r--  libstdc++-v3/testsuite/20_util/move_only_function/move.cc | 11
-rw-r--r--  libstdc++-v3/testsuite/std/format/arguments/args.cc | 45
-rw-r--r--  libstdc++-v3/testsuite/std/format/parse_ctx.cc | 72
72 files changed, 3522 insertions, 1225 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index b1e7fad..a3e3f25 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -57,7 +57,6 @@ docs, and the testsuite related to that.
aarch64 ldp/stp Alex Coplan <alex.coplan@arm.com>
aarch64 port Richard Earnshaw <richard.earnshaw@arm.com>
aarch64 port Richard Sandiford <richard.sandiford@arm.com>
-aarch64 port Marcus Shawcroft <marcus.shawcroft@arm.com>
aarch64 port Kyrylo Tkachov <ktkachov@nvidia.com>
alpha port Richard Henderson <rth@gcc.gnu.org>
amdgcn port Julian Brown <julian@codesourcery.com>
@@ -792,7 +791,6 @@ Senthil Kumar Selvaraj saaadhu <saaadhu@gcc.gnu.org>
Kostya Serebryany kcc <kcc@google.com>
Thiemo Seufer - <ths@networkno.de>
Bill Seurer seurer <seurer@linux.vnet.ibm.com>
-Marcus Shawcroft mshawcroft <marcus.shawcroft@arm.com>
Nathaniel Shead nshead <nathanieloshead@gmail.com>
Tim Shen timshen <timshen@google.com>
Joel Sherrill joel <joel@oarcorp.com>
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d597002..d475eee 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,159 @@
+2025-05-13 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR tree-optimization/119903
+ * gimple-fold.cc (replace_stmt_with_simplification): Reject for
+ noncall exceptions replacing comparison with itself.
+
+2025-05-13 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR middle-end/118868
+ * tree-cfg.cc (verify_gimple_assign_unary): Allow pointers
+ but disallow aggregate types for PAREN_EXPR.
+
+2025-05-13 Andrew Pinski <quic_apinski@quicinc.com>
+
+	* cfgexpand.cc (vars_ssa_cache::operator()): Update the cache if
+	the use already has a cache.
+
+2025-05-13 Andrew Pinski <quic_apinski@quicinc.com>
+
+	* cfgexpand.cc (vars_ssa_cache::operator()): Reverse the order of
+	going through the update list.
+
+2025-05-13 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-loop.cc (vectorizable_nonlinear_induction):
+ Remove non-SLP path, use SLP_TREE_VECTYPE.
+ (vectorizable_induction): Likewise. Drop ncopies variable
+ which is always 1.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc (riscv_extra_ext_flag_table_t):
+ New.
+ (riscv_ext_flag_table): Rename to ...
+	(riscv_extra_ext_flag_table): this, and drop most of the definitions
+	that can be obtained from the flags field of the riscv_ext_info_t
+ structures.
+ (apply_extra_extension_flags): Use riscv_ext_info_t.
+ (riscv_ext_is_subset): Ditto.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc (riscv_ext_version_table):
+ Remove.
+ (standard_extensions_p): Use riscv_ext_info_t.
+ (get_default_version): Use riscv_ext_info_t.
+ (riscv_arch_help): Ditto.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc
+ (riscv_implied_info::riscv_implied_info_t): Remove unused
+ variant.
+	(struct riscv_implied_info_t): Remove unused field.
+ (riscv_implied_info::match): Remove unused variant, and adjust
+ the logic.
+ (get_riscv_ext_info): New.
+ (riscv_implied_info): Remove.
+ (riscv_ext_info_t::apply_implied_ext): New.
+	(riscv_combine_info): Remove.
+ (riscv_subset_list::handle_implied_ext): Use riscv_ext_info_t
+ rather than riscv_implied_info.
+ (riscv_subset_list::check_implied_ext): Ditto.
+ (riscv_subset_list::handle_combine_ext): Use riscv_ext_info_t
+ rather than riscv_combine_info.
+ (riscv_minimal_hwprobe_feature_bits): Use riscv_ext_info_t
+ rather than riscv_implied_info.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc (riscv_ext_info_t): New
+ struct.
+ (opt_var_ref_t): Adjust order.
+ (cl_opt_var_ref_t): Ditto.
+	(riscv_ext_flag_table_t): Adjust order, and add a new constructor
+	that does not hold the extension name.
+	(riscv_version_t): New struct.
+	(riscv_implied_info_t): Adjust order, and add a new constructor
+	that does not hold the extension name.
+ (apply_extra_extension_flags): New function.
+ (riscv_ext_infos): New.
+ (riscv_implied_info): Adjust.
+ * config/riscv/riscv-opts.h (EXT_FLAG_MACRO): New macro.
+ (BITMASK_NOT_YET_ALLOCATED): New macro.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc (riscv_can_inline_p): Drop
+ extension flags check from `target_flags`.
+ * config/riscv/riscv-subset.h (riscv_x_target_flags_isa_mask):
+ Remove.
+ * config/riscv/riscv.cc (riscv_x_target_flags_isa_mask): Remove.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+	* doc/invoke.texi: Replace hand-written extension table with
+	`@include riscv-ext.texi` to pull in auto-generated entries.
+ * doc/riscv-ext.texi: New generated definition file
+ containing formatted documentation entries for each extension.
+ * Makefile.in: Add riscv-ext.texi to the list of files to be
+ processed by the Texinfo generator.
+ * config/riscv/gen-riscv-ext-texi.cc: New.
+ * config/riscv/t-riscv: Add rule for generating riscv-ext.texi.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/gen-riscv-ext-opt.cc: New.
+ * config/riscv/riscv.opt: Drop manual entries for target
+ options, and include riscv-ext.opt.
+ * config/riscv/riscv-ext.opt: New.
+ * config/riscv/riscv-ext.opt.urls: New.
+ * config.gcc: Add riscv-ext.opt to the list of target options files.
+	* common/config/riscv/riscv-common.cc (riscv_ext_flag_table): Adjust target
+ option variable entry.
+ (riscv_set_arch_by_subset_list): Adjust target option variable.
+ * config/riscv/riscv-c.cc (riscv_ext_flag_table): Adjust target
+ option variable entry.
+ * config/riscv/riscv-vector-builtins.cc (pragma_intrinsic_flags):
+ Adjust variable name.
+ (riscv_pragma_intrinsic_flags_pollute): Adjust variable name.
+ (riscv_pragma_intrinsic_flags_restore): Ditto.
+ * config/riscv/t-riscv: Add the rule for generating
+ riscv-ext.opt.
+ * config/riscv/riscv-opts.h (TARGET_MIN_VLEN): Update.
+ (TARGET_MIN_VLEN_OPTS): Update.
+
+2025-05-13 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv-ext.def: New file; define extension metadata table.
+ * config/riscv/riscv-ext-corev.def: New.
+ * config/riscv/riscv-ext-sifive.def: New.
+ * config/riscv/riscv-ext-thead.def: New.
+ * config/riscv/riscv-ext-ventana.def: New.
+
+2025-05-13 David Malcolm <dmalcolm@redhat.com>
+
+ PR other/116792
+ * diagnostic-format-html.cc: Include "diagnostic-format-text.h",
+ "pretty-print-urlifier.h" and "edit-context.h".
+ (html_builder::html_builder): Fix indentation in decl.
+ (html_builder::make_element_for_diagnostic): Split out metadata
+ code into make_element_for_metadata. Call
+ make_element_for_source, make_element_for_path, and
+ make_element_for_patch.
+ (html_builder::make_element_for_source): New.
+ (html_builder::make_element_for_path): New.
+ (html_builder::make_element_for_patch): New.
+ (html_builder::make_metadata_element): New.
+ (html_builder::make_element_for_metadata): New.
+ (html_output_format::get_builder): New.
+ (selftest::test_html_diagnostic_context::get_builder): New.
+ (selftest::test_simple_log): Update test to print a quoted string,
+ and verify that it uses a "gcc-quoted-text" span.
+ (selftest::test_metadata): New.
+ (selftest::diagnostic_format_html_cc_tests): Call it.
+
2025-05-13 Andrew MacLeod <amacleod@redhat.com>
* tree-ssanames.cc (set_bitmask): Use int_range_max for temps.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 83f5cb2..cfb9239 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20250513
+20250514
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 128ea05..f0046a0 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,9 @@
+2025-05-13 Nicolas Boulenguez <nicolas@debian.org>
+
+ PR ada/87778
+ * Make-generated.in: Remove -q gnatmake option.
+ * gcc-interface/Makefile.in: Likewise.
+
2025-05-05 Eric Botcazou <ebotcazou@adacore.com>
PR ada/120104
diff --git a/gcc/cfgexpand.cc b/gcc/cfgexpand.cc
index 2b27076..277ef65 100644
--- a/gcc/cfgexpand.cc
+++ b/gcc/cfgexpand.cc
@@ -766,7 +766,12 @@ vars_ssa_cache::operator() (tree name)
/* If the cache exists for the use, don't try to recreate it. */
if (exists (use))
- continue;
+ {
+	  /* Update the cache here; this can reduce the number of
+ times through the update loop below. */
+ update (old_name, use);
+ continue;
+ }
/* Create the cache bitmap for the use and also
so we don't go into an infinite loop for some phi nodes with loops. */
@@ -804,9 +809,11 @@ vars_ssa_cache::operator() (tree name)
bool changed;
do {
changed = false;
- for (auto &e : update_cache_list)
+ unsigned int i;
+ std::pair<tree,tree> *e;
+ FOR_EACH_VEC_ELT_REVERSE (update_cache_list, i, e)
{
- if (update (e.second, e.first))
+ if (update (e->second, e->first))
changed = true;
}
} while (changed);
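
Both cfgexpand.cc tweaks above aim at making this worklist converge
faster: a use whose cache already exists is merged into the result
immediately, and the pending update pairs are walked in reverse.  A
minimal standalone C++ sketch of the same fixed-point idiom, with
std::set standing in for GCC's bitmaps (illustrative only, not GCC's
actual data structures):

// Propagate sets along (dst, src) pairs until nothing changes.
// Visiting the pairs in reverse mirrors FOR_EACH_VEC_ELT_REVERSE:
// pairs were pushed in discovery order, so the reverse walk tends to
// see sources before sinks and needs fewer passes to converge.
#include <set>
#include <utility>
#include <vector>

// Merge src's set into dst's; return true if dst grew.
static bool
update (std::vector<std::set<int>> &sets, int dst, int src)
{
  auto before = sets[dst].size ();
  sets[dst].insert (sets[src].begin (), sets[src].end ());
  return sets[dst].size () != before;
}

static void
propagate (std::vector<std::set<int>> &sets,
           const std::vector<std::pair<int, int>> &pairs)
{
  bool changed;
  do {
    changed = false;
    for (auto it = pairs.rbegin (); it != pairs.rend (); ++it)
      if (update (sets, it->first, it->second))
        changed = true;
  } while (changed);
}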
diff --git a/gcc/config/i386/i386-features.cc b/gcc/config/i386/i386-features.cc
index cc8313b..6491c6b 100644
--- a/gcc/config/i386/i386-features.cc
+++ b/gcc/config/i386/i386-features.cc
@@ -296,9 +296,8 @@ scalar_chain::scalar_chain (enum machine_mode smode_, enum machine_mode vmode_)
insns_conv = BITMAP_ALLOC (NULL);
queue = NULL;
- n_sse_to_integer = 0;
- n_integer_to_sse = 0;
-
+ cost_sse_integer = 0;
+ weighted_cost_sse_integer = 0;
max_visits = x86_stv_max_visits;
}
@@ -337,20 +336,52 @@ scalar_chain::mark_dual_mode_def (df_ref def)
/* Record the def/insn pair so we can later efficiently iterate over
the defs to convert on insns not in the chain. */
bool reg_new = bitmap_set_bit (defs_conv, DF_REF_REGNO (def));
+ basic_block bb = BLOCK_FOR_INSN (DF_REF_INSN (def));
+ profile_count entry_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
+ bool speed_p = optimize_bb_for_speed_p (bb);
+ int cost = 0;
+
if (!bitmap_bit_p (insns, DF_REF_INSN_UID (def)))
{
if (!bitmap_set_bit (insns_conv, DF_REF_INSN_UID (def))
&& !reg_new)
return;
- n_integer_to_sse++;
+
+ /* Cost integer to sse moves. */
+ if (speed_p)
+ cost = COSTS_N_INSNS (ix86_cost->integer_to_sse) / 2;
+ else if (TARGET_64BIT || smode == SImode)
+ cost = COSTS_N_BYTES (4);
+ /* vmovd (4 bytes) + vpinsrd (6 bytes). */
+ else if (TARGET_SSE4_1)
+ cost = COSTS_N_BYTES (10);
+ /* movd (4 bytes) + movd (4 bytes) + unpckldq (4 bytes). */
+ else
+ cost = COSTS_N_BYTES (12);
}
else
{
if (!reg_new)
return;
- n_sse_to_integer++;
+
+ /* Cost sse to integer moves. */
+ if (speed_p)
+ cost = COSTS_N_INSNS (ix86_cost->sse_to_integer) / 2;
+ else if (TARGET_64BIT || smode == SImode)
+ cost = COSTS_N_BYTES (4);
+ /* vmovd (4 bytes) + vpextrd (6 bytes). */
+ else if (TARGET_SSE4_1)
+ cost = COSTS_N_BYTES (10);
+ /* movd (4 bytes) + psrlq (5 bytes) + movd (4 bytes). */
+ else
+ cost = COSTS_N_BYTES (13);
}
+ if (speed_p)
+ weighted_cost_sse_integer += bb->count.to_sreal_scale (entry_count) * cost;
+
+ cost_sse_integer += cost;
+
if (dump_file)
fprintf (dump_file,
" Mark r%d def in insn %d as requiring both modes in chain #%d\n",
@@ -531,15 +562,15 @@ general_scalar_chain::vector_const_cost (rtx exp, basic_block bb)
return COSTS_N_INSNS (ix86_cost->sse_load[smode == DImode ? 1 : 0]) / 2;
}
-/* Compute a gain for chain conversion. */
+/* Return true if the chain conversion is cost profitable. */
-int
+bool
general_scalar_chain::compute_convert_gain ()
{
bitmap_iterator bi;
unsigned insn_uid;
int gain = 0;
- int cost = 0;
+ sreal weighted_gain = 0;
if (dump_file)
fprintf (dump_file, "Computing gain for chain #%d...\n", chain_id);
@@ -559,10 +590,13 @@ general_scalar_chain::compute_convert_gain ()
rtx dst = SET_DEST (def_set);
basic_block bb = BLOCK_FOR_INSN (insn);
int igain = 0;
+ profile_count entry_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
+ bool speed_p = optimize_bb_for_speed_p (bb);
+ sreal bb_freq = bb->count.to_sreal_scale (entry_count);
if (REG_P (src) && REG_P (dst))
{
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
/* reg-reg move is 2 bytes, while SSE 3. */
igain += COSTS_N_BYTES (2 * m - 3);
else
@@ -571,7 +605,7 @@ general_scalar_chain::compute_convert_gain ()
}
else if (REG_P (src) && MEM_P (dst))
{
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
/* Integer load/store is 3+ bytes and SSE 4+. */
igain += COSTS_N_BYTES (3 * m - 4);
else
@@ -581,7 +615,7 @@ general_scalar_chain::compute_convert_gain ()
}
else if (MEM_P (src) && REG_P (dst))
{
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
igain += COSTS_N_BYTES (3 * m - 4);
else
igain += COSTS_N_INSNS (m * ix86_cost->int_load[2]
@@ -593,7 +627,7 @@ general_scalar_chain::compute_convert_gain ()
of explicit load and store instructions. */
if (MEM_P (dst))
{
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
/* ??? This probably should account size difference
of SSE and integer load rather than full SSE load. */
igain -= COSTS_N_BYTES (8);
@@ -667,7 +701,7 @@ general_scalar_chain::compute_convert_gain ()
igain -= vector_const_cost (XEXP (src, 1), bb);
if (MEM_P (XEXP (src, 1)))
{
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
igain -= COSTS_N_BYTES (m == 2 ? 3 : 5);
else
igain += COSTS_N_INSNS
@@ -730,7 +764,7 @@ general_scalar_chain::compute_convert_gain ()
case CONST_INT:
if (REG_P (dst))
{
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
{
/* xor (2 bytes) vs. xorps (3 bytes). */
if (src == const0_rtx)
@@ -769,14 +803,14 @@ general_scalar_chain::compute_convert_gain ()
if (XVECEXP (XEXP (src, 1), 0, 0) == const0_rtx)
{
// movd (4 bytes) replaced with movdqa (4 bytes).
- if (!optimize_bb_for_size_p (bb))
+ if (speed_p)
igain += COSTS_N_INSNS (ix86_cost->sse_to_integer
- ix86_cost->xmm_move) / 2;
}
else
{
// pshufd; movd replaced with pshufd.
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
igain += COSTS_N_BYTES (4);
else
igain += ix86_cost->sse_to_integer;
@@ -788,55 +822,34 @@ general_scalar_chain::compute_convert_gain ()
}
}
+ if (speed_p)
+ weighted_gain += bb_freq * igain;
+ gain += igain;
+
if (igain != 0 && dump_file)
{
- fprintf (dump_file, " Instruction gain %d for ", igain);
+ fprintf (dump_file, " Instruction gain %d with bb_freq %.2f for ",
+ igain, bb_freq.to_double ());
dump_insn_slim (dump_file, insn);
}
- gain += igain;
}
if (dump_file)
- fprintf (dump_file, " Instruction conversion gain: %d\n", gain);
-
- /* Cost the integer to sse and sse to integer moves. */
- if (!optimize_function_for_size_p (cfun))
{
- cost += n_sse_to_integer * COSTS_N_INSNS (ix86_cost->sse_to_integer) / 2;
- /* ??? integer_to_sse but we only have that in the RA cost table.
- Assume sse_to_integer/integer_to_sse are the same which they
- are at the moment. */
- cost += n_integer_to_sse * COSTS_N_INSNS (ix86_cost->integer_to_sse) / 2;
+ fprintf (dump_file, " Instruction conversion gain: %d\n",
+ gain);
+ fprintf (dump_file, " Registers conversion cost: %d\n",
+ cost_sse_integer);
+ fprintf (dump_file, " Weighted instruction conversion gain: %.2f\n",
+ weighted_gain.to_double ());
+ fprintf (dump_file, " Weighted registers conversion cost: %.2f\n",
+ weighted_cost_sse_integer.to_double ());
}
- else if (TARGET_64BIT || smode == SImode)
- {
- cost += n_sse_to_integer * COSTS_N_BYTES (4);
- cost += n_integer_to_sse * COSTS_N_BYTES (4);
- }
- else if (TARGET_SSE4_1)
- {
- /* vmovd (4 bytes) + vpextrd (6 bytes). */
- cost += n_sse_to_integer * COSTS_N_BYTES (10);
- /* vmovd (4 bytes) + vpinsrd (6 bytes). */
- cost += n_integer_to_sse * COSTS_N_BYTES (10);
- }
- else
- {
- /* movd (4 bytes) + psrlq (5 bytes) + movd (4 bytes). */
- cost += n_sse_to_integer * COSTS_N_BYTES (13);
- /* movd (4 bytes) + movd (4 bytes) + unpckldq (4 bytes). */
- cost += n_integer_to_sse * COSTS_N_BYTES (12);
- }
-
- if (dump_file)
- fprintf (dump_file, " Registers conversion cost: %d\n", cost);
-
- gain -= cost;
- if (dump_file)
- fprintf (dump_file, " Total gain: %d\n", gain);
-
- return gain;
+ if (weighted_gain != weighted_cost_sse_integer)
+ return weighted_gain > weighted_cost_sse_integer;
+ else
+ return gain > cost_sse_integer;
}
/* Insert generated conversion instruction sequence INSNS
@@ -1553,21 +1566,22 @@ timode_immed_const_gain (rtx cst, basic_block bb)
return 0;
}
-/* Compute a gain for chain conversion. */
+/* Return true if the chain conversion is cost profitable. */
-int
+bool
timode_scalar_chain::compute_convert_gain ()
{
/* Assume that if we have to move TImode values between units,
then transforming this chain isn't worth it. */
- if (n_sse_to_integer || n_integer_to_sse)
- return -1;
+ if (cost_sse_integer)
+ return false;
bitmap_iterator bi;
unsigned insn_uid;
/* Split ties to prefer V1TImode when not optimizing for size. */
int gain = optimize_size ? 0 : 1;
+ sreal weighted_gain = 0;
if (dump_file)
fprintf (dump_file, "Computing gain for chain #%d...\n", chain_id);
@@ -1582,32 +1596,33 @@ timode_scalar_chain::compute_convert_gain ()
basic_block bb = BLOCK_FOR_INSN (insn);
int scost, vcost;
int igain = 0;
+ profile_count entry_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
+ bool speed_p = optimize_bb_for_speed_p (bb);
+ sreal bb_freq = bb->count.to_sreal_scale (entry_count);
switch (GET_CODE (src))
{
case REG:
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
igain = MEM_P (dst) ? COSTS_N_BYTES (6) : COSTS_N_BYTES (3);
else
igain = COSTS_N_INSNS (1);
break;
case MEM:
- igain = optimize_bb_for_size_p (bb) ? COSTS_N_BYTES (7)
- : COSTS_N_INSNS (1);
+ igain = !speed_p ? COSTS_N_BYTES (7) : COSTS_N_INSNS (1);
break;
case CONST_INT:
if (MEM_P (dst)
&& standard_sse_constant_p (src, V1TImode))
- igain = optimize_bb_for_size_p (bb) ? COSTS_N_BYTES (11) : 1;
+ igain = !speed_p ? COSTS_N_BYTES (11) : 1;
break;
case CONST_WIDE_INT:
/* 2 x mov vs. vmovdqa. */
if (MEM_P (dst))
- igain = optimize_bb_for_size_p (bb) ? COSTS_N_BYTES (3)
- : COSTS_N_INSNS (1);
+ igain = !speed_p ? COSTS_N_BYTES (3) : COSTS_N_INSNS (1);
break;
case NOT:
@@ -1628,7 +1643,7 @@ timode_scalar_chain::compute_convert_gain ()
case LSHIFTRT:
/* See ix86_expand_v1ti_shift. */
op1val = INTVAL (XEXP (src, 1));
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
{
if (op1val == 64 || op1val == 65)
scost = COSTS_N_BYTES (5);
@@ -1662,7 +1677,7 @@ timode_scalar_chain::compute_convert_gain ()
case ASHIFTRT:
/* See ix86_expand_v1ti_ashiftrt. */
op1val = INTVAL (XEXP (src, 1));
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
{
if (op1val == 64 || op1val == 127)
scost = COSTS_N_BYTES (7);
@@ -1740,7 +1755,7 @@ timode_scalar_chain::compute_convert_gain ()
case ROTATERT:
/* See ix86_expand_v1ti_rotate. */
op1val = INTVAL (XEXP (src, 1));
- if (optimize_bb_for_size_p (bb))
+ if (!speed_p)
{
scost = COSTS_N_BYTES (13);
if ((op1val & 31) == 0)
@@ -1772,34 +1787,40 @@ timode_scalar_chain::compute_convert_gain ()
{
if (GET_CODE (XEXP (src, 0)) == AND)
/* and;and;or (9 bytes) vs. ptest (5 bytes). */
- igain = optimize_bb_for_size_p (bb) ? COSTS_N_BYTES (4)
- : COSTS_N_INSNS (2);
+ igain = !speed_p ? COSTS_N_BYTES (4) : COSTS_N_INSNS (2);
/* or (3 bytes) vs. ptest (5 bytes). */
- else if (optimize_bb_for_size_p (bb))
+ else if (!speed_p)
igain = -COSTS_N_BYTES (2);
}
else if (XEXP (src, 1) == const1_rtx)
/* and;cmp -1 (7 bytes) vs. pcmpeqd;pxor;ptest (13 bytes). */
- igain = optimize_bb_for_size_p (bb) ? -COSTS_N_BYTES (6)
- : -COSTS_N_INSNS (1);
+ igain = !speed_p ? -COSTS_N_BYTES (6) : -COSTS_N_INSNS (1);
break;
default:
break;
}
+ gain += igain;
+ if (speed_p)
+ weighted_gain += bb_freq * igain;
+
if (igain != 0 && dump_file)
{
- fprintf (dump_file, " Instruction gain %d for ", igain);
+ fprintf (dump_file, " Instruction gain %d with bb_freq %.2f for ",
+ igain, bb_freq.to_double ());
dump_insn_slim (dump_file, insn);
}
- gain += igain;
}
if (dump_file)
- fprintf (dump_file, " Total gain: %d\n", gain);
+ fprintf (dump_file, " Total gain: %d, weighted gain %.2f\n",
+ gain, weighted_gain.to_double ());
- return gain;
+ if (weighted_gain > (sreal) 0)
+ return true;
+ else
+ return gain > 0;
}
/* Fix uses of converted REG in debug insns. */
@@ -2595,7 +2616,7 @@ convert_scalars_to_vector (bool timode_p)
conversions. */
if (chain->build (&candidates[i], uid, disallowed))
{
- if (chain->compute_convert_gain () > 0)
+ if (chain->compute_convert_gain ())
converted_insns += chain->convert ();
else if (dump_file)
fprintf (dump_file, "Chain #%d conversion is not profitable\n",
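
The net effect of the i386-features.cc changes above: instead of
comparing one unweighted instruction gain against a separately
accumulated move cost, each instruction's gain and each inserted
integer<->SSE move is also weighted by its basic block's execution
frequency relative to the function entry, and the unweighted totals
only break ties.  A standalone sketch of that decision rule (the
struct and function names here are made up for illustration, not GCC
internals):

#include <vector>

struct insn_gain
{
  int gain;        // per-insn gain (negative for a cost) in cost units
  double bb_freq;  // block count scaled by the function entry count
  bool speed_p;    // is the block optimized for speed?
};

static bool
conversion_profitable_p (const std::vector<insn_gain> &insns,
                         int cost, double weighted_cost)
{
  int gain = 0;
  double weighted_gain = 0;
  for (const insn_gain &g : insns)
    {
      gain += g.gain;
      if (g.speed_p)
        weighted_gain += g.bb_freq * g.gain;
    }
  if (weighted_gain != weighted_cost)
    return weighted_gain > weighted_cost;
  return gain > cost;  // fall back to the unweighted totals on a tie
}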
diff --git a/gcc/config/i386/i386-features.h b/gcc/config/i386/i386-features.h
index 7f7c0f7..e3719b3 100644
--- a/gcc/config/i386/i386-features.h
+++ b/gcc/config/i386/i386-features.h
@@ -153,12 +153,13 @@ class scalar_chain
bitmap insns_conv;
hash_map<rtx, rtx> defs_map;
- unsigned n_sse_to_integer;
- unsigned n_integer_to_sse;
+ /* Cost of inserted conversions between integer and SSE. */
+ int cost_sse_integer;
+ sreal weighted_cost_sse_integer;
auto_vec<rtx_insn *> control_flow_insns;
bool build (bitmap candidates, unsigned insn_uid, bitmap disallowed);
- virtual int compute_convert_gain () = 0;
+ virtual bool compute_convert_gain () = 0;
int convert ();
protected:
@@ -184,7 +185,7 @@ class general_scalar_chain : public scalar_chain
public:
general_scalar_chain (enum machine_mode smode_, enum machine_mode vmode_)
: scalar_chain (smode_, vmode_) {}
- int compute_convert_gain () final override;
+ bool compute_convert_gain () final override;
private:
void convert_insn (rtx_insn *insn) final override;
@@ -196,7 +197,7 @@ class timode_scalar_chain : public scalar_chain
{
public:
timode_scalar_chain () : scalar_chain (TImode, V1TImode) {}
- int compute_convert_gain () final override;
+ bool compute_convert_gain () final override;
private:
void fix_debug_reg_uses (rtx reg);
diff --git a/gcc/config/s390/vector.md b/gcc/config/s390/vector.md
index 160e42a..cdd55b6 100644
--- a/gcc/config/s390/vector.md
+++ b/gcc/config/s390/vector.md
@@ -953,7 +953,7 @@
else
{
reg_pair += 2; // get rid of prefix %f
- snprintf (buf, sizeof (buf), "ldr\t%%f0,%%f1;vpdi\t%%%%v%s,%%v1,%%%%v%s,5", reg_pair, reg_pair);
+ snprintf (buf, sizeof (buf), "vlr\t%%v0,%%v1;vpdi\t%%%%v%s,%%v1,%%%%v%s,5", reg_pair, reg_pair);
output_asm_insn (buf, operands);
return "";
}
diff --git a/gcc/cp/cp-gimplify.cc b/gcc/cp/cp-gimplify.cc
index d2423fd..eab5550 100644
--- a/gcc/cp/cp-gimplify.cc
+++ b/gcc/cp/cp-gimplify.cc
@@ -3343,19 +3343,14 @@ cp_fold (tree x, fold_flags_t flags)
|| id_equal (DECL_NAME (callee), "addressof")
/* This addressof equivalent is used heavily in libstdc++. */
|| id_equal (DECL_NAME (callee), "__addressof")
+ || id_equal (DECL_NAME (callee), "to_underlying")
|| id_equal (DECL_NAME (callee), "as_const")))
{
r = CALL_EXPR_ARG (x, 0);
- /* Check that the return and argument types are sane before
- folding. */
- if (INDIRECT_TYPE_P (TREE_TYPE (x))
- && INDIRECT_TYPE_P (TREE_TYPE (r)))
- {
- if (!same_type_p (TREE_TYPE (x), TREE_TYPE (r)))
- r = build_nop (TREE_TYPE (x), r);
- x = cp_fold (r, flags);
- break;
- }
+ if (!same_type_p (TREE_TYPE (x), TREE_TYPE (r)))
+ r = build_nop (TREE_TYPE (x), r);
+ x = cp_fold (r, flags);
+ break;
}
int sv = optimize, nw = sv;
diff --git a/gcc/cp/decl2.cc b/gcc/cp/decl2.cc
index 15db1d6..a08d173 100644
--- a/gcc/cp/decl2.cc
+++ b/gcc/cp/decl2.cc
@@ -4186,7 +4186,11 @@ start_objects (bool initp, unsigned priority, bool has_body,
bool omp_target = false)
{
bool default_init = initp && priority == DEFAULT_INIT_PRIORITY;
- bool is_module_init = default_init && module_global_init_needed ();
+ /* FIXME: We may eventually want to treat OpenMP offload initializers
+ in modules specially as well. */
+ bool is_module_init = (default_init
+ && !omp_target
+ && module_global_init_needed ());
tree name = NULL_TREE;
if (is_module_init)
@@ -5878,12 +5882,8 @@ c_parse_final_cleanups (void)
if (static_init_fini_fns[true]->get_or_insert (DEFAULT_INIT_PRIORITY))
has_module_inits = true;
- if (flag_openmp)
- {
- if (!static_init_fini_fns[2 + true])
- static_init_fini_fns[2 + true] = priority_map_t::create_ggc ();
- static_init_fini_fns[2 + true]->get_or_insert (DEFAULT_INIT_PRIORITY);
- }
+ /* FIXME: We need to work out what static constructors on OpenMP offload
+ target in modules will look like. */
}
/* Generate initialization and destruction functions for all
diff --git a/gcc/cp/method.cc b/gcc/cp/method.cc
index 05c19cf..092bae2 100644
--- a/gcc/cp/method.cc
+++ b/gcc/cp/method.cc
@@ -2949,7 +2949,9 @@ synthesized_method_walk (tree ctype, special_function_kind sfk, bool const_p,
&& BINFO_VIRTUAL_P (base_binfo)
&& fn && TREE_CODE (fn) == FUNCTION_DECL
&& move_fn_p (fn) && !trivial_fn_p (fn)
- && vbase_has_user_provided_move_assign (BINFO_TYPE (base_binfo)))
+ && vbase_has_user_provided_move_assign (BINFO_TYPE (base_binfo))
+ && warning_enabled_at (DECL_SOURCE_LOCATION (fn),
+ OPT_Wvirtual_move_assign))
warning (OPT_Wvirtual_move_assign,
"defaulted move assignment for %qT calls a non-trivial "
"move assignment operator for virtual base %qT",
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index f562bf8..e778262 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -12638,7 +12638,11 @@ trees_out::write_function_def (tree decl)
{
unsigned flags = 0;
- flags |= 1 * DECL_NOT_REALLY_EXTERN (decl);
+ /* Whether the importer should emit this definition, if used. */
+ flags |= 1 * (DECL_NOT_REALLY_EXTERN (decl)
+ && (get_importer_interface (decl)
+ != importer_interface::always_import));
+
if (f)
{
flags |= 2;
diff --git a/gcc/cp/name-lookup.cc b/gcc/cp/name-lookup.cc
index 9b317c4..84b5e67 100644
--- a/gcc/cp/name-lookup.cc
+++ b/gcc/cp/name-lookup.cc
@@ -4556,6 +4556,9 @@ lookup_imported_hidden_friend (tree friend_tmpl)
|| !DECL_MODULE_ENTITY_P (inner))
return NULL_TREE;
+ /* Load any templates matching FRIEND_TMPL from importers. */
+ lazy_load_pendings (friend_tmpl);
+
tree name = DECL_NAME (inner);
tree *slot = find_namespace_slot (current_namespace, name, false);
if (!slot || !*slot || TREE_CODE (*slot) != BINDING_VECTOR)
diff --git a/gcc/cp/optimize.cc b/gcc/cp/optimize.cc
index 6f9a77f..fc4d6c2 100644
--- a/gcc/cp/optimize.cc
+++ b/gcc/cp/optimize.cc
@@ -309,8 +309,8 @@ maybe_thunk_body (tree fn, bool force)
defer_mangling_aliases = save_defer_mangling_aliases;
cgraph_node::get_create (fns[0])->set_comdat_group (comdat_group);
cgraph_node::get_create (fns[1])->add_to_same_comdat_group
- (cgraph_node::get_create (fns[0]));
- symtab_node::get (fn)->add_to_same_comdat_group
+ (cgraph_node::get (fns[0]));
+ symtab_node::get_create (fn)->add_to_same_comdat_group
(symtab_node::get (fns[0]));
if (fns[2])
/* If *[CD][12]* dtors go into the *[CD]5* comdat group and dtor is
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index aa6d6cb..8b82b20 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,23 @@
+2025-05-13 Yuao Ma <c8ef@outlook.com>
+ Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/113413
+ * intrinsic.cc (do_check): Minor doc polish.
+ (add_functions): Add atand(y, x) mapping.
+ * intrinsic.texi: Update atand example.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+ Daniil Kochergin <daniil2472s@gmail.com>
+ Tobias Burnus <tburnus@baylibre.com>
+
+ PR fortran/120191
+ * trans-intrinsic.cc (strip_kind_from_actual): Remove.
+ (gfc_conv_intrinsic_minmaxloc): Don't call strip_kind_from_actual.
+ Free and clear kind_arg->expr if non-NULL. Set back_arg->name to
+ "%VAL" instead of a loop looking for last argument. Remove actual
+ variable, use array_arg instead. Free and clear dim_arg->expr if
+ non-NULL for BT_CHARACTER cases instead of using a loop.
+
2025-05-11 Thomas Koenig <tkoenig@gcc.gnu.org>
PR fortran/120163
diff --git a/gcc/fortran/intrinsic.cc b/gcc/fortran/intrinsic.cc
index 2eba209..908e1da 100644
--- a/gcc/fortran/intrinsic.cc
+++ b/gcc/fortran/intrinsic.cc
@@ -376,11 +376,11 @@ do_check (gfc_intrinsic_sym *specific, gfc_actual_arglist *arg)
Argument list:
char * name of function
- int whether function is elemental
- int If the function can be used as an actual argument [1]
- bt return type of function
- int kind of return type of function
- int Fortran standard version
+ int whether function is elemental
+ int If the function can be used as an actual argument [1]
+ bt return type of function
+ int kind of return type of function
+ int Fortran standard version
check pointer to check function
simplify pointer to simplification function
resolve pointer to resolution function
@@ -396,7 +396,7 @@ do_check (gfc_intrinsic_sym *specific, gfc_actual_arglist *arg)
[1] Whether a function can or cannot be used as an actual argument is
- determined by its presence on the 13.6 list in Fortran 2003. The
+ determined by its presence in the 13.6 list in Fortran 2003. The
following intrinsics, which are GNU extensions, are considered allowed
as actual arguments: ACOSH ATANH DACOSH DASINH DATANH DCONJG DIMAG
ZABS ZCOS ZEXP ZLOG ZSIN ZSQRT. */
@@ -3479,6 +3479,13 @@ add_functions (void)
gfc_check_fn_r, gfc_simplify_atand, gfc_resolve_trigd,
x, BT_REAL, dr, REQUIRED);
+ /* Two-argument version of atand, equivalent to atan2d. */
+ add_sym_2 ("atand", GFC_ISYM_ATAN2D, CLASS_ELEMENTAL, ACTUAL_YES,
+ BT_REAL, dr, GFC_STD_F2023,
+ gfc_check_atan2, gfc_simplify_atan2d, gfc_resolve_trigd2,
+ y, BT_REAL, dr, REQUIRED,
+ x, BT_REAL, dr, REQUIRED);
+
make_generic ("atand", GFC_ISYM_ATAND, GFC_STD_F2023);
add_sym_1 ("datand", GFC_ISYM_ATAND, CLASS_ELEMENTAL, ACTUAL_YES,
diff --git a/gcc/fortran/intrinsic.texi b/gcc/fortran/intrinsic.texi
index 3a105bc..48c2d60 100644
--- a/gcc/fortran/intrinsic.texi
+++ b/gcc/fortran/intrinsic.texi
@@ -1547,7 +1547,7 @@ Fortran 90 and later
@node ATAN
-@section @code{ATAN} --- Arctangent function
+@section @code{ATAN} --- Arctangent function
@fnindex ATAN
@fnindex DATAN
@cindex trigonometric function, tangent, inverse
@@ -1619,6 +1619,7 @@ Degrees function: @*
@item @emph{Synopsis}:
@multitable @columnfractions .80
@item @code{RESULT = ATAND(X)}
+@item @code{RESULT = ATAND(Y, X)}
@end multitable
@item @emph{Description}:
@@ -1630,21 +1631,23 @@ Elemental function
@item @emph{Arguments}:
@multitable @columnfractions .15 .70
-@item @var{X} @tab The type shall be @code{REAL};
-if @var{Y} is present, @var{X} shall be REAL.
+@item @var{X} @tab The type shall be @code{REAL}.
@item @var{Y} @tab The type and kind type parameter shall be the same as @var{X}.
@end multitable
@item @emph{Return value}:
The return value is of the same type and kind as @var{X}.
-The result is in degrees and lies in the range
-@math{-90 \leq \Re \atand(x) \leq 90}.
+If @var{Y} is present, the result is identical to @code{ATAN2D(Y, X)}.
+Otherwise, the result is in degrees and lies in the range
+@math{-90 \leq \atand(x) \leq 90}.
@item @emph{Example}:
@smallexample
program test_atand
real(8) :: x = 2.866_8
+ real(4) :: x1 = 1.e0_4, y1 = 0.5e0_4
x = atand(x)
+ x1 = atand(y1, x1)
end program test_atand
@end smallexample
diff --git a/gcc/fortran/simplify.cc b/gcc/fortran/simplify.cc
index 208251b..1927097 100644
--- a/gcc/fortran/simplify.cc
+++ b/gcc/fortran/simplify.cc
@@ -1183,6 +1183,7 @@ gfc_simplify_asin (gfc_expr *x)
}
+#if MPFR_VERSION < MPFR_VERSION_NUM(4,2,0)
/* Convert radians to degrees, i.e., x * 180 / pi. */
static void
@@ -1196,6 +1197,7 @@ rad2deg (mpfr_t x)
mpfr_div (x, x, tmp, GFC_RND_MODE);
mpfr_clear (tmp);
}
+#endif
/* Simplify ACOSD(X) where the returned value has units of degree. */
@@ -1217,8 +1219,12 @@ gfc_simplify_acosd (gfc_expr *x)
}
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_acosu (result->value.real, x->value.real, 360, GFC_RND_MODE);
+#else
mpfr_acos (result->value.real, x->value.real, GFC_RND_MODE);
rad2deg (result->value.real);
+#endif
return range_check (result, "ACOSD");
}
@@ -1243,8 +1249,12 @@ gfc_simplify_asind (gfc_expr *x)
}
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_asinu (result->value.real, x->value.real, 360, GFC_RND_MODE);
+#else
mpfr_asin (result->value.real, x->value.real, GFC_RND_MODE);
rad2deg (result->value.real);
+#endif
return range_check (result, "ASIND");
}
@@ -1261,8 +1271,12 @@ gfc_simplify_atand (gfc_expr *x)
return NULL;
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_atanu (result->value.real, x->value.real, 360, GFC_RND_MODE);
+#else
mpfr_atan (result->value.real, x->value.real, GFC_RND_MODE);
rad2deg (result->value.real);
+#endif
return range_check (result, "ATAND");
}
@@ -1954,8 +1968,13 @@ gfc_simplify_atan2d (gfc_expr *y, gfc_expr *x)
}
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_atan2u (result->value.real, y->value.real, x->value.real, 360,
+ GFC_RND_MODE);
+#else
mpfr_atan2 (result->value.real, y->value.real, x->value.real, GFC_RND_MODE);
rad2deg (result->value.real);
+#endif
return range_check (result, "ATAN2D");
}
@@ -1990,6 +2009,8 @@ gfc_simplify_cos (gfc_expr *x)
}
+#if MPFR_VERSION < MPFR_VERSION_NUM(4,2,0)
+/* Used by trigd_fe.inc. */
static void
deg2rad (mpfr_t x)
{
@@ -2001,11 +2022,13 @@ deg2rad (mpfr_t x)
mpfr_mul (x, x, d2r, GFC_RND_MODE);
mpfr_clear (d2r);
}
+#endif
+#if MPFR_VERSION < MPFR_VERSION_NUM(4,2,0)
/* Simplification routines for SIND, COSD, TAND. */
#include "trigd_fe.inc"
-
+#endif
/* Simplify COSD(X) where X has the unit of degree. */
@@ -2018,8 +2041,12 @@ gfc_simplify_cosd (gfc_expr *x)
return NULL;
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_cosu (result->value.real, x->value.real, 360, GFC_RND_MODE);
+#else
mpfr_set (result->value.real, x->value.real, GFC_RND_MODE);
simplify_cosd (result->value.real);
+#endif
return range_check (result, "COSD");
}
@@ -2036,8 +2063,12 @@ gfc_simplify_sind (gfc_expr *x)
return NULL;
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_sinu (result->value.real, x->value.real, 360, GFC_RND_MODE);
+#else
mpfr_set (result->value.real, x->value.real, GFC_RND_MODE);
simplify_sind (result->value.real);
+#endif
return range_check (result, "SIND");
}
@@ -2054,8 +2085,12 @@ gfc_simplify_tand (gfc_expr *x)
return NULL;
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_tanu (result->value.real, x->value.real, 360, GFC_RND_MODE);
+#else
mpfr_set (result->value.real, x->value.real, GFC_RND_MODE);
simplify_tand (result->value.real);
+#endif
return range_check (result, "TAND");
}
@@ -2078,7 +2113,11 @@ gfc_simplify_cotand (gfc_expr *x)
result = gfc_get_constant_expr (x->ts.type, x->ts.kind, &x->where);
mpfr_set (result->value.real, x->value.real, GFC_RND_MODE);
mpfr_add_ui (result->value.real, result->value.real, 90, GFC_RND_MODE);
+#if MPFR_VERSION >= MPFR_VERSION_NUM(4,2,0)
+ mpfr_tanu (result->value.real, result->value.real, 360, GFC_RND_MODE);
+#else
simplify_tand (result->value.real);
+#endif
mpfr_neg (result->value.real, result->value.real, GFC_RND_MODE);
return range_check (result, "COTAND");
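
The simplify.cc hunks above switch the degree trigonometric folding to
the mpfr_sinu/mpfr_cosu/mpfr_tanu/mpfr_asinu/mpfr_acosu/mpfr_atanu/
mpfr_atan2u family added in MPFR 4.2.0.  These take an extra period
argument u and compute, e.g., sinu(x) = sin(2*pi*x/u), so u = 360 gives
degree semantics in a single correctly rounded step instead of an
explicit deg2rad/rad2deg scaling with its extra rounding.  A standalone
usage sketch, assuming MPFR >= 4.2.0 is installed:

#include <mpfr.h>
#include <stdio.h>

int
main (void)
{
  mpfr_t x, r;
  mpfr_init2 (x, 113);
  mpfr_init2 (r, 113);
  mpfr_set_ui (x, 30, MPFR_RNDN);         /* 30 degrees */
  mpfr_sinu (r, x, 360, MPFR_RNDN);       /* sin of 30 degrees: exactly 0.5 */
  mpfr_printf ("sind(30)      = %.6Rf\n", r);
  mpfr_atan2u (r, x, x, 360, MPFR_RNDN);  /* atan2d(30, 30): exactly 45 */
  mpfr_printf ("atan2d(30,30) = %.6Rf\n", r);
  mpfr_clear (x);
  mpfr_clear (r);
  return 0;
}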
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index e63fd6f..b8c1588 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -6276,6 +6276,32 @@ replace_stmt_with_simplification (gimple_stmt_iterator *gsi,
}
else if (!inplace)
{
+ /* For throwing comparisons, see if the GIMPLE_COND is the same as
+ the comparison would be.
+ This can happen due to the match pattern for
+ `(ne (cmp @0 @1) integer_zerop)` which creates a new expression
+ for the comparison. */
+ if (TREE_CODE_CLASS (code) == tcc_comparison
+ && flag_exceptions
+ && cfun->can_throw_non_call_exceptions
+ && operation_could_trap_p (code,
+ FLOAT_TYPE_P (TREE_TYPE (ops[0])),
+ false, NULL_TREE))
+ {
+ tree lhs = gimple_cond_lhs (cond_stmt);
+ if (gimple_cond_code (cond_stmt) == NE_EXPR
+ && TREE_CODE (lhs) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ && integer_zerop (gimple_cond_rhs (cond_stmt)))
+ {
+ gimple *s = SSA_NAME_DEF_STMT (lhs);
+ if (is_gimple_assign (s)
+ && gimple_assign_rhs_code (s) == code
+ && operand_equal_p (gimple_assign_rhs1 (s), ops[0])
+ && operand_equal_p (gimple_assign_rhs2 (s), ops[1]))
+ return false;
+ }
+ }
tree res = maybe_push_res_to_seq (res_op, seq);
if (!res)
return false;
diff --git a/gcc/m2/ChangeLog b/gcc/m2/ChangeLog
index 058468b..40396a2 100644
--- a/gcc/m2/ChangeLog
+++ b/gcc/m2/ChangeLog
@@ -1,3 +1,14 @@
+2025-05-13 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/120253
+ * m2.flex (FIRST_COLUMN): New define.
+ (updatepos): Remove commented code.
+ (consumeLine): Assign column to FIRST_COLUMN.
+ (initLine): Ditto.
+ (m2flex_GetColumnNo): Return FIRST_COLUMN if currentLine is NULL.
+ (m2flex_GetLineNo): Rewrite for positive logic.
+ (m2flex_GetLocation): Ditto.
+
2025-05-05 Gaius Mulley <gaiusmod2@gmail.com>
PR modula2/120117
diff --git a/gcc/m2/m2.flex b/gcc/m2/m2.flex
index d08ac3e..e3cf010 100644
--- a/gcc/m2/m2.flex
+++ b/gcc/m2/m2.flex
@@ -48,6 +48,8 @@ static int cpreprocessor = 0; /* Replace this with correct getter. */
#define EXTERN extern "C"
#endif
+#define FIRST_COLUMN 1
+
/* m2.flex provides a lexical analyser for GNU Modula-2. */
struct lineInfo {
@@ -558,7 +560,7 @@ static void consumeLine (void)
currentLine->lineno = lineno;
currentLine->tokenpos=0;
currentLine->nextpos=0;
- currentLine->column=0;
+ currentLine->column=FIRST_COLUMN;
START_LINE (lineno, yyleng);
yyless(1); /* push back all but the \n */
traceLine ();
@@ -621,7 +623,6 @@ static void updatepos (void)
seenModuleStart = false;
currentLine->nextpos = currentLine->tokenpos+yyleng;
currentLine->toklen = yyleng;
- /* if (currentLine->column == 0) */
currentLine->column = currentLine->tokenpos+1;
currentLine->location =
M2Options_OverrideLocation (GET_LOCATION (currentLine->column,
@@ -677,7 +678,7 @@ static void initLine (void)
currentLine->toklen = 0;
currentLine->nextpos = 0;
currentLine->lineno = lineno;
- currentLine->column = 0;
+ currentLine->column = FIRST_COLUMN;
currentLine->inuse = true;
currentLine->next = NULL;
}
@@ -812,10 +813,10 @@ EXTERN bool m2flex_OpenSource (char *s)
EXTERN int m2flex_GetLineNo (void)
{
- if (currentLine != NULL)
- return currentLine->lineno;
- else
+ if (currentLine == NULL)
return 0;
+ else
+ return currentLine->lineno;
}
/*
@@ -825,10 +826,10 @@ EXTERN int m2flex_GetLineNo (void)
EXTERN int m2flex_GetColumnNo (void)
{
- if (currentLine != NULL)
- return currentLine->column;
+ if (currentLine == NULL)
+ return FIRST_COLUMN;
else
- return 0;
+ return currentLine->column;
}
/*
@@ -837,10 +838,10 @@ EXTERN int m2flex_GetColumnNo (void)
EXTERN location_t m2flex_GetLocation (void)
{
- if (currentLine != NULL)
- return currentLine->location;
- else
+ if (currentLine == NULL)
return 0;
+ else
+ return currentLine->location;
}
/*
diff --git a/gcc/match.pd b/gcc/match.pd
index f405068..9613640 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -11308,26 +11308,58 @@ and,
(match (ctz_table_index @1 @2 @3)
(rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
+/* Floating point/integer comparison and integer -> integer
+   or floating point -> floating point conversion. */
(match (cond_expr_convert_p @0 @2 @3 @6)
(cond (simple_comparison@6 @0 @1) (convert@4 @2) (convert@5 @3))
- (if (INTEGRAL_TYPE_P (type)
- && INTEGRAL_TYPE_P (TREE_TYPE (@2))
- && INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && INTEGRAL_TYPE_P (TREE_TYPE (@3))
- && TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (@0))
- && TYPE_PRECISION (TREE_TYPE (@0))
- == TYPE_PRECISION (TREE_TYPE (@2))
- && TYPE_PRECISION (TREE_TYPE (@0))
- == TYPE_PRECISION (TREE_TYPE (@3))
+ (if ((INTEGRAL_TYPE_P (type)
+ || (!flag_trapping_math && SCALAR_FLOAT_TYPE_P (type)))
+ && ((INTEGRAL_TYPE_P (TREE_TYPE (@2))
+ && INTEGRAL_TYPE_P (TREE_TYPE (@3)))
+ || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@2))
+ && types_match (TREE_TYPE (@2), TREE_TYPE (@3))))
+ && !operand_equal_p (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (@0)))
+ && operand_equal_p (TYPE_SIZE (TREE_TYPE (@0)),
+ TYPE_SIZE (TREE_TYPE (@2)))
+ && operand_equal_p (TYPE_SIZE (TREE_TYPE (@0)),
+ TYPE_SIZE (TREE_TYPE (@3)))
/* For vect_recog_cond_expr_convert_pattern, @2 and @3 can differ in
signedness when convert is truncation, but not ok for extension since
it's sign_extend vs zero_extend. */
- && (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type)
+ && (known_gt (tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (@0))),
+ tree_to_poly_uint64 (TYPE_SIZE (type)))
|| (TYPE_UNSIGNED (TREE_TYPE (@2))
== TYPE_UNSIGNED (TREE_TYPE (@3))))
&& single_use (@4)
&& single_use (@5))))
+/* Floating point or integer comparison and integer to floating point
+ conversion. */
+(match (cond_expr_convert_p @0 @2 @3 @6)
+ (cond (simple_comparison@6 @0 @1) (float@4 @2) (float@5 @3))
+ (if (SCALAR_FLOAT_TYPE_P (type) && !flag_trapping_math
+ && INTEGRAL_TYPE_P (TREE_TYPE (@2))
+ && types_match (TREE_TYPE (@2), TREE_TYPE (@3))
+ && !operand_equal_p (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (@0)))
+ && operand_equal_p (TYPE_SIZE (TREE_TYPE (@0)),
+ TYPE_SIZE (TREE_TYPE (@2)))
+ && single_use (@4)
+ && single_use (@5))))
+
+/* Floating point or integer comparison and floating point to integer
+ conversion. */
+(match (cond_expr_convert_p @0 @2 @3 @6)
+ (cond (simple_comparison@6 @0 @1) (fix_trunc@4 @2) (fix_trunc@5 @3))
+ (if (INTEGRAL_TYPE_P (type) && !flag_trapping_math
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (@2))
+ && types_match (TREE_TYPE (@2), TREE_TYPE (@3))
+ && !operand_equal_p (TYPE_SIZE (type),
+ TYPE_SIZE (TREE_TYPE (@0)))
+ && operand_equal_p (TYPE_SIZE (TREE_TYPE (@0)),
+ TYPE_SIZE (TREE_TYPE (@2)))
+ && single_use (@4)
+ && single_use (@5))))
+
(for bit_op (bit_and bit_ior bit_xor)
(match (bitwise_induction_p @0 @2 @3)
(bit_op:c
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index b7e62e8..6a9c9c7 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,69 @@
+2025-05-13 Tobias Burnus <tburnus@baylibre.com>
+
+ PR fortran/113413
+ * gfortran.dg/dec_math.f90: Add comment that degree
+ functions are part of F2023.
+
+2025-05-13 Yuao Ma <c8ef@outlook.com>
+ Steven G. Kargl <kargl@gcc.gnu.org>
+
+ PR fortran/113413
+ * gfortran.dg/dec_math.f90: Add atand(y, x) testcase.
+
+2025-05-13 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR tree-optimization/119903
+ * g++.dg/tree-ssa/pr119903-1.C: New test.
+
+2025-05-13 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR middle-end/118868
+ * c-c++-common/pr118868-1.c: New test.
+
+2025-05-13 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/120188
+ * gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp:
+ Remove call to gm2-dg-frontend-configure-check and replace with
+ tests for whether plugin variables exist.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR libfortran/120196
+ * gfortran.dg/pr120196.f90: New test.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR fortran/120191
+ * gfortran.dg/pr120191_3.f90: New test.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR fortran/120191
+ * gfortran.dg/pr120191_2.f90: New test.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+ Daniil Kochergin <daniil2472s@gmail.com>
+ Tobias Burnus <tburnus@baylibre.com>
+
+ PR fortran/120191
+ * gfortran.dg/pr120191_1.f90: New test.
+
+2025-05-13 David Malcolm <dmalcolm@redhat.com>
+
+ PR other/116792
+ * gcc.dg/html-output/missing-semicolon.py: Verify that we don't
+ have an empty "gcc-annotated-source" and we do have a
+ "gcc-generated-patch".
+ * gcc.dg/plugin/diagnostic-test-metadata-html.c: New test.
+ * gcc.dg/plugin/diagnostic-test-metadata-html.py: New test script.
+ * gcc.dg/plugin/diagnostic-test-paths-2.c: Add
+ "-fdiagnostics-add-output=experimental-html" to options. Add
+ invocation of diagnostic-test-paths-2.py.
+ * gcc.dg/plugin/diagnostic-test-paths-2.py: New test script.
+ * gcc.dg/plugin/plugin.exp (plugin_test_list): Add
+ diagnostic-test-metadata-html.c.
+
2025-05-13 Andrew MacLeod <amacleod@redhat.com>
* gcc.dg/tree-ssa/vrp124.c: New.
diff --git a/gcc/testsuite/c-c++-common/pr118868-1.c b/gcc/testsuite/c-c++-common/pr118868-1.c
new file mode 100644
index 0000000..d0a9e77f7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/pr118868-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+
+/* PR middle-end/118868 */
+
+/* __builtin_assoc_barrier should work on pointers without any ICE */
+void *f(void *a)
+{
+ return __builtin_assoc_barrier(a);
+}
diff --git a/gcc/testsuite/g++.dg/modules/clone-4_a.C b/gcc/testsuite/g++.dg/modules/clone-4_a.C
new file mode 100644
index 0000000..3ee6109
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/clone-4_a.C
@@ -0,0 +1,12 @@
+// PR c++/120125
+// { dg-additional-options "-fmodules -fdeclone-ctor-dtor" }
+// { dg-module-cmi M }
+
+export module M;
+
+void foo();
+export template <typename _Tp> struct __shared_ptr {
+ inline __shared_ptr() { foo(); }
+};
+
+template class __shared_ptr<int>;
diff --git a/gcc/testsuite/g++.dg/modules/clone-4_b.C b/gcc/testsuite/g++.dg/modules/clone-4_b.C
new file mode 100644
index 0000000..1b36cb4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/clone-4_b.C
@@ -0,0 +1,12 @@
+// PR c++/120125
+// { dg-additional-options "-fmodules -fdeclone-ctor-dtor" }
+
+import M;
+
+int main() {
+ __shared_ptr<int> s1;
+ __shared_ptr<double> s2;
+}
+
+// { dg-final { scan-assembler-not {_ZNW1M12__shared_ptrIiEC[1-4]Ev:} } }
+// { dg-final { scan-assembler {_ZNW1M12__shared_ptrIdEC2Ev:} } }
diff --git a/gcc/testsuite/g++.dg/modules/openmp-1.C b/gcc/testsuite/g++.dg/modules/openmp-1.C
new file mode 100644
index 0000000..b5a30ad
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/openmp-1.C
@@ -0,0 +1,9 @@
+// PR c++/119864
+// { dg-do assemble }
+// { dg-additional-options "-fmodules -fopenmp" }
+// { dg-require-effective-target "fopenmp" }
+
+export module M;
+
+int foo();
+int x = foo();
diff --git a/gcc/testsuite/g++.dg/modules/tpl-friend-19_a.C b/gcc/testsuite/g++.dg/modules/tpl-friend-19_a.C
new file mode 100644
index 0000000..59f0175
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/tpl-friend-19_a.C
@@ -0,0 +1,16 @@
+// { dg-additional-options "-fmodules -Wno-global-module" }
+// { dg-module-cmi M }
+
+module;
+
+template <typename _MemFunPtr>
+class _Mem_fn_base {
+ template <typename> friend struct _Bind_check_arity;
+};
+
+template <typename> struct _Bind_check_arity {};
+
+export module M;
+
+template struct _Bind_check_arity<int>;
+export _Mem_fn_base<int> mem_fn();
diff --git a/gcc/testsuite/g++.dg/modules/tpl-friend-19_b.C b/gcc/testsuite/g++.dg/modules/tpl-friend-19_b.C
new file mode 100644
index 0000000..ce99647
--- /dev/null
+++ b/gcc/testsuite/g++.dg/modules/tpl-friend-19_b.C
@@ -0,0 +1,6 @@
+// { dg-additional-options "-fmodules" }
+
+import M;
+int main() {
+ mem_fn();
+}
diff --git a/gcc/testsuite/g++.dg/opt/pr96780_cpp23.C b/gcc/testsuite/g++.dg/opt/pr96780_cpp23.C
new file mode 100644
index 0000000..ba4a837
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/pr96780_cpp23.C
@@ -0,0 +1,16 @@
+// PR c++/96780
+// Verify calls to std::move/forward are folded away by the frontend.
+// { dg-do compile { target c++23 } }
+// { dg-additional-options "-ffold-simple-inlines -fdump-tree-gimple" }
+
+#include <utility>
+
+enum class A : char {a};
+
+extern A& x;
+
+void f() {
+ auto&& x1 = std::to_underlying(x);
+}
+
+// { dg-final { scan-tree-dump-not "= std::to_underlying" "gimple" } }
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C b/gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C
new file mode 100644
index 0000000..605f989
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C
@@ -0,0 +1,24 @@
+// { dg-do compile { target c++11 } }
+// { dg-options "-O2 -fnon-call-exceptions -ftrapping-math -fdump-tree-optimized-eh" }
+
+// PR tree-optimization/119903
+// match-and-simplify would turn the internally-throwing FP comparison
+// into an externally-throwing one and lose the landing pad.
+
+int f() noexcept;
+int g() noexcept;
+
+int m(double a)
+{
+ try {
+ if (a < 1.0)
+ return f();
+ return g();
+ }catch(...)
+ {
+ return -1;
+ }
+}
+
+// Make sure there is a landing pad for the non-call exception from the comparison.
+// { dg-final { scan-tree-dump "LP " "optimized" } }
diff --git a/gcc/testsuite/g++.dg/warn/ignore-virtual-move-assign.C b/gcc/testsuite/g++.dg/warn/ignore-virtual-move-assign.C
new file mode 100644
index 0000000..73922e6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/ignore-virtual-move-assign.C
@@ -0,0 +1,45 @@
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wvirtual-move-assign -Wattributes" }
+
+#include <utility>
+
+class A
+{
+ int val;
+
+public:
+ explicit A (int val) : val (val) {}
+
+ A (const A &oth) : val (0) {}
+ A &operator= (const A &oth) { return *this; }
+ A (A &&oth) : val (oth.val) { oth.val = 0; }
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wvirtual-move-assign"
+ A &operator= (A &&oth)
+ {
+ val += oth.val;
+ oth.val = 0;
+ return *this;
+ }
+#pragma GCC diagnostic pop
+};
+
+class B : virtual A
+{
+public:
+ B () : A (12) {}
+ B &operator= (B &&) = default;
+};
+
+class C : virtual A
+{
+public:
+ C () : A (12) {}
+};
+
+void
+test_fn ()
+{
+ C x, y;
+ x = std::move (y);
+}
diff --git a/gcc/testsuite/gcc.dg/ipa/pr120044-1.c b/gcc/testsuite/gcc.dg/ipa/pr120044-1.c
new file mode 100644
index 0000000..f9fee3e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ipa/pr120044-1.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fno-early-inlining -fno-tree-fre -fno-tree-pre -fno-code-hoisting -fno-inline" } */
+
+struct a {
+ int b;
+} const c;
+void d(char p, struct a e) {
+ while (e.b)
+ ;
+}
+static unsigned short f(const struct a g) {
+ d(g.b, g);
+ return g.b;
+}
+int main() {
+ return f(c);
+}
diff --git a/gcc/testsuite/gcc.dg/ipa/pr120044-2.c b/gcc/testsuite/gcc.dg/ipa/pr120044-2.c
new file mode 100644
index 0000000..5130791
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ipa/pr120044-2.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fno-early-inlining -fno-tree-fre -fno-tree-pre -fno-code-hoisting -fno-ipa-cp" } */
+
+struct a {
+ int b;
+} const c;
+void d(char p, struct a e) {
+ while (e.b)
+ ;
+}
+static unsigned short f(const struct a g) {
+ d(g.b, g);
+ return g.b;
+}
+int main() {
+ return f(c);
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr114864.c b/gcc/testsuite/gcc.dg/tree-ssa/pr114864.c
new file mode 100644
index 0000000..cd9b94c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr114864.c
@@ -0,0 +1,15 @@
+/* { dg-do run } */
+/* { dg-options "-O1 -fno-tree-dce -fno-tree-fre" } */
+
+struct a {
+ int b;
+} const c;
+void d(const struct a f) {}
+void e(const struct a f) {
+ f.b == 0 ? 1 : f.b;
+ d(f);
+}
+int main() {
+ e(c);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr103771-4.c b/gcc/testsuite/gcc.target/i386/pr103771-4.c
new file mode 100644
index 0000000..299337d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr103771-4.c
@@ -0,0 +1,82 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64-v4 -Ofast -fdump-tree-vect-details" } */
+/* { dg-final { scan-assembler-not "kshift" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times "loop vectorized using 64 byte vectors" 6 "vect" { target { ! ia32 } } } } */
+
+void
+foo (float* a, float* b, int* c, int* d, long long* __restrict e, int n)
+{
+ for (int i = 0 ; i != n; i++)
+ {
+ long long tmp = c[i];
+ long long tmp2 = d[i];
+ if (a[i] < b[i])
+ tmp = tmp2;
+ e[i] = tmp;
+ }
+}
+
+void
+foo1 (double* a, double* b, long long* c, long long* d, int* __restrict e, int n)
+{
+ for (int i = 0 ; i != n; i++)
+ {
+ int tmp = (int)c[i];
+ int tmp2 = (int)d[i];
+ if (a[i] < b[i])
+ tmp = tmp2;
+ e[i] = tmp;
+ }
+}
+
+void
+foo2 (float* a, float* b, int* c, int* d, double* __restrict e, int n)
+{
+ for (int i = 0 ; i != n; i++)
+ {
+ double tmp = c[i];
+ double tmp2 = d[i];
+ if (a[i] < b[i])
+ tmp = tmp2;
+ e[i] = tmp;
+ }
+}
+
+void
+foo3 (double* a, double* b, long long* c, long long* d, float* __restrict e, int n)
+{
+ for (int i = 0 ; i != n; i++)
+ {
+ float tmp = c[i];
+ float tmp2 = d[i];
+ if (a[i] < b[i])
+ tmp = tmp2;
+ e[i] = tmp;
+ }
+}
+
+void
+foo4 (int* a, int* b, int* c, int* d, double* __restrict e, int n)
+{
+ for (int i = 0 ; i != n; i++)
+ {
+ double tmp = c[i];
+ double tmp2 = d[i];
+ if (a[i] < b[i])
+ tmp = tmp2;
+ e[i] = tmp;
+ }
+}
+
+void
+foo5 (long long* a, long long* b, long long* c, long long* d, float* __restrict e, int n)
+{
+ for (int i = 0 ; i != n; i++)
+ {
+ float tmp = c[i];
+ float tmp2 = d[i];
+ if (a[i] < b[i])
+ tmp = tmp2;
+ e[i] = tmp;
+ }
+}
diff --git a/gcc/testsuite/gfortran.dg/dec_math.f90 b/gcc/testsuite/gfortran.dg/dec_math.f90
index 393e7de..79c1807 100644
--- a/gcc/testsuite/gfortran.dg/dec_math.f90
+++ b/gcc/testsuite/gfortran.dg/dec_math.f90
@@ -5,6 +5,12 @@
! Test extra math intrinsics formerly offered by -fdec-math,
! now included with -std=gnu or -std=legacy.
!
+! Since Fortran 2023, the degree trigonometric functions (sind, cosd, ...)
+! are part of the standard; additionally, Fortran 2023 added a two-argument
! version of atand as an alias for atan2d.
+!
+! Note that cotan and cotand are not part of Fortran 2023; hence, this file
+! still requires -std=gnu and cannot be compiled with -std=f2023.
module dec_math
@@ -522,6 +528,69 @@ call cmpq(q_i1, q_oxe, q_ox, q_tol, "(x) qatand")
#endif
! Input
+f_i1 = 1.0_4
+f_i2 = 2.0_4
+d_i1 = 1.0_8
+d_i2 = 2.0_8
+#ifdef __GFC_REAL_10__
+l_i1 = 1.0_10
+l_i2 = 2.0_10
+#endif
+#ifdef __GFC_REAL_16__
+q_i1 = 1.0_16
+q_i2 = 2.0_16
+#endif
+
+! Expected
+f_oe = r2d_f * atan2 (f_i1, f_i2)
+f_oxe = r2d_f * atan2 (xf * f_i1, f_i2)
+d_oe = r2d_d * atan2 (d_i1, d_i2)
+d_oxe = r2d_d * atan2 (xd * d_i1, d_i2)
+#ifdef __GFC_REAL_10__
+l_oe = r2d_l * atan2 (l_i1, l_i2)
+l_oxe = r2d_l * atan2 (xl * l_i1, l_i2)
+#endif
+#ifdef __GFC_REAL_16__
+q_oe = r2d_q * atan2 (q_i1, q_i2)
+q_oxe = r2d_q * atan2 (xq * q_i1, q_i2)
+#endif
+
+! Actual
+f_oa = atand (f_i1, f_i2)
+f_oc = atand (1.0_4, 2.0_4)
+f_ox = atand (xf * f_i1, f_i2)
+d_oa = atand (d_i1, d_i2)
+d_oc = atand (1.0_8, 2.0_8)
+d_ox = atand (xd * d_i1, d_i2)
+#ifdef __GFC_REAL_10__
+l_oa = atand (l_i1, l_i2)
+l_oc = atand (1.0_10, 2.0_10)
+l_ox = atand (xl * l_i1, l_i2)
+#endif
+#ifdef __GFC_REAL_16__
+q_oa = atand (q_i1, q_i2)
+q_oc = atand (1.0_16, 2.0_16)
+q_ox = atand (xq * q_i1, q_i2)
+#endif
+
+call cmpf(f_i1, f_oe, f_oa, f_tol, "( ) fatand")
+call cmpf(f_i1, f_oe, f_oc, f_tol, "(c) fatand")
+call cmpf(f_i1, f_oxe, f_ox, f_tol, "(x) fatand")
+call cmpd(d_i1, d_oe, d_oa, d_tol, "( ) datand")
+call cmpd(d_i1, d_oe, d_oc, d_tol, "(c) datand")
+call cmpd(d_i1, d_oxe, d_ox, d_tol, "(x) datand")
+#ifdef __GFC_REAL_10__
+call cmpl(l_i1, l_oe, l_oa, l_tol, "( ) latand")
+call cmpl(l_i1, l_oe, l_oc, l_tol, "(c) latand")
+call cmpl(l_i1, l_oxe, l_ox, l_tol, "(x) latand")
+#endif
+#ifdef __GFC_REAL_16__
+call cmpq(q_i1, q_oe, q_oa, q_tol, "( ) qatand")
+call cmpq(q_i1, q_oe, q_oc, q_tol, "(c) qatand")
+call cmpq(q_i1, q_oxe, q_ox, q_tol, "(x) qatand")
+#endif
+
+! Input
f_i1 = 34.3775_4
d_i1 = 34.3774677078494_8
#ifdef __GFC_REAL_10__
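
For reference, the identity these checks exercise: the Fortran 2023 two-argument atand (y, x) equals atan2d (y, x), i.e. atan2 scaled to degrees. A minimal illustrative sketch of the expected values, not part of the patch (assumes M_PI from <cmath>, which is POSIX rather than ISO C++):

    #include <cmath>

    // Hypothetical helper: two-argument ATAND per Fortran 2023 is
    // ATAN2D, i.e. atan2 converted from radians to degrees.
    double atand2 (double y, double x)
    {
      return (180.0 / M_PI) * std::atan2 (y, x);
    }
    // atand2 (1.0, 2.0) ~= 26.565, matching r2d_* * atan2 (*_i1, *_i2) above.
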
diff --git a/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp b/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp
index 8a41ff8..6ddf2d5 100644
--- a/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp
+++ b/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp
@@ -11,7 +11,7 @@ gm2_init_pim4 $srcdir/$subdir
dg-init
# If the --enable-plugin has not been enabled during configure, bail.
-if { ![gm2-dg-frontend-configure-check "enable-plugin" ] } {
+if { ![info exists TESTING_IN_BUILD_TREE] || ![info exists ENABLE_PLUGIN] } {
return
}
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index 6a95b82..928459a 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -3870,7 +3870,6 @@ verify_gimple_assign_unary (gassign *stmt)
case NEGATE_EXPR:
case ABS_EXPR:
case BIT_NOT_EXPR:
- case PAREN_EXPR:
case CONJ_EXPR:
/* Disallow pointer and offset types for many of the unary gimple. */
if (POINTER_TYPE_P (lhs_type)
@@ -3883,6 +3882,17 @@ verify_gimple_assign_unary (gassign *stmt)
}
break;
+ case PAREN_EXPR:
+      /* Disallow non-arithmetic types on PAREN_EXPR.  */
+ if (AGGREGATE_TYPE_P (lhs_type))
+ {
+ error ("invalid types for %qs", code_name);
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ return true;
+ }
+ break;
+
case ABSU_EXPR:
if (!ANY_INTEGRAL_TYPE_P (lhs_type)
|| !TYPE_UNSIGNED (lhs_type)
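
For context, PAREN_EXPR is the GIMPLE representation of GCC's __builtin_assoc_barrier. Splitting it out of the shared arithmetic case means aggregate-typed operands are now rejected while pointer and offset types, which the shared case above refuses, remain allowed. A minimal sketch of the scalar use the verifier must keep accepting (the builtin is real; the function itself is illustrative):

    // The operand of __builtin_assoc_barrier is lowered to PAREN_EXPR;
    // -ffast-math may not reassociate across it.
    double keep_order (double a, double b, double c)
    {
      return __builtin_assoc_barrier (a + b) - c;
    }
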
diff --git a/gcc/tree-sra.cc b/gcc/tree-sra.cc
index 302b73e..4b6daf7 100644
--- a/gcc/tree-sra.cc
+++ b/gcc/tree-sra.cc
@@ -4205,8 +4205,10 @@ sra_modify_expr (tree *expr, bool write, gimple_stmt_iterator *stmt_gsi,
}
else
{
- gassign *stmt;
+ if (TREE_READONLY (access->base))
+ return false;
+ gassign *stmt;
if (access->grp_partial_lhs)
repl = force_gimple_operand_gsi (stmt_gsi, repl, true,
NULL_TREE, true,
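
This guard mirrors the new gcc.dg/tree-ssa/pr114864.c test at the top of this section: sra_modify_expr must not install a scalar replacement whose base is TREE_READONLY, since storing back into a const-qualified by-value aggregate is invalid. A hedged sketch of the protected shape (illustrative, not the actual testcase):

    struct a { int b; };

    void e (const struct a f)  // f's backing decl is TREE_READONLY
    {
      /* Reading f.b is fine; the early return above prevents SRA from
         creating a scalar replacement that would be written back into
         the read-only base.  */
      (void) (f.b == 0 ? 1 : f.b);
    }
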
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index fe6f3cf..2d1a688 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -9698,7 +9698,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree vectype = SLP_TREE_VECTYPE (slp_node);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
enum vect_induction_op_type induction_type
= STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE (stmt_info);
@@ -9723,7 +9723,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
/* TODO: Support multi-lane SLP for nonlinear iv. There should be separate
vector iv update for each iv and a permutation to generate wanted
vector iv. */
- if (slp_node && SLP_TREE_LANES (slp_node) > 1)
+ if (SLP_TREE_LANES (slp_node) > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -9934,13 +9934,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
UNKNOWN_LOCATION);
- if (slp_node)
- slp_node->push_vec_def (induction_phi);
- else
- {
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (induction_phi);
- *vec_stmt = induction_phi;
- }
+ slp_node->push_vec_def (induction_phi);
/* In case that vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
@@ -9970,10 +9964,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
induction_type);
gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
new_stmt = SSA_NAME_DEF_STMT (vec_def);
- if (slp_node)
- slp_node->push_vec_def (new_stmt);
- else
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
}
}
@@ -9999,15 +9990,13 @@ vectorizable_induction (loop_vec_info loop_vinfo,
stmt_vector_for_cost *cost_vec)
{
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- unsigned ncopies;
bool nested_in_vect_loop = false;
class loop *iv_loop;
tree vec_def;
edge pe = loop_preheader_edge (loop);
basic_block new_bb;
- tree new_vec, vec_init = NULL_TREE, vec_step, t;
+ tree vec_init = NULL_TREE, vec_step, t;
tree new_name;
- gimple *new_stmt;
gphi *induction_phi;
tree induc_def, vec_dest;
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
@@ -10034,15 +10023,9 @@ vectorizable_induction (loop_vec_info loop_vinfo,
return vectorizable_nonlinear_induction (loop_vinfo, stmt_info,
vec_stmt, slp_node, cost_vec);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree vectype = SLP_TREE_VECTYPE (slp_node);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- if (slp_node)
- ncopies = 1;
- else
- ncopies = vect_get_num_copies (loop_vinfo, vectype);
- gcc_assert (ncopies >= 1);
-
/* FORNOW. These restrictions should be relaxed. */
if (nested_in_vect_loop_p (loop, stmt_info))
{
@@ -10052,14 +10035,6 @@ vectorizable_induction (loop_vec_info loop_vinfo,
edge latch_e;
tree loop_arg;
- if (ncopies > 1)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "multiple types in nested loop.\n");
- return false;
- }
-
exit_phi = NULL;
latch_e = loop_latch_edge (loop->inner);
loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
@@ -10096,7 +10071,7 @@ vectorizable_induction (loop_vec_info loop_vinfo,
iv_loop = loop;
gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
- if (slp_node && (!nunits.is_constant () && SLP_TREE_LANES (slp_node) != 1))
+ if (!nunits.is_constant () && SLP_TREE_LANES (slp_node) != 1)
{
/* The current SLP code creates the step value element-by-element. */
if (dump_enabled_p ())
@@ -10152,41 +10127,28 @@ vectorizable_induction (loop_vec_info loop_vinfo,
if (!vec_stmt) /* transformation not required. */
{
unsigned inside_cost = 0, prologue_cost = 0;
- if (slp_node)
- {
- /* We eventually need to set a vector type on invariant
- arguments. */
- unsigned j;
- slp_tree child;
- FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), j, child)
- if (!vect_maybe_update_slp_op_vectype
- (child, SLP_TREE_VECTYPE (slp_node)))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "incompatible vector types for "
- "invariants\n");
- return false;
- }
- /* loop cost for vec_loop. */
- inside_cost
- = record_stmt_cost (cost_vec,
- SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
- vector_stmt, stmt_info, 0, vect_body);
- /* prologue cost for vec_init (if not nested) and step. */
- prologue_cost = record_stmt_cost (cost_vec, 1 + !nested_in_vect_loop,
- scalar_to_vec,
- stmt_info, 0, vect_prologue);
- }
- else /* if (!slp_node) */
- {
- /* loop cost for vec_loop. */
- inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
- stmt_info, 0, vect_body);
- /* prologue cost for vec_init and vec_step. */
- prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
- stmt_info, 0, vect_prologue);
- }
+ /* We eventually need to set a vector type on invariant
+ arguments. */
+ unsigned j;
+ slp_tree child;
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), j, child)
+ if (!vect_maybe_update_slp_op_vectype
+ (child, SLP_TREE_VECTYPE (slp_node)))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "incompatible vector types for "
+ "invariants\n");
+ return false;
+ }
+ /* loop cost for vec_loop. */
+ inside_cost = record_stmt_cost (cost_vec,
+ SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
+ vector_stmt, stmt_info, 0, vect_body);
+ /* prologue cost for vec_init (if not nested) and step. */
+ prologue_cost = record_stmt_cost (cost_vec, 1 + !nested_in_vect_loop,
+ scalar_to_vec,
+ stmt_info, 0, vect_prologue);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_induction_cost: inside_cost = %d, "
@@ -10217,670 +10179,374 @@ vectorizable_induction (loop_vec_info loop_vinfo,
with group size 3 we need
[i0, i1, i2, i0 + S0] [i1 + S1, i2 + S2, i0 + 2*S0, i1 + 2*S1]
[i2 + 2*S2, i0 + 3*S0, i1 + 3*S1, i2 + 3*S2]. */
- if (slp_node)
+ gimple_stmt_iterator incr_si;
+ bool insert_after;
+ standard_iv_increment_position (iv_loop, &incr_si, &insert_after);
+
+ /* The initial values are vectorized, but any lanes > group_size
+ need adjustment. */
+ slp_tree init_node
+ = SLP_TREE_CHILDREN (slp_node)[pe->dest_idx];
+
+ /* Gather steps. Since we do not vectorize inductions as
+ cycles we have to reconstruct the step from SCEV data. */
+ unsigned group_size = SLP_TREE_LANES (slp_node);
+ tree *steps = XALLOCAVEC (tree, group_size);
+ tree *inits = XALLOCAVEC (tree, group_size);
+ stmt_vec_info phi_info;
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, phi_info)
+ {
+ steps[i] = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
+ if (!init_node)
+ inits[i] = gimple_phi_arg_def (as_a<gphi *> (phi_info->stmt),
+ pe->dest_idx);
+ }
+
+ /* Now generate the IVs. */
+ unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ gcc_assert (multiple_p (nunits * nvects, group_size));
+ unsigned nivs;
+ unsigned HOST_WIDE_INT const_nunits;
+ if (nested_in_vect_loop)
+ nivs = nvects;
+ else if (nunits.is_constant (&const_nunits))
{
- gimple_stmt_iterator incr_si;
- bool insert_after;
- standard_iv_increment_position (iv_loop, &incr_si, &insert_after);
-
- /* The initial values are vectorized, but any lanes > group_size
- need adjustment. */
- slp_tree init_node
- = SLP_TREE_CHILDREN (slp_node)[pe->dest_idx];
-
- /* Gather steps. Since we do not vectorize inductions as
- cycles we have to reconstruct the step from SCEV data. */
- unsigned group_size = SLP_TREE_LANES (slp_node);
- tree *steps = XALLOCAVEC (tree, group_size);
- tree *inits = XALLOCAVEC (tree, group_size);
- stmt_vec_info phi_info;
- FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, phi_info)
- {
- steps[i] = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
- if (!init_node)
- inits[i] = gimple_phi_arg_def (as_a<gphi *> (phi_info->stmt),
- pe->dest_idx);
- }
-
- /* Now generate the IVs. */
- unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- gcc_assert (multiple_p (nunits * nvects, group_size));
- unsigned nivs;
- unsigned HOST_WIDE_INT const_nunits;
- if (nested_in_vect_loop)
- nivs = nvects;
- else if (nunits.is_constant (&const_nunits))
- {
- /* Compute the number of distinct IVs we need. First reduce
- group_size if it is a multiple of const_nunits so we get
- one IV for a group_size of 4 but const_nunits 2. */
- unsigned group_sizep = group_size;
- if (group_sizep % const_nunits == 0)
- group_sizep = group_sizep / const_nunits;
- nivs = least_common_multiple (group_sizep,
- const_nunits) / const_nunits;
- }
- else
- {
- gcc_assert (SLP_TREE_LANES (slp_node) == 1);
- nivs = 1;
- }
- gimple_seq init_stmts = NULL;
- tree lupdate_mul = NULL_TREE;
- if (!nested_in_vect_loop)
+ /* Compute the number of distinct IVs we need. First reduce
+ group_size if it is a multiple of const_nunits so we get
+ one IV for a group_size of 4 but const_nunits 2. */
+ unsigned group_sizep = group_size;
+ if (group_sizep % const_nunits == 0)
+ group_sizep = group_sizep / const_nunits;
+ nivs = least_common_multiple (group_sizep, const_nunits) / const_nunits;
+ }
+ else
+ {
+ gcc_assert (SLP_TREE_LANES (slp_node) == 1);
+ nivs = 1;
+ }
+ gimple_seq init_stmts = NULL;
+ tree lupdate_mul = NULL_TREE;
+ if (!nested_in_vect_loop)
+ {
+ if (nunits.is_constant (&const_nunits))
{
- if (nunits.is_constant (&const_nunits))
- {
- /* The number of iterations covered in one vector iteration. */
- unsigned lup_mul = (nvects * const_nunits) / group_size;
- lupdate_mul
- = build_vector_from_val (step_vectype,
- SCALAR_FLOAT_TYPE_P (stept)
- ? build_real_from_wide (stept, lup_mul,
- UNSIGNED)
- : build_int_cstu (stept, lup_mul));
- }
- else
- {
- if (SCALAR_FLOAT_TYPE_P (stept))
- {
- tree tem = build_int_cst (integer_type_node, vf);
- lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR,
- stept, tem);
- }
- else
- lupdate_mul = build_int_cst (stept, vf);
- lupdate_mul = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- lupdate_mul);
- }
+ /* The number of iterations covered in one vector iteration. */
+ unsigned lup_mul = (nvects * const_nunits) / group_size;
+ lupdate_mul
+ = build_vector_from_val (step_vectype,
+ SCALAR_FLOAT_TYPE_P (stept)
+ ? build_real_from_wide (stept, lup_mul,
+ UNSIGNED)
+ : build_int_cstu (stept, lup_mul));
}
- tree peel_mul = NULL_TREE;
- if (LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo))
+ else
{
if (SCALAR_FLOAT_TYPE_P (stept))
- peel_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept,
- LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
- else
- peel_mul = gimple_convert (&init_stmts, stept,
- LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
- peel_mul = gimple_build_vector_from_val (&init_stmts,
- step_vectype, peel_mul);
-
- /* If early break then we have to create a new PHI which we can use as
- an offset to adjust the induction reduction in early exits.
-
- This is because when peeling for alignment using masking, the first
- few elements of the vector can be inactive. As such if we find the
- entry in the first iteration we have adjust the starting point of
- the scalar code.
-
- We do this by creating a new scalar PHI that keeps track of whether
- we are the first iteration of the loop (with the additional masking)
- or whether we have taken a loop iteration already.
-
- The generated sequence:
-
- pre-header:
- bb1:
- i_1 = <number of leading inactive elements>
-
- header:
- bb2:
- i_2 = PHI <i_1(bb1), 0(latch)>
- …
-
- early-exit:
- bb3:
- i_3 = iv_step * i_2 + PHI<vector-iv>
-
- The first part of the adjustment to create i_1 and i_2 are done here
- and the last part creating i_3 is done in
- vectorizable_live_operations when the induction extraction is
- materialized. */
- if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
- && !LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo))
{
- auto skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- tree ty_skip_niters = TREE_TYPE (skip_niters);
- tree break_lhs_phi = vect_get_new_vect_var (ty_skip_niters,
- vect_scalar_var,
- "pfa_iv_offset");
- gphi *nphi = create_phi_node (break_lhs_phi, bb);
- add_phi_arg (nphi, skip_niters, pe, UNKNOWN_LOCATION);
- add_phi_arg (nphi, build_zero_cst (ty_skip_niters),
- loop_latch_edge (iv_loop), UNKNOWN_LOCATION);
-
- LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo)
- = PHI_RESULT (nphi);
+ tree tem = build_int_cst (integer_type_node, vf);
+ lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept, tem);
}
+ else
+ lupdate_mul = build_int_cst (stept, vf);
+ lupdate_mul = gimple_build_vector_from_val (&init_stmts, step_vectype,
+ lupdate_mul);
}
- tree step_mul = NULL_TREE;
- unsigned ivn;
- auto_vec<tree> vec_steps;
- for (ivn = 0; ivn < nivs; ++ivn)
+ }
+ tree peel_mul = NULL_TREE;
+ if (LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo))
+ {
+ if (SCALAR_FLOAT_TYPE_P (stept))
+ peel_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept,
+ LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
+ else
+ peel_mul = gimple_convert (&init_stmts, stept,
+ LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
+ peel_mul = gimple_build_vector_from_val (&init_stmts,
+ step_vectype, peel_mul);
+
+      /* If the loop has an early break, we have to create a new PHI to use as
+ an offset to adjust the induction reduction in early exits.
+
+ This is because when peeling for alignment using masking, the first
+	 few elements of the vector can be inactive.  As such, if we find the
+	 entry in the first iteration, we have to adjust the starting point of
+ the scalar code.
+
+ We do this by creating a new scalar PHI that keeps track of whether
+ we are the first iteration of the loop (with the additional masking)
+ or whether we have taken a loop iteration already.
+
+ The generated sequence:
+
+ pre-header:
+ bb1:
+ i_1 = <number of leading inactive elements>
+
+ header:
+ bb2:
+ i_2 = PHI <i_1(bb1), 0(latch)>
+ …
+
+ early-exit:
+ bb3:
+ i_3 = iv_step * i_2 + PHI<vector-iv>
+
+ The first part of the adjustment to create i_1 and i_2 are done here
+ and the last part creating i_3 is done in
+ vectorizable_live_operations when the induction extraction is
+ materialized. */
+ if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
+ && !LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo))
+ {
+ auto skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
+ tree ty_skip_niters = TREE_TYPE (skip_niters);
+ tree break_lhs_phi = vect_get_new_vect_var (ty_skip_niters,
+ vect_scalar_var,
+ "pfa_iv_offset");
+ gphi *nphi = create_phi_node (break_lhs_phi, bb);
+ add_phi_arg (nphi, skip_niters, pe, UNKNOWN_LOCATION);
+ add_phi_arg (nphi, build_zero_cst (ty_skip_niters),
+ loop_latch_edge (iv_loop), UNKNOWN_LOCATION);
+
+ LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo) = PHI_RESULT (nphi);
+ }
+ }
+ tree step_mul = NULL_TREE;
+ unsigned ivn;
+ auto_vec<tree> vec_steps;
+ for (ivn = 0; ivn < nivs; ++ivn)
+ {
+ gimple_seq stmts = NULL;
+ bool invariant = true;
+ if (nunits.is_constant (&const_nunits))
{
- gimple_seq stmts = NULL;
- bool invariant = true;
- if (nunits.is_constant (&const_nunits))
+ tree_vector_builder step_elts (step_vectype, const_nunits, 1);
+ tree_vector_builder init_elts (vectype, const_nunits, 1);
+ tree_vector_builder mul_elts (step_vectype, const_nunits, 1);
+ for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
{
- tree_vector_builder step_elts (step_vectype, const_nunits, 1);
- tree_vector_builder init_elts (vectype, const_nunits, 1);
- tree_vector_builder mul_elts (step_vectype, const_nunits, 1);
- for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
- {
- /* The scalar steps of the IVs. */
- tree elt = steps[(ivn*const_nunits + eltn) % group_size];
- elt = gimple_convert (&init_stmts,
- TREE_TYPE (step_vectype), elt);
- step_elts.quick_push (elt);
- if (!init_node)
- {
- /* The scalar inits of the IVs if not vectorized. */
- elt = inits[(ivn*const_nunits + eltn) % group_size];
- if (!useless_type_conversion_p (TREE_TYPE (vectype),
- TREE_TYPE (elt)))
- elt = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
- TREE_TYPE (vectype), elt);
- init_elts.quick_push (elt);
- }
- /* The number of steps to add to the initial values. */
- unsigned mul_elt = (ivn*const_nunits + eltn) / group_size;
- mul_elts.quick_push (SCALAR_FLOAT_TYPE_P (stept)
- ? build_real_from_wide (stept, mul_elt,
- UNSIGNED)
- : build_int_cstu (stept, mul_elt));
- }
- vec_step = gimple_build_vector (&init_stmts, &step_elts);
- step_mul = gimple_build_vector (&init_stmts, &mul_elts);
+ /* The scalar steps of the IVs. */
+ tree elt = steps[(ivn*const_nunits + eltn) % group_size];
+ elt = gimple_convert (&init_stmts, TREE_TYPE (step_vectype), elt);
+ step_elts.quick_push (elt);
if (!init_node)
- vec_init = gimple_build_vector (&init_stmts, &init_elts);
- }
- else
- {
- if (init_node)
- ;
- else if (INTEGRAL_TYPE_P (TREE_TYPE (steps[0])))
- {
- new_name = gimple_convert (&init_stmts, stept, inits[0]);
- /* Build the initial value directly as a VEC_SERIES_EXPR. */
- vec_init = gimple_build (&init_stmts, VEC_SERIES_EXPR,
- step_vectype, new_name, steps[0]);
- if (!useless_type_conversion_p (vectype, step_vectype))
- vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
- vectype, vec_init);
- }
- else
{
- /* Build:
- [base, base, base, ...]
- + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
- gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (steps[0])));
- gcc_assert (flag_associative_math);
- gcc_assert (index_vectype != NULL_TREE);
-
- tree index = build_index_vector (index_vectype, 0, 1);
- new_name = gimple_convert (&init_stmts, TREE_TYPE (steps[0]),
- inits[0]);
- tree base_vec = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- new_name);
- tree step_vec = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- steps[0]);
- vec_init = gimple_build (&init_stmts, FLOAT_EXPR,
- step_vectype, index);
- vec_init = gimple_build (&init_stmts, MULT_EXPR,
- step_vectype, vec_init, step_vec);
- vec_init = gimple_build (&init_stmts, PLUS_EXPR,
- step_vectype, vec_init, base_vec);
- if (!useless_type_conversion_p (vectype, step_vectype))
- vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
- vectype, vec_init);
+ /* The scalar inits of the IVs if not vectorized. */
+ elt = inits[(ivn*const_nunits + eltn) % group_size];
+ if (!useless_type_conversion_p (TREE_TYPE (vectype),
+ TREE_TYPE (elt)))
+ elt = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
+ TREE_TYPE (vectype), elt);
+ init_elts.quick_push (elt);
}
- /* iv_loop is nested in the loop to be vectorized. Generate:
- vec_step = [S, S, S, S] */
- t = unshare_expr (steps[0]);
- gcc_assert (CONSTANT_CLASS_P (t)
- || TREE_CODE (t) == SSA_NAME);
- vec_step = gimple_build_vector_from_val (&init_stmts,
- step_vectype, t);
- }
- vec_steps.safe_push (vec_step);
- if (peel_mul)
- {
- if (!step_mul)
- step_mul = peel_mul;
- else
- step_mul = gimple_build (&init_stmts,
- MINUS_EXPR, step_vectype,
- step_mul, peel_mul);
- }
-
- /* Create the induction-phi that defines the induction-operand. */
- vec_dest = vect_get_new_vect_var (vectype, vect_simple_var,
- "vec_iv_");
- induction_phi = create_phi_node (vec_dest, iv_loop->header);
- induc_def = PHI_RESULT (induction_phi);
-
- /* Create the iv update inside the loop */
- tree up = vec_step;
- if (lupdate_mul)
- {
- if (LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
- {
- /* When we're using loop_len produced by SELEC_VL, the
- non-final iterations are not always processing VF
- elements. So vectorize induction variable instead of
-
- _21 = vect_vec_iv_.6_22 + { VF, ... };
-
- We should generate:
-
- _35 = .SELECT_VL (ivtmp_33, VF);
- vect_cst__22 = [vec_duplicate_expr] _35;
- _21 = vect_vec_iv_.6_22 + vect_cst__22; */
- vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
- tree len = vect_get_loop_len (loop_vinfo, NULL, lens, 1,
- vectype, 0, 0);
- if (SCALAR_FLOAT_TYPE_P (stept))
- expr = gimple_build (&stmts, FLOAT_EXPR, stept, len);
- else
- expr = gimple_convert (&stmts, stept, len);
- lupdate_mul = gimple_build_vector_from_val (&stmts,
- step_vectype,
- expr);
- up = gimple_build (&stmts, MULT_EXPR,
- step_vectype, vec_step, lupdate_mul);
- }
- else
- up = gimple_build (&init_stmts,
- MULT_EXPR, step_vectype,
- vec_step, lupdate_mul);
- }
- vec_def = gimple_convert (&stmts, step_vectype, induc_def);
- vec_def = gimple_build (&stmts,
- PLUS_EXPR, step_vectype, vec_def, up);
- vec_def = gimple_convert (&stmts, vectype, vec_def);
- insert_iv_increment (&incr_si, insert_after, stmts);
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
- UNKNOWN_LOCATION);
-
- if (init_node)
- vec_init = vect_get_slp_vect_def (init_node, ivn);
- if (!nested_in_vect_loop
- && step_mul
- && !integer_zerop (step_mul))
- {
- gcc_assert (invariant);
- vec_def = gimple_convert (&init_stmts, step_vectype, vec_init);
- up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
- vec_step, step_mul);
- vec_def = gimple_build (&init_stmts, PLUS_EXPR, step_vectype,
- vec_def, up);
- vec_init = gimple_convert (&init_stmts, vectype, vec_def);
- }
-
- /* Set the arguments of the phi node: */
- add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
-
- slp_node->push_vec_def (induction_phi);
- }
- if (!nested_in_vect_loop)
- {
- /* Fill up to the number of vectors we need for the whole group. */
- if (nunits.is_constant (&const_nunits))
- nivs = least_common_multiple (group_size,
- const_nunits) / const_nunits;
- else
- nivs = 1;
- vec_steps.reserve (nivs-ivn);
- for (; ivn < nivs; ++ivn)
- {
- slp_node->push_vec_def (SLP_TREE_VEC_DEFS (slp_node)[0]);
- vec_steps.quick_push (vec_steps[0]);
+ /* The number of steps to add to the initial values. */
+ unsigned mul_elt = (ivn*const_nunits + eltn) / group_size;
+ mul_elts.quick_push (SCALAR_FLOAT_TYPE_P (stept)
+ ? build_real_from_wide (stept, mul_elt,
+ UNSIGNED)
+ : build_int_cstu (stept, mul_elt));
}
+ vec_step = gimple_build_vector (&init_stmts, &step_elts);
+ step_mul = gimple_build_vector (&init_stmts, &mul_elts);
+ if (!init_node)
+ vec_init = gimple_build_vector (&init_stmts, &init_elts);
}
-
- /* Re-use IVs when we can. We are generating further vector
- stmts by adding VF' * stride to the IVs generated above. */
- if (ivn < nvects)
+ else
{
- if (nunits.is_constant (&const_nunits))
+ if (init_node)
+ ;
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (steps[0])))
{
- unsigned vfp = (least_common_multiple (group_size, const_nunits)
- / group_size);
- lupdate_mul
- = build_vector_from_val (step_vectype,
- SCALAR_FLOAT_TYPE_P (stept)
- ? build_real_from_wide (stept,
- vfp, UNSIGNED)
- : build_int_cstu (stept, vfp));
+ new_name = gimple_convert (&init_stmts, stept, inits[0]);
+ /* Build the initial value directly as a VEC_SERIES_EXPR. */
+ vec_init = gimple_build (&init_stmts, VEC_SERIES_EXPR,
+ step_vectype, new_name, steps[0]);
+ if (!useless_type_conversion_p (vectype, step_vectype))
+ vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
+ vectype, vec_init);
}
else
{
- if (SCALAR_FLOAT_TYPE_P (stept))
- {
- tree tem = build_int_cst (integer_type_node, nunits);
- lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR,
- stept, tem);
- }
- else
- lupdate_mul = build_int_cst (stept, nunits);
- lupdate_mul = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- lupdate_mul);
- }
- for (; ivn < nvects; ++ivn)
- {
- gimple *iv
- = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (slp_node)[ivn - nivs]);
- tree def = gimple_get_lhs (iv);
- if (ivn < 2*nivs)
- vec_steps[ivn - nivs]
- = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
- vec_steps[ivn - nivs], lupdate_mul);
- gimple_seq stmts = NULL;
- def = gimple_convert (&stmts, step_vectype, def);
- def = gimple_build (&stmts, PLUS_EXPR, step_vectype,
- def, vec_steps[ivn % nivs]);
- def = gimple_convert (&stmts, vectype, def);
- if (gimple_code (iv) == GIMPLE_PHI)
- gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
- else
- {
- gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
- gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
- }
- slp_node->push_vec_def (def);
+ /* Build:
+ [base, base, base, ...]
+ + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
+ gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (steps[0])));
+ gcc_assert (flag_associative_math);
+ gcc_assert (index_vectype != NULL_TREE);
+
+ tree index = build_index_vector (index_vectype, 0, 1);
+ new_name = gimple_convert (&init_stmts, TREE_TYPE (steps[0]),
+ inits[0]);
+ tree base_vec = gimple_build_vector_from_val (&init_stmts,
+ step_vectype,
+ new_name);
+ tree step_vec = gimple_build_vector_from_val (&init_stmts,
+ step_vectype,
+ steps[0]);
+ vec_init = gimple_build (&init_stmts, FLOAT_EXPR,
+ step_vectype, index);
+ vec_init = gimple_build (&init_stmts, MULT_EXPR,
+ step_vectype, vec_init, step_vec);
+ vec_init = gimple_build (&init_stmts, PLUS_EXPR,
+ step_vectype, vec_init, base_vec);
+ if (!useless_type_conversion_p (vectype, step_vectype))
+ vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
+ vectype, vec_init);
}
+ /* iv_loop is nested in the loop to be vectorized. Generate:
+ vec_step = [S, S, S, S] */
+ t = unshare_expr (steps[0]);
+ gcc_assert (CONSTANT_CLASS_P (t)
+ || TREE_CODE (t) == SSA_NAME);
+ vec_step = gimple_build_vector_from_val (&init_stmts,
+ step_vectype, t);
+ }
+ vec_steps.safe_push (vec_step);
+ if (peel_mul)
+ {
+ if (!step_mul)
+ step_mul = peel_mul;
+ else
+ step_mul = gimple_build (&init_stmts,
+ MINUS_EXPR, step_vectype,
+ step_mul, peel_mul);
}
- new_bb = gsi_insert_seq_on_edge_immediate (pe, init_stmts);
- gcc_assert (!new_bb);
+ /* Create the induction-phi that defines the induction-operand. */
+ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var,
+ "vec_iv_");
+ induction_phi = create_phi_node (vec_dest, iv_loop->header);
+ induc_def = PHI_RESULT (induction_phi);
- return true;
- }
-
- tree init_expr = vect_phi_initial_value (phi);
-
- gimple_seq stmts = NULL;
- if (!nested_in_vect_loop)
- {
- /* Convert the initial value to the IV update type. */
- tree new_type = TREE_TYPE (step_expr);
- init_expr = gimple_convert (&stmts, new_type, init_expr);
-
- /* If we are using the loop mask to "peel" for alignment then we need
- to adjust the start value here. */
- tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- if (skip_niters != NULL_TREE)
+ /* Create the iv update inside the loop */
+ tree up = vec_step;
+ if (lupdate_mul)
{
- if (FLOAT_TYPE_P (vectype))
- skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
- skip_niters);
- else
- skip_niters = gimple_convert (&stmts, new_type, skip_niters);
- tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
- skip_niters, step_expr);
- init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
- init_expr, skip_step);
- }
- }
+ if (LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
+ {
+	      /* When we're using loop_len produced by SELECT_VL, the
+		 non-final iterations are not always processing VF
+		 elements.  So vectorize the induction variable instead of
- if (stmts)
- {
- new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
- gcc_assert (!new_bb);
- }
+ _21 = vect_vec_iv_.6_22 + { VF, ... };
- /* Create the vector that holds the initial_value of the induction. */
- if (nested_in_vect_loop)
- {
- /* iv_loop is nested in the loop to be vectorized. init_expr had already
- been created during vectorization of previous stmts. We obtain it
- from the STMT_VINFO_VEC_STMT of the defining stmt. */
- auto_vec<tree> vec_inits;
- vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
- init_expr, &vec_inits);
- vec_init = vec_inits[0];
- /* If the initial value is not of proper type, convert it. */
- if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
- {
- new_stmt
- = gimple_build_assign (vect_get_new_ssa_name (vectype,
- vect_simple_var,
- "vec_iv_"),
- VIEW_CONVERT_EXPR,
- build1 (VIEW_CONVERT_EXPR, vectype,
- vec_init));
- vec_init = gimple_assign_lhs (new_stmt);
- new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
- new_stmt);
- gcc_assert (!new_bb);
- }
- }
- else
- {
- /* iv_loop is the loop to be vectorized. Create:
- vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
- stmts = NULL;
- new_name = gimple_convert (&stmts, TREE_TYPE (step_expr), init_expr);
+ We should generate:
- unsigned HOST_WIDE_INT const_nunits;
- if (nunits.is_constant (&const_nunits))
- {
- tree_vector_builder elts (step_vectype, const_nunits, 1);
- elts.quick_push (new_name);
- for (i = 1; i < const_nunits; i++)
- {
- /* Create: new_name_i = new_name + step_expr */
- new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
- new_name, step_expr);
- elts.quick_push (new_name);
+ _35 = .SELECT_VL (ivtmp_33, VF);
+ vect_cst__22 = [vec_duplicate_expr] _35;
+ _21 = vect_vec_iv_.6_22 + vect_cst__22; */
+ vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
+ tree len = vect_get_loop_len (loop_vinfo, NULL, lens, 1,
+ vectype, 0, 0);
+ if (SCALAR_FLOAT_TYPE_P (stept))
+ expr = gimple_build (&stmts, FLOAT_EXPR, stept, len);
+ else
+ expr = gimple_convert (&stmts, stept, len);
+ lupdate_mul = gimple_build_vector_from_val (&stmts, step_vectype,
+ expr);
+ up = gimple_build (&stmts, MULT_EXPR,
+ step_vectype, vec_step, lupdate_mul);
}
- /* Create a vector from [new_name_0, new_name_1, ...,
- new_name_nunits-1] */
- vec_init = gimple_build_vector (&stmts, &elts);
- }
- else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
- /* Build the initial value directly from a VEC_SERIES_EXPR. */
- vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, step_vectype,
- new_name, step_expr);
- else
- {
- /* Build:
- [base, base, base, ...]
- + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
- gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
- gcc_assert (flag_associative_math);
- gcc_assert (index_vectype != NULL_TREE);
-
- tree index = build_index_vector (index_vectype, 0, 1);
- tree base_vec = gimple_build_vector_from_val (&stmts, step_vectype,
- new_name);
- tree step_vec = gimple_build_vector_from_val (&stmts, step_vectype,
- step_expr);
- vec_init = gimple_build (&stmts, FLOAT_EXPR, step_vectype, index);
- vec_init = gimple_build (&stmts, MULT_EXPR, step_vectype,
- vec_init, step_vec);
- vec_init = gimple_build (&stmts, PLUS_EXPR, step_vectype,
- vec_init, base_vec);
- }
- vec_init = gimple_convert (&stmts, vectype, vec_init);
+ else
+ up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
+ vec_step, lupdate_mul);
+ }
+ vec_def = gimple_convert (&stmts, step_vectype, induc_def);
+ vec_def = gimple_build (&stmts, PLUS_EXPR, step_vectype, vec_def, up);
+ vec_def = gimple_convert (&stmts, vectype, vec_def);
+ insert_iv_increment (&incr_si, insert_after, stmts);
+ add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
+ UNKNOWN_LOCATION);
- if (stmts)
+ if (init_node)
+ vec_init = vect_get_slp_vect_def (init_node, ivn);
+ if (!nested_in_vect_loop
+ && step_mul
+ && !integer_zerop (step_mul))
{
- new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
- gcc_assert (!new_bb);
+ gcc_assert (invariant);
+ vec_def = gimple_convert (&init_stmts, step_vectype, vec_init);
+ up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
+ vec_step, step_mul);
+ vec_def = gimple_build (&init_stmts, PLUS_EXPR, step_vectype,
+ vec_def, up);
+ vec_init = gimple_convert (&init_stmts, vectype, vec_def);
}
- }
-
-
- /* Create the vector that holds the step of the induction. */
- gimple_stmt_iterator *step_iv_si = NULL;
- if (nested_in_vect_loop)
- /* iv_loop is nested in the loop to be vectorized. Generate:
- vec_step = [S, S, S, S] */
- new_name = step_expr;
- else if (LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
- {
- /* When we're using loop_len produced by SELEC_VL, the non-final
- iterations are not always processing VF elements. So vectorize
- induction variable instead of
- _21 = vect_vec_iv_.6_22 + { VF, ... };
+ /* Set the arguments of the phi node: */
+ add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
- We should generate:
-
- _35 = .SELECT_VL (ivtmp_33, VF);
- vect_cst__22 = [vec_duplicate_expr] _35;
- _21 = vect_vec_iv_.6_22 + vect_cst__22; */
- gcc_assert (!slp_node);
- gimple_seq seq = NULL;
- vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
- tree len = vect_get_loop_len (loop_vinfo, NULL, lens, 1, vectype, 0, 0);
- expr = force_gimple_operand (fold_convert (TREE_TYPE (step_expr),
- unshare_expr (len)),
- &seq, true, NULL_TREE);
- new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr), expr,
- step_expr);
- gsi_insert_seq_before (&si, seq, GSI_SAME_STMT);
- step_iv_si = &si;
+ slp_node->push_vec_def (induction_phi);
}
- else
+ if (!nested_in_vect_loop)
{
- /* iv_loop is the loop to be vectorized. Generate:
- vec_step = [VF*S, VF*S, VF*S, VF*S] */
- gimple_seq seq = NULL;
- if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
- {
- expr = build_int_cst (integer_type_node, vf);
- expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
- }
+ /* Fill up to the number of vectors we need for the whole group. */
+ if (nunits.is_constant (&const_nunits))
+ nivs = least_common_multiple (group_size, const_nunits) / const_nunits;
else
- expr = build_int_cst (TREE_TYPE (step_expr), vf);
- new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
- expr, step_expr);
- if (seq)
+ nivs = 1;
+ vec_steps.reserve (nivs-ivn);
+ for (; ivn < nivs; ++ivn)
{
- new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
- gcc_assert (!new_bb);
+ slp_node->push_vec_def (SLP_TREE_VEC_DEFS (slp_node)[0]);
+ vec_steps.quick_push (vec_steps[0]);
}
}
- t = unshare_expr (new_name);
- gcc_assert (CONSTANT_CLASS_P (new_name)
- || TREE_CODE (new_name) == SSA_NAME);
- new_vec = build_vector_from_val (step_vectype, t);
- vec_step = vect_init_vector (loop_vinfo, stmt_info,
- new_vec, step_vectype, step_iv_si);
-
-
- /* Create the following def-use cycle:
- loop prolog:
- vec_init = ...
- vec_step = ...
- loop:
- vec_iv = PHI <vec_init, vec_loop>
- ...
- STMT
- ...
- vec_loop = vec_iv + vec_step; */
-
- /* Create the induction-phi that defines the induction-operand. */
- vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
- induction_phi = create_phi_node (vec_dest, iv_loop->header);
- induc_def = PHI_RESULT (induction_phi);
-
- /* Create the iv update inside the loop */
- stmts = NULL;
- vec_def = gimple_convert (&stmts, step_vectype, induc_def);
- vec_def = gimple_build (&stmts, PLUS_EXPR, step_vectype, vec_def, vec_step);
- vec_def = gimple_convert (&stmts, vectype, vec_def);
- gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
- new_stmt = SSA_NAME_DEF_STMT (vec_def);
-
- /* Set the arguments of the phi node: */
- add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
- UNKNOWN_LOCATION);
-
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (induction_phi);
- *vec_stmt = induction_phi;
-
- /* In case that vectorization factor (VF) is bigger than the number
- of elements that we can fit in a vectype (nunits), we have to generate
- more than one vector stmt - i.e - we need to "unroll" the
- vector stmt by a factor VF/nunits. For more details see documentation
- in vectorizable_operation. */
-
- if (ncopies > 1)
+ /* Re-use IVs when we can. We are generating further vector
+ stmts by adding VF' * stride to the IVs generated above. */
+ if (ivn < nvects)
{
- gimple_seq seq = NULL;
- /* FORNOW. This restriction should be relaxed. */
- gcc_assert (!nested_in_vect_loop);
- /* We expect LOOP_VINFO_USING_SELECT_VL_P to be false if ncopies > 1. */
- gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
-
- /* Create the vector that holds the step of the induction. */
- if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
+ if (nunits.is_constant (&const_nunits))
{
- expr = build_int_cst (integer_type_node, nunits);
- expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
+ unsigned vfp = (least_common_multiple (group_size, const_nunits)
+ / group_size);
+ lupdate_mul
+ = build_vector_from_val (step_vectype,
+ SCALAR_FLOAT_TYPE_P (stept)
+ ? build_real_from_wide (stept,
+ vfp, UNSIGNED)
+ : build_int_cstu (stept, vfp));
}
else
- expr = build_int_cst (TREE_TYPE (step_expr), nunits);
- new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
- expr, step_expr);
- if (seq)
{
- new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
- gcc_assert (!new_bb);
- }
-
- t = unshare_expr (new_name);
- gcc_assert (CONSTANT_CLASS_P (new_name)
- || TREE_CODE (new_name) == SSA_NAME);
- new_vec = build_vector_from_val (step_vectype, t);
- vec_step = vect_init_vector (loop_vinfo, stmt_info,
- new_vec, step_vectype, NULL);
-
- vec_def = induc_def;
- for (i = 1; i < ncopies + 1; i++)
- {
- /* vec_i = vec_prev + vec_step */
- gimple_seq stmts = NULL;
- vec_def = gimple_convert (&stmts, step_vectype, vec_def);
- vec_def = gimple_build (&stmts,
- PLUS_EXPR, step_vectype, vec_def, vec_step);
- vec_def = gimple_convert (&stmts, vectype, vec_def);
-
- gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
- if (i < ncopies)
+ if (SCALAR_FLOAT_TYPE_P (stept))
{
- new_stmt = SSA_NAME_DEF_STMT (vec_def);
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ tree tem = build_int_cst (integer_type_node, nunits);
+ lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept, tem);
}
else
+ lupdate_mul = build_int_cst (stept, nunits);
+ lupdate_mul = gimple_build_vector_from_val (&init_stmts, step_vectype,
+ lupdate_mul);
+ }
+ for (; ivn < nvects; ++ivn)
+ {
+ gimple *iv
+ = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (slp_node)[ivn - nivs]);
+ tree def = gimple_get_lhs (iv);
+ if (ivn < 2*nivs)
+ vec_steps[ivn - nivs]
+ = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
+ vec_steps[ivn - nivs], lupdate_mul);
+ gimple_seq stmts = NULL;
+ def = gimple_convert (&stmts, step_vectype, def);
+ def = gimple_build (&stmts, PLUS_EXPR, step_vectype,
+ def, vec_steps[ivn % nivs]);
+ def = gimple_convert (&stmts, vectype, def);
+ if (gimple_code (iv) == GIMPLE_PHI)
+ gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
+ else
{
- /* vec_1 = vec_iv + (VF/n * S)
- vec_2 = vec_1 + (VF/n * S)
- ...
- vec_n = vec_prev + (VF/n * S) = vec_iv + VF * S = vec_loop
-
- vec_n is used as vec_loop to save the large step register and
- related operations. */
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
- UNKNOWN_LOCATION);
+ gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
+ gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
}
+ slp_node->push_vec_def (def);
}
}
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "transform induction: created def-use cycle: %G%G",
- (gimple *) induction_phi, SSA_NAME_DEF_STMT (vec_def));
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, init_stmts);
+ gcc_assert (!new_bb);
return true;
}
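
As a sanity check on the SLP-only path above, a small runnable model of the def-use cycle it emits: vec_init = [X, X+S, X+2*S, X+3*S] feeds the header PHI, and each vector iteration adds vec_step = [VF*S, ...]. This is an illustrative C++ sketch, not GCC code:

    #include <cstdio>

    int main ()
    {
      const int X = 10, S = 3, VF = 4;     // init value, step, vector lanes
      int vec_iv[VF];
      for (int l = 0; l < VF; ++l)         // vec_init = [X, X+S, X+2*S, X+3*S]
        vec_iv[l] = X + l * S;
      for (int k = 0; k < 3; ++k)          // three vector iterations
        {
          for (int l = 0; l < VF; ++l)
            std::printf ("%d ", vec_iv[l]); // lane l holds X + (k*VF + l)*S
          std::printf ("\n");
          for (int l = 0; l < VF; ++l)     // vec_loop = vec_iv + vec_step
            vec_iv[l] += VF * S;
        }
      return 0;
    }
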
diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index ca19add..d848476 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -1098,6 +1098,7 @@ vect_recog_cond_expr_convert_pattern (vec_info *vinfo,
tree lhs, match[4], temp, type, new_lhs, op2;
gimple *cond_stmt;
gimple *pattern_stmt;
+ enum tree_code code = NOP_EXPR;
if (!last_stmt)
return NULL;
@@ -1111,6 +1112,11 @@ vect_recog_cond_expr_convert_pattern (vec_info *vinfo,
vect_pattern_detected ("vect_recog_cond_expr_convert_pattern", last_stmt);
+ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs)))
+ code = INTEGRAL_TYPE_P (TREE_TYPE (match[1])) ? FLOAT_EXPR : CONVERT_EXPR;
+ else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (match[1])))
+ code = FIX_TRUNC_EXPR;
+
op2 = match[2];
type = TREE_TYPE (match[1]);
if (TYPE_SIGN (type) != TYPE_SIGN (TREE_TYPE (match[2])))
@@ -1127,7 +1133,7 @@ vect_recog_cond_expr_convert_pattern (vec_info *vinfo,
append_pattern_def_seq (vinfo, stmt_vinfo, cond_stmt,
get_vectype_for_scalar_type (vinfo, type));
new_lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
- pattern_stmt = gimple_build_assign (new_lhs, NOP_EXPR, temp);
+ pattern_stmt = gimple_build_assign (new_lhs, code, temp);
*type_out = STMT_VINFO_VECTYPE (stmt_vinfo);
if (dump_enabled_p ())
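
In source form, the shape this pattern now handles matches the new gcc.target/i386/pr103771-4.c tests: a conversion of a select result where the conversion may be int-to-float (FLOAT_EXPR), float-to-int (FIX_TRUNC_EXPR), or float-to-float (CONVERT_EXPR), not just an integer NOP_EXPR. An illustrative loop, not taken from the patch:

    void cond_convert (float *a, float *b, int *c, int *d, double *e, int n)
    {
      for (int i = 0; i < n; ++i)
        /* Both arms are int-to-double converts under a float compare; the
           pattern sinks the conversion past the COND_EXPR and now emits
           FLOAT_EXPR for it.  */
        e[i] = a[i] < b[i] ? (double) d[i] : (double) c[i];
    }
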
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 857517f..fb2262a 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -8036,7 +8036,7 @@ vect_prologue_cost_for_slp (slp_tree node,
we are costing so avoid passing it down more than once. Pass
it to the first vec_construct or scalar_to_vec part since for those
the x86 backend tries to account for GPR to XMM register moves. */
- record_stmt_cost (cost_vec, 1, kind,
+ record_stmt_cost (cost_vec, 1, kind, nullptr,
(kind != vector_load && !passed) ? node : nullptr,
vectype, 0, vect_prologue);
if (kind != vector_load)
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index bd390b2..ec50f50 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -130,7 +130,8 @@ record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
tree vectype, int misalign,
enum vect_cost_model_location where)
{
- return record_stmt_cost (body_cost_vec, count, kind, NULL, node,
+ return record_stmt_cost (body_cost_vec, count, kind,
+ SLP_TREE_REPRESENTATIVE (node), node,
vectype, misalign, where);
}
@@ -905,11 +906,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
be generated for the single vector op. We will handle that shortly. */
static void
-vect_model_simple_cost (vec_info *,
- stmt_vec_info stmt_info, int ncopies,
- enum vect_def_type *dt,
- int ndts,
- slp_tree node,
+vect_model_simple_cost (vec_info *, int ncopies, enum vect_def_type *dt,
+ int ndts, slp_tree node,
stmt_vector_for_cost *cost_vec,
vect_cost_for_stmt kind = vector_stmt)
{
@@ -928,11 +926,11 @@ vect_model_simple_cost (vec_info *,
for (int i = 0; i < ndts; i++)
if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
- stmt_info, 0, vect_prologue);
+ node, 0, vect_prologue);
/* Pass the inside-of-loop statements to the target-specific cost model. */
inside_cost += record_stmt_cost (cost_vec, ncopies, kind,
- stmt_info, 0, vect_body);
+ node, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
@@ -3756,8 +3754,7 @@ vectorizable_call (vec_info *vinfo,
}
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_call");
- vect_model_simple_cost (vinfo, stmt_info,
- ncopies, dt, ndts, slp_node, cost_vec);
+ vect_model_simple_cost (vinfo, ncopies, dt, ndts, slp_node, cost_vec);
if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
record_stmt_cost (cost_vec, ncopies / 2,
vec_promote_demote, stmt_info, 0, vect_body);
@@ -4724,8 +4721,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
-/* vect_model_simple_cost (vinfo, stmt_info, ncopies,
- dt, slp_node, cost_vec); */
+/* vect_model_simple_cost (vinfo, ncopies, dt, slp_node, cost_vec); */
return true;
}
@@ -5922,7 +5918,7 @@ vectorizable_conversion (vec_info *vinfo,
if (modifier == NONE)
{
STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
- vect_model_simple_cost (vinfo, stmt_info, (1 + multi_step_cvt),
+ vect_model_simple_cost (vinfo, (1 + multi_step_cvt),
dt, ndts, slp_node, cost_vec);
}
else if (modifier == NARROW_SRC || modifier == NARROW_DST)
@@ -6291,8 +6287,7 @@ vectorizable_assignment (vec_info *vinfo,
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_assignment");
if (!vect_nop_conversion_p (stmt_info))
- vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node,
- cost_vec);
+ vect_model_simple_cost (vinfo, ncopies, dt, ndts, slp_node, cost_vec);
return true;
}
@@ -6662,7 +6657,7 @@ vectorizable_shift (vec_info *vinfo,
}
STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_shift");
- vect_model_simple_cost (vinfo, stmt_info, ncopies, dt,
+ vect_model_simple_cost (vinfo, ncopies, dt,
scalar_shift_arg ? 1 : ndts, slp_node, cost_vec);
return true;
}
@@ -7099,8 +7094,7 @@ vectorizable_operation (vec_info *vinfo,
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_operation");
- vect_model_simple_cost (vinfo, stmt_info,
- 1, dt, ndts, slp_node, cost_vec);
+ vect_model_simple_cost (vinfo, 1, dt, ndts, slp_node, cost_vec);
if (using_emulated_vectors_p)
{
/* The above vect_model_simple_cost call handles constants
@@ -12931,7 +12925,7 @@ vectorizable_condition (vec_info *vinfo,
}
STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
- vect_model_simple_cost (vinfo, stmt_info, ncopies, dts, ndts, slp_node,
+ vect_model_simple_cost (vinfo, ncopies, dts, ndts, slp_node,
cost_vec, kind);
return true;
}
@@ -13363,8 +13357,7 @@ vectorizable_comparison_1 (vec_info *vinfo, tree vectype,
return false;
}
- vect_model_simple_cost (vinfo, stmt_info,
- ncopies * (1 + (bitop2 != NOP_EXPR)),
+ vect_model_simple_cost (vinfo, ncopies * (1 + (bitop2 != NOP_EXPR)),
dts, ndts, slp_node, cost_vec);
return true;
}
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index a2f33a5..118200f 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -2454,6 +2454,17 @@ record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
STMT_VINFO_VECTYPE (stmt_info), misalign, where);
}
+/* Overload of record_stmt_cost with VECTYPE derived from SLP node. */
+
+inline unsigned
+record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
+ enum vect_cost_for_stmt kind, slp_tree node,
+ int misalign, enum vect_cost_model_location where)
+{
+ return record_stmt_cost (body_cost_vec, count, kind, node,
+ SLP_TREE_VECTYPE (node), misalign, where);
+}
+
extern void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *);
extern void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *,
gimple_stmt_iterator *);
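
Usage-wise, the node-only overload pulls the vectype from SLP_TREE_VECTYPE and, via the tree-vect-stmts.cc overload above, the representative stmt_info from the node, so callers no longer pass stmt_info explicitly. A hypothetical call site for illustration:

    /* Before: record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                 stmt_info, 0, vect_body);
       after, vectype and representative both come from the node:  */
    inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                     slp_node, 0, vect_body);
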
diff --git a/include/libiberty.h b/include/libiberty.h
index d4e8791..4ec9b9a 100644
--- a/include/libiberty.h
+++ b/include/libiberty.h
@@ -215,10 +215,6 @@ extern int ffs(int);
extern int mkstemps(char *, int);
#endif
-#if defined (HAVE_DECL_MKSTEMPS) && !HAVE_DECL_MKSTEMPS
-extern int mkstemps(char *, int);
-#endif
-
/* Make memrchr available on systems that do not have it. */
#if !defined (__GNU_LIBRARY__ ) && !defined (__linux__) && \
!defined (HAVE_MEMRCHR)
diff --git a/libgcobol/ChangeLog b/libgcobol/ChangeLog
index 2eadc73..06b99f2 100644
--- a/libgcobol/ChangeLog
+++ b/libgcobol/ChangeLog
@@ -1,3 +1,7 @@
+2025-05-13 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * libgcobol.cc [!LOG_PERROR] (LOG_PERROR): Provide fallback.
+
2025-05-11 Robert Dubner <rdubner@symas.com>
PR cobol/119377
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index d36fcc1..638c03e 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,48 @@
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR libfortran/120196
+ * m4/ifindloc2.m4 (header1, header2): For back use i > 0 rather than
+ i >= 0 as for condition.
+ * generated/findloc2_s1.c: Regenerate.
+ * generated/findloc2_s4.c: Regenerate.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR fortran/120191
+ * m4/ifunction-s.m4 (SCALAR_ARRAY_FUNCTION): Don't multiply
+ GFC_DESCRIPTOR_EXTENT(array,) by string_len.
+ * generated/maxloc1_4_s1.c: Regenerate.
+ * generated/maxloc1_4_s4.c: Regenerate.
+ * generated/maxloc1_8_s1.c: Regenerate.
+ * generated/maxloc1_8_s4.c: Regenerate.
+ * generated/maxloc1_16_s1.c: Regenerate.
+ * generated/maxloc1_16_s4.c: Regenerate.
+ * generated/minloc1_4_s1.c: Regenerate.
+ * generated/minloc1_4_s4.c: Regenerate.
+ * generated/minloc1_8_s1.c: Regenerate.
+ * generated/minloc1_8_s4.c: Regenerate.
+ * generated/minloc1_16_s1.c: Regenerate.
+ * generated/minloc1_16_s4.c: Regenerate.
+
+2025-05-13 Jakub Jelinek <jakub@redhat.com>
+
+ PR fortran/120191
+ * m4/maxloc2s.m4: For smaxloc2 call maxloc2 if mask is NULL or *mask.
+ Swap back and len arguments.
+ * m4/minloc2s.m4: Likewise.
+ * generated/maxloc2_4_s1.c: Regenerate.
+ * generated/maxloc2_4_s4.c: Regenerate.
+ * generated/maxloc2_8_s1.c: Regenerate.
+ * generated/maxloc2_8_s4.c: Regenerate.
+ * generated/maxloc2_16_s1.c: Regenerate.
+ * generated/maxloc2_16_s4.c: Regenerate.
+ * generated/minloc2_4_s1.c: Regenerate.
+ * generated/minloc2_4_s4.c: Regenerate.
+ * generated/minloc2_8_s1.c: Regenerate.
+ * generated/minloc2_8_s4.c: Regenerate.
+ * generated/minloc2_16_s1.c: Regenerate.
+ * generated/minloc2_16_s4.c: Regenerate.
+
2025-05-10 Yuao Ma <c8ef@outlook.com>
* io/read.c (read_f): Comment typo, explict -> explicit.
diff --git a/libiberty/ChangeLog b/libiberty/ChangeLog
index 81c247a..2ae5626 100644
--- a/libiberty/ChangeLog
+++ b/libiberty/ChangeLog
@@ -1,3 +1,8 @@
+2025-05-13 Andreas Schwab <schwab@suse.de>
+
+ * regex.c (regex_compile): Don't write beyond array bounds when
+ collecting range expression.
+
2025-03-29 Iain Sandoe <iain@sandoe.co.uk>
PR cobol/119283
diff --git a/libstdc++-v3/doc/doxygen/stdheader.cc b/libstdc++-v3/doc/doxygen/stdheader.cc
index 3ee825f..839bfc8 100644
--- a/libstdc++-v3/doc/doxygen/stdheader.cc
+++ b/libstdc++-v3/doc/doxygen/stdheader.cc
@@ -54,7 +54,8 @@ void init_map()
headers["function.h"] = "functional";
headers["functional_hash.h"] = "functional";
headers["mofunc_impl.h"] = "functional";
- headers["move_only_function.h"] = "functional";
+ headers["cpyfunc_impl.h"] = "functional";
+ headers["funcwrap.h"] = "functional";
headers["invoke.h"] = "functional";
headers["ranges_cmp.h"] = "functional";
headers["refwrap.h"] = "functional";
diff --git a/libstdc++-v3/include/Makefile.am b/libstdc++-v3/include/Makefile.am
index 1140fa0..3e5b6c4 100644
--- a/libstdc++-v3/include/Makefile.am
+++ b/libstdc++-v3/include/Makefile.am
@@ -194,6 +194,7 @@ bits_headers = \
${bits_srcdir}/chrono_io.h \
${bits_srcdir}/codecvt.h \
${bits_srcdir}/cow_string.h \
+ ${bits_srcdir}/cpyfunc_impl.h \
${bits_srcdir}/deque.tcc \
${bits_srcdir}/erase_if.h \
${bits_srcdir}/formatfwd.h \
@@ -204,6 +205,7 @@ bits_headers = \
${bits_srcdir}/fs_ops.h \
${bits_srcdir}/fs_path.h \
${bits_srcdir}/fstream.tcc \
+ ${bits_srcdir}/funcwrap.h \
${bits_srcdir}/gslice.h \
${bits_srcdir}/gslice_array.h \
${bits_srcdir}/hashtable.h \
@@ -223,7 +225,6 @@ bits_headers = \
${bits_srcdir}/mask_array.h \
${bits_srcdir}/memory_resource.h \
${bits_srcdir}/mofunc_impl.h \
- ${bits_srcdir}/move_only_function.h \
${bits_srcdir}/new_allocator.h \
${bits_srcdir}/node_handle.h \
${bits_srcdir}/ostream.tcc \
diff --git a/libstdc++-v3/include/Makefile.in b/libstdc++-v3/include/Makefile.in
index c96e981..3531162 100644
--- a/libstdc++-v3/include/Makefile.in
+++ b/libstdc++-v3/include/Makefile.in
@@ -547,6 +547,7 @@ bits_freestanding = \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/chrono_io.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/codecvt.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/cow_string.h \
+@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/cpyfunc_impl.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/deque.tcc \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/erase_if.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/formatfwd.h \
@@ -557,6 +558,7 @@ bits_freestanding = \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/fs_ops.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/fs_path.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/fstream.tcc \
+@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/funcwrap.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/gslice.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/gslice_array.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/hashtable.h \
@@ -576,7 +578,6 @@ bits_freestanding = \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/mask_array.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/memory_resource.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/mofunc_impl.h \
-@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/move_only_function.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/new_allocator.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/node_handle.h \
@GLIBCXX_HOSTED_TRUE@ ${bits_srcdir}/ostream.tcc \
diff --git a/libstdc++-v3/include/bits/cpyfunc_impl.h b/libstdc++-v3/include/bits/cpyfunc_impl.h
new file mode 100644
index 0000000..bc44cd3e
--- /dev/null
+++ b/libstdc++-v3/include/bits/cpyfunc_impl.h
@@ -0,0 +1,269 @@
+// Implementation of std::copyable_function -*- C++ -*-
+
+// Copyright The GNU Toolchain Authors.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file include/bits/cpyfunc_impl.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{functional}
+ */
+
+#ifndef _GLIBCXX_MOF_CV
+# define _GLIBCXX_MOF_CV
+#endif
+
+#ifdef _GLIBCXX_MOF_REF
+# define _GLIBCXX_MOF_INV_QUALS _GLIBCXX_MOF_CV _GLIBCXX_MOF_REF
+#else
+# define _GLIBCXX_MOF_REF
+# define _GLIBCXX_MOF_INV_QUALS _GLIBCXX_MOF_CV &
+#endif
+
+#define _GLIBCXX_MOF_CV_REF _GLIBCXX_MOF_CV _GLIBCXX_MOF_REF
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /**
+ * @brief Polymorphic copyable function wrapper.
+ * @ingroup functors
+ * @since C++26
+ * @headerfile functional
+ *
+ * The `std::copyable_function` class template is a call wrapper similar
+   * to `std::function`, but it does not provide information about its
+ * target, and preserves constness.
+ *
+ * It also supports const-qualification, ref-qualification, and
+ * no-throw guarantees. The qualifications and exception-specification
+ * of the `copyable_function::operator()` member function are respected
+ * when invoking the target function.
+ */
+ template<typename _Res, typename... _ArgTypes, bool _Noex>
+ class copyable_function<_Res(_ArgTypes...) _GLIBCXX_MOF_CV
+ _GLIBCXX_MOF_REF noexcept(_Noex)>
+ : __polyfunc::_Cpy_base
+ {
+ using _Base = __polyfunc::_Cpy_base;
+ using _Invoker = __polyfunc::_Invoker<_Noex, _Res, _ArgTypes...>;
+ using _Signature = _Invoker::_Signature;
+
+ template<typename _Tp>
+ using __callable
+ = __conditional_t<_Noex,
+ is_nothrow_invocable_r<_Res, _Tp, _ArgTypes...>,
+ is_invocable_r<_Res, _Tp, _ArgTypes...>>;
+
+ // [func.wrap.copy.con]/1 is-callable-from<VT>
+ template<typename _Vt>
+ static constexpr bool __is_callable_from
+ = __and_v<__callable<_Vt _GLIBCXX_MOF_CV_REF>,
+ __callable<_Vt _GLIBCXX_MOF_INV_QUALS>>;
+
+ public:
+ using result_type = _Res;
+
+ /// Creates an empty object.
+ copyable_function() noexcept { }
+
+ /// Creates an empty object.
+ copyable_function(nullptr_t) noexcept { }
+
+ /// Moves the target object, leaving the source empty.
+ copyable_function(copyable_function&& __x) noexcept
+ : _Base(static_cast<_Base&&>(__x)),
+ _M_invoke(std::__exchange(__x._M_invoke, nullptr))
+ { }
+
+ /// Copies the target object.
+ copyable_function(copyable_function const& __x)
+ : _Base(static_cast<const _Base&>(__x)),
+ _M_invoke(__x._M_invoke)
+ { }
+
+ /// Stores a target object initialized from the argument.
+ template<typename _Fn, typename _Vt = decay_t<_Fn>>
+ requires (!is_same_v<_Vt, copyable_function>)
+ && (!__is_in_place_type_v<_Vt>) && __is_callable_from<_Vt>
+ copyable_function(_Fn&& __f) noexcept(_S_nothrow_init<_Vt, _Fn>())
+ {
+ static_assert(is_copy_constructible_v<_Vt>);
+ if constexpr (is_function_v<remove_pointer_t<_Vt>>
+ || is_member_pointer_v<_Vt>
+ || __is_polymorphic_function_v<_Vt>)
+ {
+ if (__f == nullptr)
+ return;
+ }
+
+ if constexpr (!__is_polymorphic_function_v<_Vt>
+ || !__polyfunc::__is_invoker_convertible<_Vt, copyable_function>())
+ {
+ _M_init<_Vt>(std::forward<_Fn>(__f));
+ _M_invoke = _Invoker::template _S_storage<_Vt _GLIBCXX_MOF_INV_QUALS>();
+ }
+ else if constexpr (is_lvalue_reference_v<_Fn>)
+ {
+ _M_copy(__polyfunc::__base_of(__f));
+ _M_invoke = __polyfunc::__invoker_of(__f);
+ }
+ else
+ {
+ _M_move(__polyfunc::__base_of(__f));
+ _M_invoke = std::__exchange(__polyfunc::__invoker_of(__f), nullptr);
+ }
+ }
+
+ /// Stores a target object initialized from the arguments.
+ template<typename _Tp, typename... _Args>
+ requires is_constructible_v<_Tp, _Args...>
+ && __is_callable_from<_Tp>
+ explicit
+ copyable_function(in_place_type_t<_Tp>, _Args&&... __args)
+ noexcept(_S_nothrow_init<_Tp, _Args...>())
+ : _M_invoke(_Invoker::template _S_storage<_Tp _GLIBCXX_MOF_INV_QUALS>())
+ {
+ static_assert(is_same_v<decay_t<_Tp>, _Tp>);
+ static_assert(is_copy_constructible_v<_Tp>);
+ _M_init<_Tp>(std::forward<_Args>(__args)...);
+ }
+
+ /// Stores a target object initialized from the arguments.
+ template<typename _Tp, typename _Up, typename... _Args>
+ requires is_constructible_v<_Tp, initializer_list<_Up>&, _Args...>
+ && __is_callable_from<_Tp>
+ explicit
+ copyable_function(in_place_type_t<_Tp>, initializer_list<_Up> __il,
+ _Args&&... __args)
+ noexcept(_S_nothrow_init<_Tp, initializer_list<_Up>&, _Args...>())
+ : _M_invoke(_Invoker::template _S_storage<_Tp _GLIBCXX_MOF_INV_QUALS>())
+ {
+ static_assert(is_same_v<decay_t<_Tp>, _Tp>);
+ static_assert(is_copy_constructible_v<_Tp>);
+ _M_init<_Tp>(__il, std::forward<_Args>(__args)...);
+ }
+
+ /// Stores a new target object, leaving `x` empty.
+ copyable_function&
+ operator=(copyable_function&& __x) noexcept
+ {
+ // The standard requires self-assignment to be supported, because it
+ // specifies this operation in terms of move-and-swap.
+ if (this != std::addressof(__x)) [[likely]]
+ {
+ _Base::operator=(static_cast<_Base&&>(__x));
+ _M_invoke = std::__exchange(__x._M_invoke, nullptr);
+ }
+ return *this;
+ }
+
+ /// Stores a copy of the source's target object.
+ copyable_function&
+ operator=(const copyable_function& __x)
+ {
+ copyable_function(__x).swap(*this);
+ return *this;
+ }
+
+ /// Destroys the target object (if any).
+ copyable_function&
+ operator=(nullptr_t) noexcept
+ {
+ _M_reset();
+ _M_invoke = nullptr;
+ return *this;
+ }
+
+ /// Stores a new target object, initialized from the argument.
+ template<typename _Fn>
+ requires is_constructible_v<copyable_function, _Fn>
+ copyable_function&
+ operator=(_Fn&& __f)
+ noexcept(is_nothrow_constructible_v<copyable_function, _Fn>)
+ {
+ copyable_function(std::forward<_Fn>(__f)).swap(*this);
+ return *this;
+ }
+
+ ~copyable_function() = default;
+
+ /// True if a target object is present, false otherwise.
+ explicit operator bool() const noexcept
+ { return _M_invoke != nullptr; }
+
+ /** Invoke the target object.
+ *
+ * The target object will be invoked using the supplied arguments,
+ * as an lvalue or rvalue, and as const or non-const, as dictated
+ * by the template arguments of the `copyable_function` specialization.
+ *
+ * @pre Must not be empty.
+ */
+ _Res
+ operator()(_ArgTypes... __args) _GLIBCXX_MOF_CV_REF noexcept(_Noex)
+ {
+ __glibcxx_assert(*this != nullptr);
+ return _M_invoke(this->_M_storage, std::forward<_ArgTypes>(__args)...);
+ }
+
+ /// Exchange the target objects (if any).
+ void
+ swap(copyable_function& __x) noexcept
+ {
+ _Base::swap(__x);
+ std::swap(_M_invoke, __x._M_invoke);
+ }
+
+ /// Exchange the target objects (if any).
+ friend void
+ swap(copyable_function& __x, copyable_function& __y) noexcept
+ { __x.swap(__y); }
+
+ /// Check for emptiness by comparing with `nullptr`.
+ friend bool
+ operator==(const copyable_function& __x, nullptr_t) noexcept
+ { return __x._M_invoke == nullptr; }
+
+ private:
+ typename _Invoker::__storage_func_t _M_invoke = nullptr;
+
+ template<typename _Func>
+ friend auto&
+ __polyfunc::__invoker_of(_Func&) noexcept;
+
+ template<typename _Func>
+ friend auto&
+ __polyfunc::__base_of(_Func&) noexcept;
+
+ template<typename _Dst, typename _Src>
+ friend consteval bool
+ __polyfunc::__is_invoker_convertible() noexcept;
+ };
+
+#undef _GLIBCXX_MOF_CV_REF
+#undef _GLIBCXX_MOF_CV
+#undef _GLIBCXX_MOF_REF
+#undef _GLIBCXX_MOF_INV_QUALS
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace std
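A minimal usage sketch (not part of the patch; assumes a C++26 toolchain with
this change applied) of the class template defined above, relying only on the
documented behaviour: the wrapper is copyable, unlike move_only_function, and
assigning nullptr empties it.

    #include <functional>
    #include <cassert>

    int main()
    {
      std::copyable_function<int() const> f = [i = 41] { return i + 1; };
      auto g = f;      // copy constructible, unlike move_only_function
      assert( f() == 42 && g() == 42 );
      g = nullptr;     // destroys the target; g becomes empty
      assert( !g && f );
    }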
diff --git a/libstdc++-v3/include/bits/funcwrap.h b/libstdc++-v3/include/bits/funcwrap.h
new file mode 100644
index 0000000..4e05353
--- /dev/null
+++ b/libstdc++-v3/include/bits/funcwrap.h
@@ -0,0 +1,507 @@
+// Implementation of std::move_only_function and std::copyable_function -*- C++ -*-
+
+// Copyright The GNU Toolchain Authors.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file include/bits/funcwrap.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{functional}
+ */
+
+#ifndef _GLIBCXX_FUNCWRAP_H
+#define _GLIBCXX_FUNCWRAP_H 1
+
+#ifdef _GLIBCXX_SYSHDR
+#pragma GCC system_header
+#endif
+
+#include <bits/version.h>
+
+#if defined(__glibcxx_move_only_function) || defined(__glibcxx_copyable_function)
+
+#include <bits/invoke.h>
+#include <bits/utility.h>
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /// @cond undocumented
+ template<typename _Tp>
+ inline constexpr bool __is_polymorphic_function_v = false;
+
+ namespace __polyfunc
+ {
+ union _Ptrs
+ {
+ void* _M_obj;
+ void (*_M_func)();
+ };
+
+ struct _Storage
+ {
+ void* _M_addr() noexcept { return &_M_bytes[0]; }
+ void const* _M_addr() const noexcept { return &_M_bytes[0]; }
+
+ template<typename _Tp>
+ static consteval bool
+ _S_stored_locally() noexcept
+ {
+ return sizeof(_Tp) <= sizeof(_Storage)
+ && alignof(_Tp) <= alignof(_Storage)
+ && is_nothrow_move_constructible_v<_Tp>;
+ }
+
+ template<typename _Tp, typename... _Args>
+ static consteval bool
+ _S_nothrow_init() noexcept
+ {
+ if constexpr (_S_stored_locally<_Tp>())
+ return is_nothrow_constructible_v<_Tp, _Args...>;
+ return false;
+ }
+
+ template<typename _Tp, typename... _Args>
+ void
+ _M_init(_Args&&... __args) noexcept(_S_nothrow_init<_Tp, _Args...>())
+ {
+ if constexpr (is_function_v<remove_pointer_t<_Tp>>)
+ {
+ static_assert( sizeof...(__args) <= 1 );
+ // __args contains at most one element; the fold yields nullptr if empty.
+ _Tp __func = (nullptr, ..., __args);
+ _M_ptrs._M_func = reinterpret_cast<void(*)()>(__func);
+ }
+ else if constexpr (!_S_stored_locally<_Tp>())
+ _M_ptrs._M_obj = new _Tp(std::forward<_Args>(__args)...);
+ else
+ ::new (_M_addr()) _Tp(std::forward<_Args>(__args)...);
+ }
+
+ template<typename _Tp>
+ [[__gnu__::__always_inline__]]
+ _Tp*
+ _M_ptr() const noexcept
+ {
+ if constexpr (!_S_stored_locally<remove_const_t<_Tp>>())
+ return static_cast<_Tp*>(_M_ptrs._M_obj);
+ else if constexpr (is_const_v<_Tp>)
+ return static_cast<_Tp*>(_M_addr());
+ else
+ // _Manager and _Invoker pass _Storage by const&, even for mutable sources.
+ return static_cast<_Tp*>(const_cast<void*>(_M_addr()));
+ }
+
+ template<typename _Ref>
+ [[__gnu__::__always_inline__]]
+ _Ref
+ _M_ref() const noexcept
+ {
+ using _Tp = remove_reference_t<_Ref>;
+ if constexpr (is_function_v<remove_pointer_t<_Tp>>)
+ return reinterpret_cast<_Tp>(_M_ptrs._M_func);
+ else
+ return static_cast<_Ref>(*_M_ptr<_Tp>());
+ }
+
+ // We want to have enough space to store a simple delegate type.
+ struct _Delegate { void (_Storage::*__pfm)(); _Storage* __obj; };
+ union {
+ _Ptrs _M_ptrs;
+ alignas(_Delegate) alignas(void(*)())
+ unsigned char _M_bytes[sizeof(_Delegate)];
+ };
+ };
+
+ template<bool _Noex, typename _Ret, typename... _Args>
+ struct _Base_invoker
+ {
+ using _Signature = _Ret(*)(_Args...) noexcept(_Noex);
+
+ using __storage_func_t = _Ret(*)(const _Storage&, _Args...) noexcept(_Noex);
+ template<typename _Tp>
+ static consteval __storage_func_t
+ _S_storage()
+ { return &_S_call_storage<_Adjust_target<_Tp>>; }
+
+ private:
+ template<typename _Tp, typename _Td = remove_cvref_t<_Tp>>
+ using _Adjust_target =
+ __conditional_t<is_pointer_v<_Td> || is_member_pointer_v<_Td>, _Td, _Tp>;
+
+ template<typename _Tp>
+ static _Ret
+ _S_call_storage(const _Storage& __ref, _Args... __args) noexcept(_Noex)
+ {
+ return std::__invoke_r<_Ret>(__ref._M_ref<_Tp>(),
+ std::forward<_Args>(__args)...);
+ }
+ };
+
+ template<typename _Tp>
+ using __param_t = __conditional_t<is_scalar_v<_Tp>, _Tp, _Tp&&>;
+
+ template<bool _Noex, typename _Ret, typename... _Args>
+ using _Invoker = _Base_invoker<_Noex, remove_cv_t<_Ret>, __param_t<_Args>...>;
+
+ template<typename _Func>
+ auto&
+ __invoker_of(_Func& __f) noexcept
+ { return __f._M_invoke; }
+
+ template<typename _Func>
+ auto&
+ __base_of(_Func& __f) noexcept
+ { return static_cast<__like_t<_Func&, typename _Func::_Base>>(__f); }
+
+ template<typename _Src, typename _Dst>
+ consteval bool
+ __is_invoker_convertible() noexcept
+ {
+ if constexpr (requires { typename _Src::_Signature; })
+ return is_convertible_v<typename _Src::_Signature,
+ typename _Dst::_Signature>;
+ else
+ return false;
+ }
+
+ struct _Manager
+ {
+ enum class _Op
+ {
+ // saves address of entity in *__src to __target._M_ptrs,
+ _Address,
+ // moves entity stored in *__src to __target, __src becomes empty
+ _Move,
+ // copies entity stored in *__src to __target, supported only if
+ // _ProvideCopy is specified.
+ _Copy,
+ // destroys entity stored in __target, __src is ignored
+ _Destroy,
+ };
+
+ // A function that performs operation __op on the __target and possibly __src.
+ using _Func = void (*)(_Op __op, _Storage& __target, const _Storage* __src);
+
+ // The no-op manager function for objects with no target.
+ static void _S_empty(_Op, _Storage&, const _Storage*) noexcept { }
+
+ template<bool _ProvideCopy, typename _Tp>
+ consteval static auto
+ _S_select()
+ {
+ if constexpr (is_function_v<remove_pointer_t<_Tp>>)
+ return &_S_func;
+ else if constexpr (!_Storage::_S_stored_locally<_Tp>())
+ return &_S_ptr<_ProvideCopy, _Tp>;
+ else if constexpr (is_trivially_copyable_v<_Tp>)
+ return &_S_trivial;
+ else
+ return &_S_local<_ProvideCopy, _Tp>;
+ }
+
+ private:
+ static void
+ _S_func(_Op __op, _Storage& __target, const _Storage* __src) noexcept
+ {
+ switch (__op)
+ {
+ case _Op::_Address:
+ case _Op::_Move:
+ case _Op::_Copy:
+ __target._M_ptrs._M_func = __src->_M_ptrs._M_func;
+ return;
+ case _Op::_Destroy:
+ return;
+ }
+ }
+
+ static void
+ _S_trivial(_Op __op, _Storage& __target, const _Storage* __src) noexcept
+ {
+ switch (__op)
+ {
+ case _Op::_Address:
+ __target._M_ptrs._M_obj = const_cast<void*>(__src->_M_addr());
+ return;
+ case _Op::_Move:
+ case _Op::_Copy:
+ // N.B. Creating _Storage starts the lifetime of its _M_bytes char array,
+ // which implicitly creates, amongst others, all possible trivially
+ // copyable objects, so we copy any object present in __src._M_bytes.
+ ::new (&__target) _Storage(*__src);
+ return;
+ case _Op::_Destroy:
+ return;
+ }
+ }
+
+ template<bool _Provide_copy, typename _Tp>
+ static void
+ _S_local(_Op __op, _Storage& __target, const _Storage* __src)
+ noexcept(!_Provide_copy)
+ {
+ switch (__op)
+ {
+ case _Op::_Address:
+ __target._M_ptrs._M_obj = __src->_M_ptr<_Tp>();
+ return;
+ case _Op::_Move:
+ {
+ _Tp* __obj = __src->_M_ptr<_Tp>();
+ ::new(__target._M_addr()) _Tp(std::move(*__obj));
+ __obj->~_Tp();
+ }
+ return;
+ case _Op::_Destroy:
+ __target._M_ptr<_Tp>()->~_Tp();
+ return;
+ case _Op::_Copy:
+ if constexpr (_Provide_copy)
+ ::new (__target._M_addr()) _Tp(__src->_M_ref<const _Tp&>());
+ else
+ __builtin_unreachable();
+ return;
+ }
+ }
+
+ template<bool _Provide_copy, typename _Tp>
+ static void
+ _S_ptr(_Op __op, _Storage& __target, const _Storage* __src)
+ noexcept(!_Provide_copy)
+ {
+ switch (__op)
+ {
+ case _Op::_Address:
+ case _Op::_Move:
+ __target._M_ptrs._M_obj = __src->_M_ptrs._M_obj;
+ return;
+ case _Op::_Destroy:
+ delete __target._M_ptr<_Tp>();
+ return;
+ case _Op::_Copy:
+ if constexpr (_Provide_copy)
+ __target._M_ptrs._M_obj = new _Tp(__src->_M_ref<const _Tp&>());
+ else
+ __builtin_unreachable();
+ return;
+ }
+ }
+ };
+
+ class _Mo_base
+ {
+ protected:
+ _Mo_base() noexcept
+ : _M_manage(_Manager::_S_empty)
+ { }
+
+ _Mo_base(_Mo_base&& __x) noexcept
+ { _M_move(__x); }
+
+ template<typename _Tp, typename... _Args>
+ static consteval bool
+ _S_nothrow_init() noexcept
+ { return _Storage::_S_nothrow_init<_Tp, _Args...>(); }
+
+ template<typename _Tp, typename... _Args>
+ void
+ _M_init(_Args&&... __args)
+ noexcept(_S_nothrow_init<_Tp, _Args...>())
+ {
+ _M_storage._M_init<_Tp>(std::forward<_Args>(__args)...);
+ _M_manage = _Manager::_S_select<false, _Tp>();
+ }
+
+ void
+ _M_move(_Mo_base& __x) noexcept
+ {
+ using _Op = _Manager::_Op;
+ _M_manage = std::__exchange(__x._M_manage, _Manager::_S_empty);
+ _M_manage(_Op::_Move, _M_storage, &__x._M_storage);
+ }
+
+ _Mo_base&
+ operator=(_Mo_base&& __x) noexcept
+ {
+ _M_destroy();
+ _M_move(__x);
+ return *this;
+ }
+
+ void
+ _M_reset() noexcept
+ {
+ _M_destroy();
+ _M_manage = _Manager::_S_empty;
+ }
+
+ ~_Mo_base()
+ { _M_destroy(); }
+
+ void
+ swap(_Mo_base& __x) noexcept
+ {
+ using _Op = _Manager::_Op;
+ // Order of operations here is more efficient if __x is empty.
+ _Storage __s;
+ __x._M_manage(_Op::_Move, __s, &__x._M_storage);
+ _M_manage(_Op::_Move, __x._M_storage, &_M_storage);
+ __x._M_manage(_Op::_Move, _M_storage, &__s);
+ std::swap(_M_manage, __x._M_manage);
+ }
+
+ _Storage _M_storage;
+
+ private:
+ void _M_destroy() noexcept
+ { _M_manage(_Manager::_Op::_Destroy, _M_storage, nullptr); }
+
+ _Manager::_Func _M_manage;
+
+#ifdef __glibcxx_copyable_function // C++ >= 26 && HOSTED
+ friend class _Cpy_base;
+#endif // __glibcxx_copyable_function
+ };
+
+} // namespace __polyfunc
+ /// @endcond
+
+#ifdef __glibcxx_move_only_function // C++ >= 23 && HOSTED
+ template<typename... _Signature>
+ class move_only_function; // not defined
+
+ /// @cond undocumented
+ template<typename _Tp>
+ constexpr bool __is_polymorphic_function_v<move_only_function<_Tp>> = true;
+
+ namespace __detail::__variant
+ {
+ template<typename> struct _Never_valueless_alt; // see <variant>
+
+ // Provide the strong exception-safety guarantee when emplacing a
+ // move_only_function into a variant.
+ template<typename... _Signature>
+ struct _Never_valueless_alt<std::move_only_function<_Signature...>>
+ : true_type
+ { };
+ } // namespace __detail::__variant
+ /// @endcond
+#endif // __glibcxx_move_only_function
+
+#ifdef __glibcxx_copyable_function // C++ >= 26 && HOSTED
+ /// @cond undocumented
+ namespace __polyfunc
+ {
+ class _Cpy_base : public _Mo_base
+ {
+ protected:
+ _Cpy_base() = default;
+
+ template<typename _Tp, typename... _Args>
+ void
+ _M_init(_Args&&... __args)
+ noexcept(_S_nothrow_init<_Tp, _Args...>())
+ {
+ _M_storage._M_init<_Tp>(std::forward<_Args>(__args)...);
+ _M_manage = _Manager::_S_select<true, _Tp>();
+ }
+
+ void
+ _M_copy(_Cpy_base const& __x)
+ {
+ using _Op = _Manager::_Op;
+ __x._M_manage(_Op::_Copy, _M_storage, &__x._M_storage);
+ _M_manage = __x._M_manage;
+ }
+
+ _Cpy_base(_Cpy_base&&) = default;
+
+ _Cpy_base(_Cpy_base const& __x)
+ { _M_copy(__x); }
+
+ _Cpy_base&
+ operator=(_Cpy_base&&) = default;
+
+ // Deleted: copy assignment must be implemented by the derived class
+ // using copy-and-swap, to provide the strong exception guarantee.
+ _Cpy_base&
+ operator=(_Cpy_base const&) = delete;
+ };
+ } // namespace __polyfunc
+ /// @endcond
+
+ template<typename... _Signature>
+ class copyable_function; // not defined
+
+ template<typename _Tp>
+ constexpr bool __is_polymorphic_function_v<copyable_function<_Tp>> = true;
+
+ namespace __detail::__variant
+ {
+ template<typename> struct _Never_valueless_alt; // see <variant>
+
+ // Provide the strong exception-safety guarantee when emplacing a
+ // copyable_function into a variant.
+ template<typename... _Signature>
+ struct _Never_valueless_alt<std::copyable_function<_Signature...>>
+ : true_type
+ { };
+ } // namespace __detail::__variant
+#endif // __glibcxx_copyable_function
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace std
+
+#ifdef __glibcxx_move_only_function // C++ >= 23 && HOSTED
+#include "mofunc_impl.h"
+#define _GLIBCXX_MOF_CV const
+#include "mofunc_impl.h"
+#define _GLIBCXX_MOF_REF &
+#include "mofunc_impl.h"
+#define _GLIBCXX_MOF_REF &&
+#include "mofunc_impl.h"
+#define _GLIBCXX_MOF_CV const
+#define _GLIBCXX_MOF_REF &
+#include "mofunc_impl.h"
+#define _GLIBCXX_MOF_CV const
+#define _GLIBCXX_MOF_REF &&
+#include "mofunc_impl.h"
+#endif // __glibcxx_move_only_function
+
+#ifdef __glibcxx_copyable_function // C++ >= 26 && HOSTED
+#include "cpyfunc_impl.h"
+#define _GLIBCXX_MOF_CV const
+#include "cpyfunc_impl.h"
+#define _GLIBCXX_MOF_REF &
+#include "cpyfunc_impl.h"
+#define _GLIBCXX_MOF_REF &&
+#include "cpyfunc_impl.h"
+#define _GLIBCXX_MOF_CV const
+#define _GLIBCXX_MOF_REF &
+#include "cpyfunc_impl.h"
+#define _GLIBCXX_MOF_CV const
+#define _GLIBCXX_MOF_REF &&
+#include "cpyfunc_impl.h"
+#endif // __glibcxx_copyable_function
+
+#endif // __glibcxx_move_only_function || __glibcxx_copyable_function
+#endif // _GLIBCXX_FUNCWRAP_H
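For reference, a standalone sketch (names and the `Storage` stand-in are mine)
of the locality test implemented by _Storage::_S_stored_locally above: a
target is kept in the inline buffer only if it fits in the delegate-sized
buffer, is no more strictly aligned than it, and is nothrow move
constructible, so that move and swap can stay noexcept.

    #include <type_traits>

    // Stand-in for the internal _Delegate: a pointer-to-member-function
    // plus an object pointer, which sizes the inline buffer.
    struct Storage { void (Storage::*pfm)(); Storage* obj; };

    template<typename T>
    constexpr bool stored_locally =
      sizeof(T) <= sizeof(Storage)
      && alignof(T) <= alignof(Storage)
      && std::is_nothrow_move_constructible_v<T>;

    static_assert( stored_locally<int (*)()> );           // pointers fit
    static_assert( !stored_locally<char[4 * sizeof(Storage)]> ); // too large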
diff --git a/libstdc++-v3/include/bits/mofunc_impl.h b/libstdc++-v3/include/bits/mofunc_impl.h
index 318a55e..1ceb910 100644
--- a/libstdc++-v3/include/bits/mofunc_impl.h
+++ b/libstdc++-v3/include/bits/mofunc_impl.h
@@ -62,8 +62,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Res, typename... _ArgTypes, bool _Noex>
class move_only_function<_Res(_ArgTypes...) _GLIBCXX_MOF_CV
_GLIBCXX_MOF_REF noexcept(_Noex)>
- : _Mofunc_base
+ : __polyfunc::_Mo_base
{
+ using _Base = __polyfunc::_Mo_base;
+ using _Invoker = __polyfunc::_Invoker<_Noex, _Res, _ArgTypes...>;
+ using _Signature = _Invoker::_Signature;
+
template<typename _Tp>
using __callable
= __conditional_t<_Noex,
@@ -87,7 +91,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
/// Moves the target object, leaving the source empty.
move_only_function(move_only_function&& __x) noexcept
- : _Mofunc_base(static_cast<_Mofunc_base&&>(__x)),
+ : _Base(static_cast<_Base&&>(__x)),
_M_invoke(std::__exchange(__x._M_invoke, nullptr))
{ }
@@ -97,15 +101,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
&& (!__is_in_place_type_v<_Vt>) && __is_callable_from<_Vt>
move_only_function(_Fn&& __f) noexcept(_S_nothrow_init<_Vt, _Fn>())
{
+ // _GLIBCXX_RESOLVE_LIB_DEFECTS
+ // 4255. move_only_function constructor should recognize empty
+ // copyable_functions
if constexpr (is_function_v<remove_pointer_t<_Vt>>
|| is_member_pointer_v<_Vt>
- || __is_move_only_function_v<_Vt>)
+ || __is_polymorphic_function_v<_Vt>)
{
if (__f == nullptr)
return;
}
- _M_init<_Vt>(std::forward<_Fn>(__f));
- _M_invoke = &_S_invoke<_Vt>;
+
+ if constexpr (__is_polymorphic_function_v<_Vt>
+ && __polyfunc::__is_invoker_convertible<_Vt, move_only_function>())
+ {
+ // Handle cases where _Fn is a const reference to copyable_function,
+ // by first creating a temporary and moving from it.
+ _Vt __tmp(std::forward<_Fn>(__f));
+ _M_move(__polyfunc::__base_of(__tmp));
+ _M_invoke = std::__exchange(__polyfunc::__invoker_of(__tmp), nullptr);
+ }
+ else
+ {
+ _M_init<_Vt>(std::forward<_Fn>(__f));
+ _M_invoke = _Invoker::template _S_storage<_Vt _GLIBCXX_MOF_INV_QUALS>();
+ }
}
/// Stores a target object initialized from the arguments.
@@ -115,7 +135,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
explicit
move_only_function(in_place_type_t<_Tp>, _Args&&... __args)
noexcept(_S_nothrow_init<_Tp, _Args...>())
- : _M_invoke(&_S_invoke<_Tp>)
+ : _M_invoke(_Invoker::template _S_storage<_Tp _GLIBCXX_MOF_INV_QUALS>())
{
static_assert(is_same_v<decay_t<_Tp>, _Tp>);
_M_init<_Tp>(std::forward<_Args>(__args)...);
@@ -129,7 +149,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
move_only_function(in_place_type_t<_Tp>, initializer_list<_Up> __il,
_Args&&... __args)
noexcept(_S_nothrow_init<_Tp, initializer_list<_Up>&, _Args...>())
- : _M_invoke(&_S_invoke<_Tp>)
+ : _M_invoke(_Invoker::template _S_storage<_Tp _GLIBCXX_MOF_INV_QUALS>())
{
static_assert(is_same_v<decay_t<_Tp>, _Tp>);
_M_init<_Tp>(__il, std::forward<_Args>(__args)...);
@@ -139,8 +159,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
move_only_function&
operator=(move_only_function&& __x) noexcept
{
- _Mofunc_base::operator=(static_cast<_Mofunc_base&&>(__x));
- _M_invoke = std::__exchange(__x._M_invoke, nullptr);
+ // The standard requires self-assignment to be supported, because it
+ // specifies this operation in terms of move-and-swap.
+ if (this != std::addressof(__x)) [[likely]]
+ {
+ _Base::operator=(static_cast<_Base&&>(__x));
+ _M_invoke = std::__exchange(__x._M_invoke, nullptr);
+ }
return *this;
}
@@ -148,7 +173,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
move_only_function&
operator=(nullptr_t) noexcept
{
- _Mofunc_base::operator=(nullptr);
+ _M_reset();
_M_invoke = nullptr;
return *this;
}
@@ -167,7 +192,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
~move_only_function() = default;
/// True if a target object is present, false otherwise.
- explicit operator bool() const noexcept { return _M_invoke != nullptr; }
+ explicit operator bool() const noexcept
+ { return _M_invoke != nullptr; }
/** Invoke the target object.
*
@@ -181,14 +207,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
operator()(_ArgTypes... __args) _GLIBCXX_MOF_CV_REF noexcept(_Noex)
{
__glibcxx_assert(*this != nullptr);
- return _M_invoke(this, std::forward<_ArgTypes>(__args)...);
+ return _M_invoke(this->_M_storage, std::forward<_ArgTypes>(__args)...);
}
/// Exchange the target objects (if any).
void
swap(move_only_function& __x) noexcept
{
- _Mofunc_base::swap(__x);
+ _Base::swap(__x);
std::swap(_M_invoke, __x._M_invoke);
}
@@ -203,25 +229,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{ return __x._M_invoke == nullptr; }
private:
- template<typename _Tp>
- using __param_t = __conditional_t<is_scalar_v<_Tp>, _Tp, _Tp&&>;
+ typename _Invoker::__storage_func_t _M_invoke = nullptr;
- using _Invoker = _Res (*)(_Mofunc_base _GLIBCXX_MOF_CV*,
- __param_t<_ArgTypes>...) noexcept(_Noex);
+ template<typename _Func>
+ friend auto&
+ __polyfunc::__invoker_of(_Func&) noexcept;
- template<typename _Tp>
- static _Res
- _S_invoke(_Mofunc_base _GLIBCXX_MOF_CV* __self,
- __param_t<_ArgTypes>... __args) noexcept(_Noex)
- {
- using _TpCv = _Tp _GLIBCXX_MOF_CV;
- using _TpInv = _Tp _GLIBCXX_MOF_INV_QUALS;
- return std::__invoke_r<_Res>(
- std::forward<_TpInv>(*_S_access<_TpCv>(__self)),
- std::forward<__param_t<_ArgTypes>>(__args)...);
- }
+ template<typename _Func>
+ friend auto&
+ __polyfunc::__base_of(_Func&) noexcept;
- _Invoker _M_invoke = nullptr;
+ template<typename _Dst, typename _Src>
+ friend consteval bool
+ __polyfunc::__is_invoker_convertible() noexcept;
};
#undef _GLIBCXX_MOF_CV_REF
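A sketch (mine, not from the patch) of what the new constructor branch above
enables: constructing a move_only_function from a copyable_function now moves
the stored target and its invoker directly, instead of nesting one wrapper
inside the other.

    #include <functional>
    #include <utility>

    void example()
    {
      std::copyable_function<int()> cf = [] { return 7; };
      // Steals cf's stored target and invoker; no wrapper-in-wrapper
      // double indirection on each call.
      std::move_only_function<int()> mf = std::move(cf);
    }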
diff --git a/libstdc++-v3/include/bits/move_only_function.h b/libstdc++-v3/include/bits/move_only_function.h
deleted file mode 100644
index 42b33d0..0000000
--- a/libstdc++-v3/include/bits/move_only_function.h
+++ /dev/null
@@ -1,218 +0,0 @@
-// Implementation of std::move_only_function -*- C++ -*-
-
-// Copyright The GNU Toolchain Authors.
-//
-// This file is part of the GNU ISO C++ Library. This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// Under Section 7 of GPL version 3, you are granted additional
-// permissions described in the GCC Runtime Library Exception, version
-// 3.1, as published by the Free Software Foundation.
-
-// You should have received a copy of the GNU General Public License and
-// a copy of the GCC Runtime Library Exception along with this program;
-// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
-// <http://www.gnu.org/licenses/>.
-
-/** @file include/bits/move_only_function.h
- * This is an internal header file, included by other library headers.
- * Do not attempt to use it directly. @headername{functional}
- */
-
-#ifndef _GLIBCXX_MOVE_ONLY_FUNCTION_H
-#define _GLIBCXX_MOVE_ONLY_FUNCTION_H 1
-
-#ifdef _GLIBCXX_SYSHDR
-#pragma GCC system_header
-#endif
-
-#include <bits/version.h>
-
-#ifdef __glibcxx_move_only_function // C++ >= 23 && HOSTED
-
-#include <bits/invoke.h>
-#include <bits/utility.h>
-
-namespace std _GLIBCXX_VISIBILITY(default)
-{
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
-
- template<typename... _Signature>
- class move_only_function; // not defined
-
- /// @cond undocumented
- class _Mofunc_base
- {
- protected:
- _Mofunc_base() noexcept
- : _M_manage(_S_empty)
- { }
-
- _Mofunc_base(_Mofunc_base&& __x) noexcept
- {
- _M_manage = std::__exchange(__x._M_manage, _S_empty);
- _M_manage(_M_storage, &__x._M_storage);
- }
-
- template<typename _Tp, typename... _Args>
- static constexpr bool
- _S_nothrow_init() noexcept
- {
- if constexpr (__stored_locally<_Tp>)
- return is_nothrow_constructible_v<_Tp, _Args...>;
- return false;
- }
-
- template<typename _Tp, typename... _Args>
- void
- _M_init(_Args&&... __args) noexcept(_S_nothrow_init<_Tp, _Args...>())
- {
- if constexpr (__stored_locally<_Tp>)
- ::new (_M_storage._M_addr()) _Tp(std::forward<_Args>(__args)...);
- else
- _M_storage._M_p = new _Tp(std::forward<_Args>(__args)...);
-
- _M_manage = &_S_manage<_Tp>;
- }
-
- _Mofunc_base&
- operator=(_Mofunc_base&& __x) noexcept
- {
- _M_manage(_M_storage, nullptr);
- _M_manage = std::__exchange(__x._M_manage, _S_empty);
- _M_manage(_M_storage, &__x._M_storage);
- return *this;
- }
-
- _Mofunc_base&
- operator=(nullptr_t) noexcept
- {
- _M_manage(_M_storage, nullptr);
- _M_manage = _S_empty;
- return *this;
- }
-
- ~_Mofunc_base() { _M_manage(_M_storage, nullptr); }
-
- void
- swap(_Mofunc_base& __x) noexcept
- {
- // Order of operations here is more efficient if __x is empty.
- _Storage __s;
- __x._M_manage(__s, &__x._M_storage);
- _M_manage(__x._M_storage, &_M_storage);
- __x._M_manage(_M_storage, &__s);
- std::swap(_M_manage, __x._M_manage);
- }
-
- template<typename _Tp, typename _Self>
- static _Tp*
- _S_access(_Self* __self) noexcept
- {
- if constexpr (__stored_locally<remove_const_t<_Tp>>)
- return static_cast<_Tp*>(__self->_M_storage._M_addr());
- else
- return static_cast<_Tp*>(__self->_M_storage._M_p);
- }
-
- private:
- struct _Storage
- {
- void* _M_addr() noexcept { return &_M_bytes[0]; }
- const void* _M_addr() const noexcept { return &_M_bytes[0]; }
-
- // We want to have enough space to store a simple delegate type.
- struct _Delegate { void (_Storage::*__pfm)(); _Storage* __obj; };
- union {
- void* _M_p;
- alignas(_Delegate) alignas(void(*)())
- unsigned char _M_bytes[sizeof(_Delegate)];
- };
- };
-
- template<typename _Tp>
- static constexpr bool __stored_locally
- = sizeof(_Tp) <= sizeof(_Storage) && alignof(_Tp) <= alignof(_Storage)
- && is_nothrow_move_constructible_v<_Tp>;
-
- // A function that either destroys the target object stored in __target,
- // or moves the target object from *__src to __target.
- using _Manager = void (*)(_Storage& __target, _Storage* __src) noexcept;
-
- // The no-op manager function for objects with no target.
- static void _S_empty(_Storage&, _Storage*) noexcept { }
-
- // The real manager function for a target object of type _Tp.
- template<typename _Tp>
- static void
- _S_manage(_Storage& __target, _Storage* __src) noexcept
- {
- if constexpr (__stored_locally<_Tp>)
- {
- if (__src)
- {
- _Tp* __rval = static_cast<_Tp*>(__src->_M_addr());
- ::new (__target._M_addr()) _Tp(std::move(*__rval));
- __rval->~_Tp();
- }
- else
- static_cast<_Tp*>(__target._M_addr())->~_Tp();
- }
- else
- {
- if (__src)
- __target._M_p = __src->_M_p;
- else
- delete static_cast<_Tp*>(__target._M_p);
- }
- }
-
- _Storage _M_storage;
- _Manager _M_manage;
- };
-
- template<typename _Tp>
- inline constexpr bool __is_move_only_function_v = false;
- template<typename _Tp>
- constexpr bool __is_move_only_function_v<move_only_function<_Tp>> = true;
- /// @endcond
-
- namespace __detail::__variant
- {
- template<typename> struct _Never_valueless_alt; // see <variant>
-
- // Provide the strong exception-safety guarantee when emplacing a
- // move_only_function into a variant.
- template<typename... _Signature>
- struct _Never_valueless_alt<std::move_only_function<_Signature...>>
- : true_type
- { };
- } // namespace __detail::__variant
-
-_GLIBCXX_END_NAMESPACE_VERSION
-} // namespace std
-
-#include "mofunc_impl.h"
-#define _GLIBCXX_MOF_CV const
-#include "mofunc_impl.h"
-#define _GLIBCXX_MOF_REF &
-#include "mofunc_impl.h"
-#define _GLIBCXX_MOF_REF &&
-#include "mofunc_impl.h"
-#define _GLIBCXX_MOF_CV const
-#define _GLIBCXX_MOF_REF &
-#include "mofunc_impl.h"
-#define _GLIBCXX_MOF_CV const
-#define _GLIBCXX_MOF_REF &&
-#include "mofunc_impl.h"
-
-#endif // __glibcxx_move_only_function
-#endif // _GLIBCXX_MOVE_ONLY_FUNCTION_H
diff --git a/libstdc++-v3/include/bits/version.def b/libstdc++-v3/include/bits/version.def
index 2d34a8d..6ca148f 100644
--- a/libstdc++-v3/include/bits/version.def
+++ b/libstdc++-v3/include/bits/version.def
@@ -1748,6 +1748,15 @@ ftms = {
};
ftms = {
+ name = copyable_function;
+ values = {
+ v = 202306;
+ cxxmin = 26;
+ hosted = yes;
+ };
+};
+
+ftms = {
name = out_ptr;
values = {
v = 202311;
diff --git a/libstdc++-v3/include/bits/version.h b/libstdc++-v3/include/bits/version.h
index 24831f7..48a090c 100644
--- a/libstdc++-v3/include/bits/version.h
+++ b/libstdc++-v3/include/bits/version.h
@@ -1948,6 +1948,16 @@
#endif /* !defined(__cpp_lib_move_only_function) && defined(__glibcxx_want_move_only_function) */
#undef __glibcxx_want_move_only_function
+#if !defined(__cpp_lib_copyable_function)
+# if (__cplusplus > 202302L) && _GLIBCXX_HOSTED
+# define __glibcxx_copyable_function 202306L
+# if defined(__glibcxx_want_all) || defined(__glibcxx_want_copyable_function)
+# define __cpp_lib_copyable_function 202306L
+# endif
+# endif
+#endif /* !defined(__cpp_lib_copyable_function) && defined(__glibcxx_want_copyable_function) */
+#undef __glibcxx_want_copyable_function
+
#if !defined(__cpp_lib_out_ptr)
# if (__cplusplus >= 202100L)
# define __glibcxx_out_ptr 202311L
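How user code is expected to detect the new feature, given the macro defined
above (a sketch of the usual feature-test idiom):

    #include <version>

    #if defined(__cpp_lib_copyable_function) \
        && __cpp_lib_copyable_function >= 202306L
    # include <functional>   // std::copyable_function is available
    #endif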
diff --git a/libstdc++-v3/include/std/format b/libstdc++-v3/include/std/format
index b3192cf..f0b0252 100644
--- a/libstdc++-v3/include/std/format
+++ b/libstdc++-v3/include/std/format
@@ -1863,20 +1863,24 @@ namespace __format
_Spec<_CharT> _M_spec{};
};
+#ifdef __BFLT16_DIG__
+ using __bflt16_t = decltype(0.0bf16);
+#endif
+
// Decide how 128-bit floating-point types should be formatted (or not).
- // When supported, the typedef __format::__float128_t is the type that
- // format arguments should be converted to for storage in basic_format_arg.
+ // When supported, the typedef __format::__flt128_t is the type that format
+ // arguments should be converted to before passing them to __formatter_fp.
// Define the macro _GLIBCXX_FORMAT_F128 to say they're supported.
- // _GLIBCXX_FORMAT_F128=1 means __float128, _Float128 etc. will be formatted
- // by converting them to long double (or __ieee128 for powerpc64le).
- // _GLIBCXX_FORMAT_F128=2 means basic_format_arg needs to enable explicit
- // support for _Float128, rather than formatting it as another type.
+ // The __float128 and _Float128 types will be formatted by converting them to:
+ // __ieee128 (same as __float128) when _GLIBCXX_FORMAT_F128=1,
+ // long double when _GLIBCXX_FORMAT_F128=2,
+ // _Float128 when _GLIBCXX_FORMAT_F128=3.
#undef _GLIBCXX_FORMAT_F128
#ifdef _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT
// Format 128-bit floating-point types using __ieee128.
- using __float128_t = __ieee128;
+ using __flt128_t = __ieee128;
# define _GLIBCXX_FORMAT_F128 1
#ifdef __LONG_DOUBLE_IEEE128__
@@ -1910,14 +1914,14 @@ namespace __format
#elif defined _GLIBCXX_LDOUBLE_IS_IEEE_BINARY128
// Format 128-bit floating-point types using long double.
- using __float128_t = long double;
-# define _GLIBCXX_FORMAT_F128 1
+ using __flt128_t = long double;
+# define _GLIBCXX_FORMAT_F128 2
#elif __FLT128_DIG__ && defined(_GLIBCXX_HAVE_FLOAT128_MATH)
// Format 128-bit floating-point types using _Float128.
- using __float128_t = _Float128;
-# define _GLIBCXX_FORMAT_F128 2
+ using __flt128_t = _Float128;
+# define _GLIBCXX_FORMAT_F128 3
# if __cplusplus == 202002L
// These overloads exist in the library, but are not declared for C++20.
@@ -2947,8 +2951,8 @@ namespace __format
};
#endif
-#if defined(__FLT128_DIG__) && _GLIBCXX_FORMAT_F128 == 1
- // Reuse __formatter_fp<C>::format<__float128_t, Out> for _Float128.
+#if defined(__FLT128_DIG__) && _GLIBCXX_FORMAT_F128
+ // Use __formatter_fp<C>::format<__format::__flt128_t, Out> for _Float128.
template<__format::__char _CharT>
struct formatter<_Float128, _CharT>
{
@@ -2962,17 +2966,45 @@ namespace __format
template<typename _Out>
typename basic_format_context<_Out, _CharT>::iterator
format(_Float128 __u, basic_format_context<_Out, _CharT>& __fc) const
- { return _M_f.format((__format::__float128_t)__u, __fc); }
+ { return _M_f.format((__format::__flt128_t)__u, __fc); }
+
+ private:
+ __format::__formatter_fp<_CharT> _M_f;
+ };
+#endif
+
+#if defined(__SIZEOF_FLOAT128__) && _GLIBCXX_FORMAT_F128 != 1
+ // Reuse __formatter_fp<C>::format<__format::__flt128_t, Out> for __float128.
+ // This formatter is not declared if _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT is
+ // true, as __float128, when present, is the same type as __ieee128, which
+ // may be the same as long double.
+ template<__format::__char _CharT>
+ struct formatter<__float128, _CharT>
+ {
+ formatter() = default;
+
+ [[__gnu__::__always_inline__]]
+ constexpr typename basic_format_parse_context<_CharT>::iterator
+ parse(basic_format_parse_context<_CharT>& __pc)
+ { return _M_f.parse(__pc); }
+
+ template<typename _Out>
+ typename basic_format_context<_Out, _CharT>::iterator
+ format(__float128 __u, basic_format_context<_Out, _CharT>& __fc) const
+ { return _M_f.format((__format::__flt128_t)__u, __fc); }
private:
__format::__formatter_fp<_CharT> _M_f;
+
+ static_assert( !is_same_v<__float128, long double>,
+ "This specialization should not be used for long double" );
};
#endif
#if defined(__STDCPP_BFLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
// Reuse __formatter_fp<C>::format<float, Out> for bfloat16_t.
template<__format::__char _CharT>
- struct formatter<__gnu_cxx::__bfloat16_t, _CharT>
+ struct formatter<__format::__bflt16_t, _CharT>
{
formatter() = default;
@@ -3835,16 +3867,14 @@ namespace __format
enum _Arg_t : unsigned char {
_Arg_none, _Arg_bool, _Arg_c, _Arg_i, _Arg_u, _Arg_ll, _Arg_ull,
_Arg_flt, _Arg_dbl, _Arg_ldbl, _Arg_str, _Arg_sv, _Arg_ptr, _Arg_handle,
- _Arg_i128, _Arg_u128,
- _Arg_bf16, _Arg_f16, _Arg_f32, _Arg_f64, // These are unused.
+ _Arg_i128, _Arg_u128, _Arg_float128,
+ _Arg_bf16, _Arg_f16, _Arg_f32, _Arg_f64,
+ _Arg_max_,
+
#ifdef _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT
- _Arg_next_value_,
- _Arg_f128 = _Arg_ldbl,
- _Arg_ibm128 = _Arg_next_value_,
-#else
- _Arg_f128,
+ _Arg_ibm128 = _Arg_ldbl,
+ _Arg_ieee128 = _Arg_float128,
#endif
- _Arg_max_
};
template<typename _Context>
@@ -3871,6 +3901,12 @@ namespace __format
double _M_dbl;
#ifndef _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT // No long double if it's ambiguous.
long double _M_ldbl;
+#else
+ __ibm128 _M_ibm128;
+ __ieee128 _M_ieee128;
+#endif
+#ifdef __SIZEOF_FLOAT128__
+ __float128 _M_float128;
#endif
const _CharT* _M_str;
basic_string_view<_CharT> _M_sv;
@@ -3880,11 +3916,17 @@ namespace __format
__int128 _M_i128;
unsigned __int128 _M_u128;
#endif
-#ifdef _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT
- __ieee128 _M_f128;
- __ibm128 _M_ibm128;
-#elif _GLIBCXX_FORMAT_F128 == 2
- __float128_t _M_f128;
+#ifdef __BFLT16_DIG__
+ __bflt16_t _M_bf16;
+#endif
+#ifdef __FLT16_DIG__
+ _Float16 _M_f16;
+#endif
+#ifdef __FLT32_DIG__
+ _Float32 _M_f32;
+#endif
+#ifdef __FLT64_DIG__
+ _Float64 _M_f64;
#endif
};
@@ -3922,10 +3964,14 @@ namespace __format
else if constexpr (is_same_v<_Tp, long double>)
return __u._M_ldbl;
#else
- else if constexpr (is_same_v<_Tp, __ieee128>)
- return __u._M_f128;
else if constexpr (is_same_v<_Tp, __ibm128>)
return __u._M_ibm128;
+ else if constexpr (is_same_v<_Tp, __ieee128>)
+ return __u._M_ieee128;
+#endif
+#ifdef __SIZEOF_FLOAT128__
+ else if constexpr (is_same_v<_Tp, __float128>)
+ return __u._M_float128;
#endif
else if constexpr (is_same_v<_Tp, const _CharT*>)
return __u._M_str;
@@ -3939,9 +3985,21 @@ namespace __format
else if constexpr (is_same_v<_Tp, unsigned __int128>)
return __u._M_u128;
#endif
-#if _GLIBCXX_FORMAT_F128 == 2
- else if constexpr (is_same_v<_Tp, __float128_t>)
- return __u._M_f128;
+#ifdef __BFLT16_DIG__
+ else if constexpr (is_same_v<_Tp, __bflt16_t>)
+ return __u._M_bf16;
+#endif
+#ifdef __FLT16_DIG__
+ else if constexpr (is_same_v<_Tp, _Float16>)
+ return __u._M_f16;
+#endif
+#ifdef __FLT32_DIG__
+ else if constexpr (is_same_v<_Tp, _Float32>)
+ return __u._M_f32;
+#endif
+#ifdef __FLT64_DIG__
+ else if constexpr (is_same_v<_Tp, _Float64>)
+ return __u._M_f64;
#endif
else if constexpr (derived_from<_Tp, _HandleBase>)
return static_cast<_Tp&>(__u._M_handle);
@@ -4120,36 +4178,25 @@ namespace __format
else if constexpr (is_same_v<_Td, __ieee128>)
return type_identity<__ieee128>();
#endif
-
-#if defined(__FLT16_DIG__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
- else if constexpr (is_same_v<_Td, _Float16>)
- return type_identity<float>();
+#if defined(__SIZEOF_FLOAT128__) && _GLIBCXX_FORMAT_F128
+ else if constexpr (is_same_v<_Td, __float128>)
+ return type_identity<__float128>();
#endif
-
-#if defined(__BFLT16_DIG__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
- else if constexpr (is_same_v<_Td, decltype(0.0bf16)>)
- return type_identity<float>();
+#if defined(__STDCPP_BFLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ else if constexpr (is_same_v<_Td, __format::__bflt16_t>)
+ return type_identity<__format::__bflt16_t>();
+#endif
+#if defined(__STDCPP_FLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ else if constexpr (is_same_v<_Td, _Float16>)
+ return type_identity<_Float16>();
#endif
-
#if defined(__FLT32_DIG__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
else if constexpr (is_same_v<_Td, _Float32>)
- return type_identity<float>();
+ return type_identity<_Float32>();
#endif
-
#if defined(__FLT64_DIG__) && defined(_GLIBCXX_DOUBLE_IS_IEEE_BINARY64)
else if constexpr (is_same_v<_Td, _Float64>)
- return type_identity<double>();
-#endif
-
-#if _GLIBCXX_FORMAT_F128
-# if __FLT128_DIG__
- else if constexpr (is_same_v<_Td, _Float128>)
- return type_identity<__format::__float128_t>();
-# endif
-# if __SIZEOF_FLOAT128__
- else if constexpr (is_same_v<_Td, __float128>)
- return type_identity<__format::__float128_t>();
-# endif
+ return type_identity<_Float64>();
#endif
else if constexpr (__is_specialization_of<_Td, basic_string_view>
|| __is_specialization_of<_Td, basic_string>)
@@ -4205,7 +4252,27 @@ namespace __format
else if constexpr (is_same_v<_Tp, __ibm128>)
return _Arg_ibm128;
else if constexpr (is_same_v<_Tp, __ieee128>)
- return _Arg_f128;
+ return _Arg_ieee128;
+#endif
+#if defined(__SIZEOF_FLOAT128__) && _GLIBCXX_FORMAT_F128
+ else if constexpr (is_same_v<_Tp, __float128>)
+ return _Arg_float128;
+#endif
+#if defined(__STDCPP_BFLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ else if constexpr (is_same_v<_Tp, __format::__bflt16_t>)
+ return _Arg_bf16;
+#endif
+#if defined(__STDCPP_FLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ else if constexpr (is_same_v<_Tp, _Float16>)
+ return _Arg_f16;
+#endif
+#if defined(__FLT32_DIG__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ else if constexpr (is_same_v<_Tp, _Float32>)
+ return _Arg_f32;
+#endif
+#if defined(__FLT64_DIG__) && defined(_GLIBCXX_DOUBLE_IS_IEEE_BINARY64)
+ else if constexpr (is_same_v<_Tp, _Float64>)
+ return _Arg_f64;
#endif
else if constexpr (is_same_v<_Tp, const _CharT*>)
return _Arg_str;
@@ -4219,11 +4286,6 @@ namespace __format
else if constexpr (is_same_v<_Tp, unsigned __int128>)
return _Arg_u128;
#endif
-
-#if _GLIBCXX_FORMAT_F128 == 2
- else if constexpr (is_same_v<_Tp, __format::__float128_t>)
- return _Arg_f128;
-#endif
else if constexpr (is_same_v<_Tp, handle>)
return _Arg_handle;
}
@@ -4296,13 +4358,33 @@ namespace __format
#ifndef _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT
case _Arg_ldbl:
return std::forward<_Visitor>(__vis)(_M_val._M_ldbl);
+#if defined(__SIZEOF_FLOAT128__) && _GLIBCXX_FORMAT_F128
+ case _Arg_float128:
+ return std::forward<_Visitor>(__vis)(_M_val._M_float128);
+#endif
#else
- case _Arg_f128:
- return std::forward<_Visitor>(__vis)(_M_val._M_f128);
case _Arg_ibm128:
return std::forward<_Visitor>(__vis)(_M_val._M_ibm128);
+ case _Arg_ieee128:
+ return std::forward<_Visitor>(__vis)(_M_val._M_ieee128);
#endif
+#if defined(__STDCPP_BFLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ case _Arg_bf16:
+ return std::forward<_Visitor>(__vis)(_M_val._M_bf16);
+#endif
+#if defined(__STDCPP_FLOAT16_T__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ case _Arg_f16:
+ return std::forward<_Visitor>(__vis)(_M_val._M_f16);
+#endif
+#if defined(__FLT32_DIG__) && defined(_GLIBCXX_FLOAT_IS_IEEE_BINARY32)
+ case _Arg_f32:
+ return std::forward<_Visitor>(__vis)(_M_val._M_f32);
#endif
+#if defined(__FLT64_DIG__) && defined(_GLIBCXX_DOUBLE_IS_IEEE_BINARY64)
+ case _Arg_f64:
+ return std::forward<_Visitor>(__vis)(_M_val._M_f64);
+#endif
+#endif // __glibcxx_to_chars
case _Arg_str:
return std::forward<_Visitor>(__vis)(_M_val._M_str);
case _Arg_sv:
@@ -4320,14 +4402,7 @@ namespace __format
case _Arg_u128:
return std::forward<_Visitor>(__vis)(_M_val._M_u128);
#endif
-
-#if _GLIBCXX_FORMAT_F128 == 2
- case _Arg_f128:
- return std::forward<_Visitor>(__vis)(_M_val._M_f128);
-#endif
-
default:
- // _Arg_f16 etc.
__builtin_unreachable();
}
}
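A sketch (mine; the function name is arbitrary, and it assumes a target where
the library supports formatting _Float32) of the user-visible effect of the
changes above: extended floating-point types are now stored in
basic_format_arg as their own types instead of being converted to float or
double at argument capture.

    #include <format>
    #include <stdfloat>

    #ifdef __STDCPP_FLOAT32_T__
    std::string demo()
    { return std::format("{}", std::float32_t(1.5f)); }
    #endif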
diff --git a/libstdc++-v3/include/std/functional b/libstdc++-v3/include/std/functional
index 1077e96..9a55b18 100644
--- a/libstdc++-v3/include/std/functional
+++ b/libstdc++-v3/include/std/functional
@@ -52,6 +52,20 @@
#if __cplusplus >= 201103L
+#define __glibcxx_want_boyer_moore_searcher
+#define __glibcxx_want_bind_front
+#define __glibcxx_want_bind_back
+#define __glibcxx_want_constexpr_functional
+#define __glibcxx_want_copyable_function
+#define __glibcxx_want_invoke
+#define __glibcxx_want_invoke_r
+#define __glibcxx_want_move_only_function
+#define __glibcxx_want_not_fn
+#define __glibcxx_want_ranges
+#define __glibcxx_want_reference_wrapper
+#define __glibcxx_want_transparent_operators
+#include <bits/version.h>
+
#include <tuple>
#include <type_traits>
#include <bits/functional_hash.h>
@@ -72,23 +86,10 @@
# include <bits/ranges_cmp.h> // std::identity, ranges::equal_to etc.
# include <compare>
#endif
-#if __cplusplus > 202002L && _GLIBCXX_HOSTED
-# include <bits/move_only_function.h>
+#if defined(__glibcxx_move_only_function) || defined(__glibcxx_copyable_function)
+# include <bits/funcwrap.h>
#endif
-#define __glibcxx_want_boyer_moore_searcher
-#define __glibcxx_want_bind_front
-#define __glibcxx_want_bind_back
-#define __glibcxx_want_constexpr_functional
-#define __glibcxx_want_invoke
-#define __glibcxx_want_invoke_r
-#define __glibcxx_want_move_only_function
-#define __glibcxx_want_not_fn
-#define __glibcxx_want_ranges
-#define __glibcxx_want_reference_wrapper
-#define __glibcxx_want_transparent_operators
-#include <bits/version.h>
-
#endif // C++11
namespace std _GLIBCXX_VISIBILITY(default)
diff --git a/libstdc++-v3/include/std/utility b/libstdc++-v3/include/std/utility
index 1c15c75..8a85ccf 100644
--- a/libstdc++-v3/include/std/utility
+++ b/libstdc++-v3/include/std/utility
@@ -201,7 +201,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#ifdef __cpp_lib_to_underlying // C++ >= 23
/// Convert an object of enumeration type to its underlying type.
template<typename _Tp>
- [[nodiscard]]
+ [[nodiscard, __gnu__::__always_inline__]]
constexpr underlying_type_t<_Tp>
to_underlying(_Tp __value) noexcept
{ return static_cast<underlying_type_t<_Tp>>(__value); }
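A quick illustration (mine) of the function being annotated: to_underlying is
a one-line cast, so forcing inlining has no code-size downside.

    #include <type_traits>
    #include <utility>

    enum class flag : unsigned char { a = 0x01, b = 0x02 };
    static_assert( std::to_underlying(flag::b) == 0x02 );
    static_assert( std::is_same_v<decltype(std::to_underlying(flag::b)),
                                  unsigned char> );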
diff --git a/libstdc++-v3/src/c++23/std.cc.in b/libstdc++-v3/src/c++23/std.cc.in
index d45ae63..417c8a1 100644
--- a/libstdc++-v3/src/c++23/std.cc.in
+++ b/libstdc++-v3/src/c++23/std.cc.in
@@ -1412,6 +1412,9 @@ export namespace std
#if __cpp_lib_move_only_function
using std::move_only_function;
#endif
+#if __cpp_lib_copyable_function
+ using std::copyable_function;
+#endif
using std::multiplies;
using std::negate;
using std::not_equal_to;
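With the export added above, consumers of the std module can name the new
type directly; a sketch, assuming a toolchain configured for std module
support (build flags are outside the scope of this patch):

    import std;

    std::copyable_function<void()> task = [] { };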
diff --git a/libstdc++-v3/testsuite/20_util/copyable_function/call.cc b/libstdc++-v3/testsuite/20_util/copyable_function/call.cc
new file mode 100644
index 0000000..cf99757
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/copyable_function/call.cc
@@ -0,0 +1,224 @@
+// { dg-do run { target c++26 } }
+// { dg-require-effective-target hosted }
+
+#include <functional>
+#include <utility>
+#include <testsuite_hooks.h>
+
+using std::copyable_function;
+
+using std::is_same_v;
+using std::is_invocable_v;
+using std::is_nothrow_invocable_v;
+using std::invoke_result_t;
+
+// Check return types
+static_assert( is_same_v<void, invoke_result_t<copyable_function<void()>>> );
+static_assert( is_same_v<int, invoke_result_t<copyable_function<int()>>> );
+static_assert( is_same_v<int&, invoke_result_t<copyable_function<int&()>>> );
+
+// With const qualifier
+static_assert( ! is_invocable_v< copyable_function<void()> const > );
+static_assert( ! is_invocable_v< copyable_function<void()> const &> );
+static_assert( is_invocable_v< copyable_function<void() const> > );
+static_assert( is_invocable_v< copyable_function<void() const> &> );
+static_assert( is_invocable_v< copyable_function<void() const> const > );
+static_assert( is_invocable_v< copyable_function<void() const> const &> );
+
+// With no ref-qualifier
+static_assert( is_invocable_v< copyable_function<void()> > );
+static_assert( is_invocable_v< copyable_function<void()> &> );
+static_assert( is_invocable_v< copyable_function<void() const> > );
+static_assert( is_invocable_v< copyable_function<void() const> &> );
+static_assert( is_invocable_v< copyable_function<void() const> const > );
+static_assert( is_invocable_v< copyable_function<void() const> const &> );
+
+// With & ref-qualifier
+static_assert( ! is_invocable_v< copyable_function<void()&> > );
+static_assert( is_invocable_v< copyable_function<void()&> &> );
+static_assert( is_invocable_v< copyable_function<void() const&> > );
+static_assert( is_invocable_v< copyable_function<void() const&> &> );
+static_assert( is_invocable_v< copyable_function<void() const&> const > );
+static_assert( is_invocable_v< copyable_function<void() const&> const &> );
+
+// With && ref-qualifier
+static_assert( is_invocable_v< copyable_function<void()&&> > );
+static_assert( ! is_invocable_v< copyable_function<void()&&> &> );
+static_assert( is_invocable_v< copyable_function<void() const&&> > );
+static_assert( ! is_invocable_v< copyable_function<void() const&&> &> );
+static_assert( is_invocable_v< copyable_function<void() const&&> const > );
+static_assert( ! is_invocable_v< copyable_function<void() const&&> const &> );
+
+// With noexcept-specifier
+static_assert( ! is_nothrow_invocable_v< copyable_function<void()> > );
+static_assert( ! is_nothrow_invocable_v< copyable_function<void() noexcept(false)> > );
+static_assert( is_nothrow_invocable_v< copyable_function<void() noexcept> > );
+static_assert( is_nothrow_invocable_v< copyable_function<void()& noexcept>& > );
+
+void
+test01()
+{
+ struct F
+ {
+ int operator()() { return 0; }
+ int operator()() const { return 1; }
+ };
+
+ copyable_function<int()> f0{F{}};
+ VERIFY( f0() == 0 );
+ VERIFY( std::move(f0)() == 0 );
+
+ copyable_function<int() const> f1{F{}};
+ VERIFY( f1() == 1 );
+ VERIFY( std::as_const(f1)() == 1 );
+ VERIFY( std::move(f1)() == 1 );
+ VERIFY( std::move(std::as_const(f1))() == 1 );
+
+ copyable_function<int()&> f2{F{}};
+ VERIFY( f2() == 0 );
+ // Not rvalue-callable: std::move(f2)()
+
+ copyable_function<int() const&> f3{F{}};
+ VERIFY( f3() == 1 );
+ VERIFY( std::as_const(f3)() == 1 );
+ VERIFY( std::move(f3)() == 1 );
+ VERIFY( std::move(std::as_const(f3))() == 1 );
+
+ copyable_function<int()&&> f4{F{}};
+ // Not lvalue-callable: f4()
+ VERIFY( std::move(f4)() == 0 );
+
+ copyable_function<int() const&&> f5{F{}};
+ // Not lvalue-callable: f5()
+ VERIFY( std::move(f5)() == 1 );
+ VERIFY( std::move(std::as_const(f5))() == 1 );
+}
+
+void
+test02()
+{
+ struct F
+ {
+ int operator()() & { return 0; }
+ int operator()() && { return 1; }
+ };
+
+ copyable_function<int()> f0{F{}};
+ VERIFY( f0() == 0 );
+ VERIFY( std::move(f0)() == 0 );
+
+ copyable_function<int()&&> f1{F{}};
+ // Not lvalue callable: f1()
+ VERIFY( std::move(f1)() == 1 );
+
+ copyable_function<int()&> f2{F{}};
+ VERIFY( f2() == 0 );
+ // Not rvalue-callable: std::move(f2)()
+}
+
+void
+test03()
+{
+ struct F
+ {
+ int operator()() const & { return 0; }
+ int operator()() && { return 1; }
+ };
+
+ copyable_function<int()> f0{F{}};
+ VERIFY( f0() == 0 );
+ VERIFY( std::move(f0)() == 0 );
+
+ copyable_function<int()&&> f1{F{}};
+ // Not lvalue callable: f1()
+ VERIFY( std::move(f1)() == 1 );
+
+ copyable_function<int() const> f2{F{}};
+ VERIFY( f2() == 0 );
+ VERIFY( std::as_const(f2)() == 0 );
+ VERIFY( std::move(f2)() == 0 );
+ VERIFY( std::move(std::as_const(f2))() == 0 );
+
+ copyable_function<int() const &&> f3{F{}};
+ // Not lvalue callable: f3()
+ VERIFY( std::move(f3)() == 0 );
+ VERIFY( std::move(std::as_const(f3))() == 0 );
+
+ copyable_function<int() const &> f4{F{}};
+ VERIFY( f4() == 0 );
+ VERIFY( std::as_const(f4)() == 0 );
+ // Not rvalue-callable: std::move(f4)()
+}
+
+void
+test04()
+{
+ struct F
+ {
+ int operator()() & { return 0; }
+ int operator()() && { return 1; }
+ int operator()() const & { return 2; }
+ int operator()() const && { return 3; }
+ };
+
+ copyable_function<int()> f0{F{}};
+ VERIFY( f0() == 0 );
+ VERIFY( std::move(f0)() == 0 );
+
+ copyable_function<int()&> f1{F{}};
+ VERIFY( f1() == 0 );
+ // Not rvalue-callable: std::move(f1)()
+
+ copyable_function<int()&&> f2{F{}};
+ // Not lvalue callable: f2()
+ VERIFY( std::move(f2)() == 1 );
+
+ copyable_function<int() const> f3{F{}};
+ VERIFY( f3() == 2 );
+ VERIFY( std::as_const(f3)() == 2 );
+ VERIFY( std::move(f3)() == 2 );
+ VERIFY( std::move(std::as_const(f3))() == 2 );
+
+ copyable_function<int() const &> f4{F{}};
+ VERIFY( f4() == 2 );
+ VERIFY( std::as_const(f4)() == 2 );
+ // Not rvalue-callable: std::move(f4)()
+
+ copyable_function<int() const &&> f5{F{}};
+ // Not lvalue callable: f5()
+ VERIFY( std::move(f5)() == 3 );
+ VERIFY( std::move(std::as_const(f5))() == 3 );
+}
+
+void
+test05()
+{
+ int (*fp)() = [] { return 0; };
+ copyable_function<int()> f0{fp};
+ VERIFY( f0() == 0 );
+ VERIFY( std::move(f0)() == 0 );
+
+ const copyable_function<int() const> f1{fp};
+ VERIFY( f1() == 0 );
+ VERIFY( std::move(f1)() == 0 );
+}
+
+struct Incomplete;
+
+void
+test_params()
+{
+ std::copyable_function<void(Incomplete)> f1;
+ std::copyable_function<void(Incomplete&)> f2;
+ std::copyable_function<void(Incomplete&&)> f3;
+}
+
+int main()
+{
+ test01();
+ test02();
+ test03();
+ test04();
+ test05();
+ test_params();
+}
diff --git a/libstdc++-v3/testsuite/20_util/copyable_function/cons.cc b/libstdc++-v3/testsuite/20_util/copyable_function/cons.cc
new file mode 100644
index 0000000..8d422dc
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/copyable_function/cons.cc
@@ -0,0 +1,126 @@
+// { dg-do compile { target c++26 } }
+// { dg-require-effective-target hosted }
+// { dg-add-options no_pch }
+
+#include <functional>
+
+#ifndef __cpp_lib_copyable_function
+# error "Feature-test macro for copyable_function missing in <functional>"
+#elif __cpp_lib_copyable_function != 202306L
+# error "Feature-test macro for copyable_function has wrong value in <functional>"
+#endif
+
+using std::copyable_function;
+
+using std::is_constructible_v;
+using std::is_copy_constructible_v;
+using std::is_nothrow_default_constructible_v;
+using std::is_nothrow_move_constructible_v;
+using std::is_nothrow_constructible_v;
+using std::nullptr_t;
+using std::in_place_type_t;
+
+static_assert( is_nothrow_default_constructible_v<copyable_function<void()>> );
+static_assert( is_nothrow_constructible_v<copyable_function<void()>, nullptr_t> );
+static_assert( is_nothrow_move_constructible_v<copyable_function<void()>> );
+static_assert( is_copy_constructible_v<copyable_function<void()>> );
+
+static_assert( is_constructible_v<copyable_function<void()>, void()> );
+static_assert( is_constructible_v<copyable_function<void()>, void(&)()> );
+static_assert( is_constructible_v<copyable_function<void()>, void(*)()> );
+static_assert( is_constructible_v<copyable_function<void()>, int()> );
+static_assert( is_constructible_v<copyable_function<void()>, int(&)()> );
+static_assert( is_constructible_v<copyable_function<void()>, int(*)()> );
+static_assert( ! is_constructible_v<copyable_function<void()>, void(int)> );
+static_assert( is_constructible_v<copyable_function<void(int)>, void(int)> );
+
+static_assert( is_constructible_v<copyable_function<void(int)>,
+ in_place_type_t<void(*)(int)>, void(int)> );
+
+static_assert( is_constructible_v<copyable_function<void()>,
+ void() noexcept> );
+static_assert( is_constructible_v<copyable_function<void() noexcept>,
+ void() noexcept> );
+static_assert( ! is_constructible_v<copyable_function<void() noexcept>,
+ void() > );
+
+struct Q
+{
+ void operator()() const &;
+ void operator()() &&;
+};
+
+static_assert( is_constructible_v<copyable_function<void()>, Q> );
+static_assert( is_constructible_v<copyable_function<void() const>, Q> );
+static_assert( is_constructible_v<copyable_function<void() &>, Q> );
+static_assert( is_constructible_v<copyable_function<void() const &>, Q> );
+static_assert( is_constructible_v<copyable_function<void() &&>, Q> );
+static_assert( is_constructible_v<copyable_function<void() const &&>, Q> );
+
+struct R
+{
+ void operator()() &;
+ void operator()() &&;
+};
+
+static_assert( is_constructible_v<copyable_function<void()>, R> );
+static_assert( is_constructible_v<copyable_function<void()&>, R> );
+static_assert( is_constructible_v<copyable_function<void()&&>, R> );
+static_assert( ! is_constructible_v<copyable_function<void() const>, R> );
+static_assert( ! is_constructible_v<copyable_function<void() const&>, R> );
+static_assert( ! is_constructible_v<copyable_function<void() const&&>, R> );
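+// R is invocable as an lvalue and as an rvalue but has no const call
+// operator, so (per the standard's is-callable-from requirement) only the
+// non-const signatures are constructible from R.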
+
+// The following nothrow-constructible guarantees are a GCC extension,
+// not required by the standard.
+
+static_assert( is_nothrow_constructible_v<copyable_function<void()>, void()> );
+static_assert( is_nothrow_constructible_v<copyable_function<void(int)>,
+ in_place_type_t<void(*)(int)>,
+ void(int)> );
+
+// These types are all small and nothrow move constructible
+struct F { void operator()(); };
+struct G { void operator()() const; };
+static_assert( is_nothrow_constructible_v<copyable_function<void()>, F> );
+static_assert( is_nothrow_constructible_v<copyable_function<void()>, G> );
+static_assert( is_nothrow_constructible_v<copyable_function<void() const>, G> );
+
+struct H {
+ H(int);
+ H(int, int) noexcept;
+ void operator()() noexcept;
+};
+static_assert( is_nothrow_constructible_v<copyable_function<void()>, H> );
+static_assert( is_nothrow_constructible_v<copyable_function<void() noexcept>,
+ H> );
+static_assert( ! is_nothrow_constructible_v<copyable_function<void() noexcept>,
+ in_place_type_t<H>, int> );
+static_assert( is_nothrow_constructible_v<copyable_function<void() noexcept>,
+ in_place_type_t<H>, int, int> );
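+// Only H(int, int) is noexcept, so only that in_place construction can be
+// nothrow; direct construction from an H object instead relies on H's
+// (implicitly noexcept) move constructor.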
+
+struct I {
+ I(int, const char*);
+ I(std::initializer_list<char>);
+ int operator()() const noexcept;
+};
+
+static_assert( is_constructible_v<copyable_function<void()>,
+ std::in_place_type_t<I>,
+ int, const char*> );
+static_assert( is_constructible_v<copyable_function<void()>,
+ std::in_place_type_t<I>,
+ std::initializer_list<char>> );
+
+void
+test_instantiation()
+{
+ // Instantiate the constructor bodies
+ copyable_function<void()> f0;
+ copyable_function<void()> f1(nullptr);
+ copyable_function<void()> f2( I(1, "two") );
+ copyable_function<void()> f3(std::in_place_type<I>, 3, "four");
+ copyable_function<void()> f4(std::in_place_type<I>, // PR libstdc++/102825
+ { 'P', 'R', '1', '0', '2', '8', '2', '5'});
+ auto f5 = std::move(f4);
+ f4 = std::move(f5);
+}
diff --git a/libstdc++-v3/testsuite/20_util/copyable_function/conv.cc b/libstdc++-v3/testsuite/20_util/copyable_function/conv.cc
new file mode 100644
index 0000000..e678e16
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/copyable_function/conv.cc
@@ -0,0 +1,251 @@
+// { dg-do run { target c++26 } }
+// { dg-require-effective-target hosted }
+
+#include <functional>
+#include <testsuite_hooks.h>
+
+using std::copyable_function;
+
+static_assert( !std::is_constructible_v<std::copyable_function<void()>,
+ std::copyable_function<void()&>> );
+static_assert( !std::is_constructible_v<std::copyable_function<void()>,
+ std::copyable_function<void()&&>> );
+static_assert( !std::is_constructible_v<std::copyable_function<void()&>,
+ std::copyable_function<void()&&>> );
+static_assert( !std::is_constructible_v<std::copyable_function<void() const>,
+ std::copyable_function<void()>> );
+
+// Non-trivial argument type, guarantees that the argument is not passed by copy
+struct CountedArg
+{
+ CountedArg() = default;
+ CountedArg(const CountedArg& f) noexcept : counter(f.counter) { ++counter; }
+ CountedArg& operator=(CountedArg&&) = delete;
+
+ int counter = 0;
+};
+CountedArg const c;
+
+// When a copyable_function or move_only_function is constructed from another
+// copyable_function, the implementation can avoid double indirection per
+// C++26 [func.wrap.general] p2.
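+//
+// An illustrative sketch of what avoiding double indirection means here
+// (assumed behaviour, exercised indirectly by the counts below): given
+//   std::copyable_function<int(CountedArg) const noexcept> a(f);
+//   std::copyable_function<int(CountedArg) const> b(a);
+// b may store a copy of f itself rather than wrapping a, so a call makes only
+// the single by-value CountedArg copy that the signature requires.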
+
+void
+test01()
+{
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::copyable_function<int(CountedArg) const noexcept> c1(f);
+ using CF = std::copyable_function<int(CountedArg) const noexcept>;
+ VERIFY( c1(c) == 1 );
+
+ std::copyable_function<int(CountedArg) const> c2a(c1);
+ VERIFY( c2a(c) == 1 );
+
+ std::copyable_function<int(CountedArg) const> c2b(static_cast<CF>(c1));
+ VERIFY( c2b(c) == 1 );
+
+ std::move_only_function<int(CountedArg) const> m2a(c1);
+ VERIFY( m2a(c) == 1 );
+
+ std::move_only_function<int(CountedArg) const> m2b(static_cast<CF>(c1));
+ VERIFY( m2b(c) == 1 );
+
+ std::copyable_function<int(CountedArg)> c3a(c1);
+ VERIFY( c3a(c) == 1 );
+
+ std::copyable_function<int(CountedArg)> c3b(static_cast<CF>(c1));
+ VERIFY( c3b(c) == 1 );
+
+ std::move_only_function<int(CountedArg)> m3a(c1);
+ VERIFY( m3a(c) == 1 );
+
+ std::move_only_function<int(CountedArg)> m3b(static_cast<CF>(c1));
+ VERIFY( m3b(c) == 1 );
+
+  // Invokers internally use CountedArg&& for non-trivial types,
+  // so the signatures remain compatible.
+ std::copyable_function<int(CountedArg&&)> c4a(c1);
+ VERIFY( c4a({}) == 0 );
+
+ std::copyable_function<int(CountedArg&&)> c4b(static_cast<CF>(c1));
+ VERIFY( c4b({}) == 0 );
+
+ std::move_only_function<int(CountedArg&&)> m4a(c1);
+ VERIFY( m4a({}) == 0 );
+
+ std::move_only_function<int(CountedArg&&)> m4b(static_cast<CF>(c1));
+ VERIFY( m4b({}) == 0 );
+
+ std::copyable_function<int(CountedArg&&)&> c5a(c1);
+ VERIFY( c5a({}) == 0 );
+
+ std::copyable_function<int(CountedArg&&)&&> c5b(static_cast<CF>(c1));
+ VERIFY( std::move(c5b)({}) == 0 );
+
+ std::move_only_function<int(CountedArg&&)&> m5a(c1);
+ VERIFY( m5a({}) == 0 );
+
+ std::move_only_function<int(CountedArg&&)&&> m5b(static_cast<CF>(c1));
+ VERIFY( std::move(m5b)({}) == 0 );
+
+ // Incompatible signatures
+ std::copyable_function<long(CountedArg) const noexcept> c6a(c1);
+ VERIFY( c6a(c) == 2 );
+
+ std::copyable_function<long(CountedArg) const noexcept> c6b(static_cast<CF>(c1));
+ VERIFY( c6b(c) == 2 );
+
+ std::move_only_function<long(CountedArg) const noexcept> m6a(c1);
+ VERIFY( m6a(c) == 2 );
+
+ std::move_only_function<long(CountedArg) const noexcept> m6b(static_cast<CF>(c1));
+ VERIFY( m6b(c) == 2 );
+}
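+
+// Note on the "incompatible signatures" cases above: the return types differ,
+// so the source wrapper itself becomes the stored target; each call then
+// crosses two by-value CountedArg boundaries, hence the counter reads 2.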
+
+void
+test02()
+{
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::copyable_function<int(CountedArg) const noexcept> c1(f);
+ using CF = std::copyable_function<int(CountedArg) const noexcept>;
+ VERIFY( c1(c) == 1 );
+
+ std::copyable_function<int(CountedArg) const> c2;
+ c2 = c1;
+ VERIFY( c2(c) == 1 );
+ c2 = static_cast<CF>(c1);
+ VERIFY( c2(c) == 1 );
+
+ std::move_only_function<int(CountedArg) const> m2;
+ m2 = c1;
+ VERIFY( m2(c) == 1 );
+ m2 = static_cast<CF>(c1);
+ VERIFY( m2(c) == 1 );
+
+ // Incompatible signatures
+ std::copyable_function<long(CountedArg) const noexcept> c3;
+ c3 = c1;
+ VERIFY( c3(c) == 2 );
+ c3 = static_cast<CF>(c1);
+ VERIFY( c3(c) == 2 );
+
+ std::move_only_function<long(CountedArg) const noexcept> m3;
+ m3 = c1;
+ VERIFY( m3(c) == 2 );
+ m3 = static_cast<CF>(c1);
+ VERIFY( m3(c) == 2 );
+}
+
+void
+test03()
+{
+ std::copyable_function<int(long) const noexcept> c1;
+ VERIFY( c1 == nullptr );
+
+ std::copyable_function<int(long) const> c2(c1);
+ VERIFY( c2 == nullptr );
+ c2 = c1;
+ VERIFY( c2 == nullptr );
+ c2 = std::move(c1);
+ VERIFY( c2 == nullptr );
+
+ std::copyable_function<bool(int) const> c3(std::move(c1));
+ VERIFY( c3 == nullptr );
+ c3 = c1;
+ VERIFY( c3 == nullptr );
+ c3 = std::move(c1);
+ VERIFY( c3 == nullptr );
+
+ // LWG4255 move_only_function constructor should recognize empty
+ // copyable_functions
+ std::move_only_function<int(long) const noexcept> m1(c1);
+ VERIFY( m1 == nullptr );
+ m1 = c1;
+ VERIFY( m1 == nullptr );
+ m1 = std::move(c1);
+ VERIFY( m1 == nullptr );
+
+ std::move_only_function<int(long) const> m2(c1);
+ VERIFY( m2 == nullptr );
+ m2 = c1;
+ VERIFY( m2 == nullptr );
+ m2 = std::move(c1);
+ VERIFY( m2 == nullptr );
+
+ std::move_only_function<bool(int) const> m3(std::move(c1));
+ VERIFY( m3 == nullptr );
+ m3 = c1;
+ VERIFY( m3 == nullptr );
+ m3 = std::move(c1);
+ VERIFY( m3 == nullptr );
+}
+
+void
+test04()
+{
+ struct F
+ {
+ int operator()(CountedArg const& arg) noexcept
+ { return arg.counter; }
+
+ int operator()(CountedArg const& arg) const noexcept
+ { return arg.counter + 1000; }
+ };
+
+ F f;
+ std::copyable_function<int(CountedArg) const> c1(f);
+ VERIFY( c1(c) == 1001 );
+
+  // Call the const overload, as the std::copyable_function<int(CountedArg) const>
+  // stored inside std::copyable_function<int(CountedArg)> would do.
+ std::copyable_function<int(CountedArg)> c2(c1);
+ VERIFY( c2(c) == 1001 );
+ std::move_only_function<int(CountedArg)> m2(c1);
+ VERIFY( m2(c) == 1001 );
+
+ std::copyable_function<int(CountedArg)> m3(f);
+ VERIFY( m3(c) == 1 );
+}
+
+void
+test05()
+{
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::copyable_function<int(CountedArg)> w1(f);
+  // copyable_function stores a copyable_function due to incompatible signatures
+ std::copyable_function<int(CountedArg const&)> w2(std::move(w1));
+  // a copy is made when passing to int(CountedArg)
+ VERIFY( w2(c) == 1 );
+ // wrapped 3 times
+ w1 = std::move(w2);
+ VERIFY( w1(c) == 2 );
+ // wrapped 4 times
+ w2 = std::move(w1);
+ VERIFY( w2(c) == 2 );
+ // wrapped 5 times
+ w1 = std::move(w2);
+ VERIFY( w1(c) == 3 );
+}
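+
+// Each int(CountedArg) layer in the chain above copies the argument once,
+// while the int(CountedArg const&) layers forward it without copying, so the
+// observed count grows by one per added by-value layer.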
+
+void
+test06()
+{
+ // No special interoperability with std::function
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::function<int(CountedArg)> f1(f);
+ std::copyable_function<int(CountedArg) const> c1(std::move(f1));
+ VERIFY( c1(c) == 2 );
+
+ std::copyable_function<int(CountedArg) const> c2(f);
+ std::function<int(CountedArg)> f2(c2);
+ VERIFY( f2(c) == 2 );
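+  // In both directions the other wrapper is stored as an opaque target, so
+  // each call crosses two by-value CountedArg boundaries, hence counter == 2.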
+}
+
+int main()
+{
+ test01();
+ test02();
+ test03();
+ test04();
+ test05();
+ test06();
+}
diff --git a/libstdc++-v3/testsuite/20_util/copyable_function/copy.cc b/libstdc++-v3/testsuite/20_util/copyable_function/copy.cc
new file mode 100644
index 0000000..6445a27
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/copyable_function/copy.cc
@@ -0,0 +1,154 @@
+// { dg-do run { target c++26 } }
+// { dg-require-effective-target hosted }
+
+#include <functional>
+#include <testsuite_hooks.h>
+
+using std::copyable_function;
+
+void
+test01()
+{
+ // Small type with non-throwing move constructor. Not allocated on the heap.
+ struct F
+ {
+ F() = default;
+ F(const F& f) : counters(f.counters) { ++counters.copy; }
+ F(F&& f) noexcept : counters(f.counters) { ++counters.move; }
+
+ F& operator=(F&&) = delete;
+
+ struct Counters
+ {
+ int copy = 0;
+ int move = 0;
+ } counters;
+
+ const Counters& operator()() const { return counters; }
+ };
+
+ F f;
+ std::copyable_function<const F::Counters&() const> m1(f);
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
+ // This will copy construct a new target object
+ auto m2 = m1;
+ VERIFY( m1 != nullptr && m2 != nullptr );
+ VERIFY( m2().copy == 2 );
+ VERIFY( m2().move == 0 );
+
+ m1 = m2;
+ VERIFY( m1 != nullptr && m2 != nullptr );
+ VERIFY( m1().copy == 3 );
+ VERIFY( m1().move == 1 ); // Copies object first and then swaps
+
+ m1 = m1;
+ VERIFY( m1 != nullptr && m2 != nullptr );
+ VERIFY( m1().copy == 4 );
+ VERIFY( m1().move == 2 ); // Copies object first and then swaps
+
+ m2 = f;
+ VERIFY( m2().copy == 1 );
+ VERIFY( m2().move == 1 ); // Copy construct target object, then swap into m2.
+}
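+
+// The counts above are consistent with the specified copy-and-swap
+// assignment, roughly (a sketch of the specified semantics, not the actual
+// libstdc++ implementation):
+//   copyable_function& operator=(const copyable_function& x)
+//   { copyable_function(x).swap(*this); return *this; }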
+
+void
+test02()
+{
+ // Move constructor is potentially throwing. Allocated on the heap.
+ struct F
+ {
+ F() = default;
+ F(const F& f) noexcept : counters(f.counters) { ++counters.copy; }
+ F(F&& f) noexcept(false) : counters(f.counters) { ++counters.move; }
+
+ F& operator=(F&&) = delete;
+
+ struct Counters
+ {
+ int copy = 0;
+ int move = 0;
+ } counters;
+
+ Counters operator()() const noexcept { return counters; }
+ };
+
+ F f;
+ std::copyable_function<F::Counters() const> m1(f);
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
+  // The target object is on the heap, but we need to allocate a new one
+ auto m2 = m1;
+ VERIFY( m1 != nullptr && m2 != nullptr );
+ VERIFY( m2().copy == 2 );
+ VERIFY( m2().move == 0 );
+
+ m1 = m2;
+ VERIFY( m1 != nullptr && m2 != nullptr );
+ VERIFY( m1().copy == 3 );
+ VERIFY( m1().move == 0 );
+
+ m1 = m1;
+ VERIFY( m1 != nullptr && m2 != nullptr );
+ VERIFY( m1().copy == 4 );
+ VERIFY( m1().move == 0 );
+
+ m2 = f;
+ VERIFY( m2().copy == 1 );
+ VERIFY( m2().move == 0 );
+}
+
+void
+test03()
+{
+  // Small type with a non-throwing, but not trivial, move constructor.
+ struct F
+ {
+ F(int i) noexcept : id(i) {}
+ F(const F& f) : id(f.id)
+ { if (id == 3) throw id; }
+ F(F&& f) noexcept : id(f.id) { }
+
+ int operator()() const
+ { return id; }
+
+ int id;
+ };
+
+ std::copyable_function<int() const> m1(std::in_place_type<F>, 1);
+ const std::copyable_function<int() const> m2(std::in_place_type<F>, 2);
+ const std::copyable_function<int() const> m3(std::in_place_type<F>, 3);
+
+ try
+ {
+ auto mc = m3;
+ VERIFY( false );
+ }
+ catch(int i)
+ {
+ VERIFY( i == 3 );
+ }
+
+ m1 = m2;
+ VERIFY( m1() == 2 );
+
+ try
+ {
+ m1 = m3;
+ VERIFY( false );
+ }
+ catch (int i)
+ {
+ VERIFY( i == 3 );
+ }
+ VERIFY( m1() == 2 );
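+  // The failed assignment left m1 unchanged: copying into a temporary before
+  // swapping provides the strong exception guarantee.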
+}
+
+int main()
+{
+ test01();
+ test02();
+ test03();
+}
diff --git a/libstdc++-v3/testsuite/20_util/copyable_function/move.cc b/libstdc++-v3/testsuite/20_util/copyable_function/move.cc
new file mode 100644
index 0000000..ec9d0d1
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/copyable_function/move.cc
@@ -0,0 +1,120 @@
+// { dg-do run { target c++26 } }
+// { dg-require-effective-target hosted }
+
+#include <functional>
+#include <testsuite_hooks.h>
+
+using std::copyable_function;
+
+void
+test01()
+{
+ // Small type with non-throwing move constructor. Not allocated on the heap.
+ struct F
+ {
+ F() = default;
+ F(const F& f) : counters(f.counters) { ++counters.copy; }
+ F(F&& f) noexcept : counters(f.counters) { ++counters.move; }
+
+ F& operator=(F&&) = delete;
+
+ struct Counters
+ {
+ int copy = 0;
+ int move = 0;
+ } counters;
+
+ const Counters& operator()() const { return counters; }
+ };
+
+ F f;
+ std::copyable_function<const F::Counters&() const> m1(f);
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
+  // The standard specifies move assignment as move-construct-and-swap
+ m1 = std::move(m1);
+ VERIFY( m1 != nullptr );
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
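+  // Move-construct-and-swap means self-move leaves the object valid and
+  // non-empty; the unchanged counters further suggest this implementation
+  // short-circuits self-assignment, which the standard does not require.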
+
+ // This will move construct a new target object and destroy the old one:
+ auto m2 = std::move(m1);
+ VERIFY( m1 == nullptr && m2 != nullptr );
+ VERIFY( m2().copy == 1 );
+ VERIFY( m2().move == 1 );
+
+ m1 = std::move(m2);
+ VERIFY( m1 != nullptr && m2 == nullptr );
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 2 );
+
+ m2 = std::move(f);
+ VERIFY( m2().copy == 0 );
+ VERIFY( m2().move == 2 ); // Move construct target object, then swap into m2.
+ const int moves = m1().move + m2().move;
+ // This will do three moves:
+ swap(m1, m2);
+ VERIFY( m1().copy == 0 );
+ VERIFY( m2().copy == 1 );
+ VERIFY( (m1().move + m2().move) == (moves + 3) );
+}
+
+void
+test02()
+{
+ // Move constructor is potentially throwing. Allocated on the heap.
+ struct F
+ {
+ F() = default;
+ F(const F& f) noexcept : counters(f.counters) { ++counters.copy; }
+ F(F&& f) noexcept(false) : counters(f.counters) { ++counters.move; }
+
+ F& operator=(F&&) = delete;
+
+ struct Counters
+ {
+ int copy = 0;
+ int move = 0;
+ } counters;
+
+ Counters operator()() const noexcept { return counters; }
+ };
+
+ F f;
+ std::copyable_function<F::Counters() const> m1(f);
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
+ m1 = std::move(m1);
+ VERIFY( m1 != nullptr );
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
+ // The target object is on the heap so this just moves a pointer:
+ auto m2 = std::move(m1);
+ VERIFY( m1 == nullptr && m2 != nullptr );
+ VERIFY( m2().copy == 1 );
+ VERIFY( m2().move == 0 );
+
+ m1 = std::move(m2);
+ VERIFY( m1 != nullptr && m2 == nullptr );
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
+ m2 = std::move(f);
+ VERIFY( m2().copy == 0 );
+ VERIFY( m2().move == 1 );
+ const int moves = m1().move + m2().move;
+ // This just swaps the pointers, so no moves:
+ swap(m1, m2);
+ VERIFY( m1().copy == 0 );
+ VERIFY( m2().copy == 1 );
+ VERIFY( (m1().move + m2().move) == moves );
+}
+
+int main()
+{
+ test01();
+ test02();
+}
diff --git a/libstdc++-v3/testsuite/20_util/move_only_function/call.cc b/libstdc++-v3/testsuite/20_util/move_only_function/call.cc
index bfc609a..217de37 100644
--- a/libstdc++-v3/testsuite/20_util/move_only_function/call.cc
+++ b/libstdc++-v3/testsuite/20_util/move_only_function/call.cc
@@ -190,6 +190,19 @@ test04()
VERIFY( std::move(std::as_const(f5))() == 3 );
}
+void
+test05()
+{
+ int (*fp)() = [] { return 0; };
+ move_only_function<int()> f0{fp};
+ VERIFY( f0() == 0 );
+ VERIFY( std::move(f0)() == 0 );
+
+ const move_only_function<int() const> f1{fp};
+ VERIFY( f1() == 0 );
+ VERIFY( std::move(f1)() == 0 );
+}
+
struct Incomplete;
void
@@ -206,5 +219,6 @@ int main()
test02();
test03();
test04();
+ test05();
test_params();
}
diff --git a/libstdc++-v3/testsuite/20_util/move_only_function/conv.cc b/libstdc++-v3/testsuite/20_util/move_only_function/conv.cc
new file mode 100644
index 0000000..3da5e9e
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/move_only_function/conv.cc
@@ -0,0 +1,188 @@
+// { dg-do run { target c++23 } }
+// { dg-require-effective-target hosted }
+
+#include <functional>
+#include <testsuite_hooks.h>
+
+using std::move_only_function;
+
+static_assert( !std::is_constructible_v<std::move_only_function<void()>,
+ std::move_only_function<void()&>> );
+static_assert( !std::is_constructible_v<std::move_only_function<void()>,
+ std::move_only_function<void()&&>> );
+static_assert( !std::is_constructible_v<std::move_only_function<void()&>,
+ std::move_only_function<void()&&>> );
+static_assert( !std::is_constructible_v<std::move_only_function<void() const>,
+ std::move_only_function<void()>> );
+
+// Non-trivial argument type, guarantees that the argument is not passed by copy
+struct CountedArg
+{
+ CountedArg() = default;
+ CountedArg(const CountedArg& f) noexcept : counter(f.counter) { ++counter; }
+ CountedArg& operator=(CountedArg&&) = delete;
+
+ int counter = 0;
+};
+CountedArg const c;
+
+// When a move_only_function is constructed from another move_only_function,
+// the implementation can avoid double indirection per C++26
+// [func.wrap.general] p2.
+
+void
+test01()
+{
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::move_only_function<int(CountedArg) const noexcept> m1(f);
+ VERIFY( m1(c) == 1 );
+
+ std::move_only_function<int(CountedArg) const> m2(std::move(m1));
+ VERIFY( m2(c) == 1 );
+
+ std::move_only_function<int(CountedArg)> m3(std::move(m2));
+ VERIFY( m3(c) == 1 );
+
+  // Invokers internally use CountedArg&& for non-trivial types,
+  // so the signatures remain compatible.
+ std::move_only_function<int(CountedArg&&)> m4(std::move(m3));
+ VERIFY( m4({}) == 0 );
+
+ std::move_only_function<int(CountedArg&&)&&> m5(std::move(m4));
+ VERIFY( std::move(m5)({}) == 0 );
+
+ m4 = f;
+ std::move_only_function<int(CountedArg&&)&> m7(std::move(m4));
+ VERIFY( m7({}) == 0 );
+
+ m4 = f;
+ std::move_only_function<int(CountedArg&&)&> m8(std::move(m4));
+ VERIFY( m8({}) == 0 );
+
+ // Incompatible signatures
+ m1 = f;
+ std::move_only_function<long(CountedArg) const noexcept> m9(std::move(m1));
+ VERIFY( m9(c) == 2 );
+}
+
+void
+test02()
+{
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::move_only_function<int(CountedArg) const noexcept> m1(f);
+ VERIFY( m1(c) == 1 );
+
+ std::move_only_function<int(CountedArg) const> m2;
+ m2 = std::move(m1);
+ VERIFY( m2(c) == 1 );
+
+ std::move_only_function<int(CountedArg)> m3;
+ m3 = std::move(m2);
+ VERIFY( m3(c) == 1 );
+
+  // Invokers internally use CountedArg&& for non-trivial types,
+  // so the signatures remain compatible.
+ std::move_only_function<int(CountedArg&&)> m4;
+ m4 = std::move(m3);
+ VERIFY( m4({}) == 0 );
+
+ std::move_only_function<int(CountedArg&&)&&> m5;
+ m5 = std::move(m4);
+ VERIFY( std::move(m5)({}) == 0 );
+
+ m4 = f;
+ std::move_only_function<int(CountedArg&&)&> m7;
+ m7 = std::move(m4);
+ VERIFY( m7({}) == 0 );
+
+ m4 = f;
+ std::move_only_function<int(CountedArg&&)&> m8;
+ m8 = std::move(m4);
+ VERIFY( m8({}) == 0 );
+
+ m1 = f;
+ std::move_only_function<long(CountedArg) const noexcept> m9;
+ m9 = std::move(m1);
+ VERIFY( m9(c) == 2 );
+}
+
+void
+test03()
+{
+ std::move_only_function<int(long) const noexcept> e;
+ VERIFY( e == nullptr );
+
+ std::move_only_function<int(long) const> e2(std::move(e));
+ VERIFY( e2 == nullptr );
+ e2 = std::move(e);
+ VERIFY( e2 == nullptr );
+
+ std::move_only_function<bool(int) const> e3(std::move(e));
+ VERIFY( e3 == nullptr );
+ e3 = std::move(e);
+ VERIFY( e3 == nullptr );
+}
+
+void
+test04()
+{
+ struct F
+ {
+ int operator()(CountedArg const& arg) noexcept
+ { return arg.counter; }
+
+ int operator()(CountedArg const& arg) const noexcept
+ { return arg.counter + 1000; }
+ };
+
+ F f;
+ std::move_only_function<int(CountedArg) const> m1(f);
+ VERIFY( m1(c) == 1001 );
+
+  // Call the const overload, as the std::move_only_function<int(CountedArg) const>
+  // stored inside std::move_only_function<int(CountedArg)> would do.
+ std::move_only_function<int(CountedArg)> m2(std::move(m1));
+ VERIFY( m2(c) == 1001 );
+
+ std::move_only_function<int(CountedArg)> m3(f);
+ VERIFY( m3(c) == 1 );
+}
+
+void
+test05()
+{
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::move_only_function<int(CountedArg)> w1(f);
+  // move_only_function stores a move_only_function due to incompatible signatures
+ std::move_only_function<int(CountedArg const&)> w2(std::move(w1));
+  // a copy is made when passing to int(CountedArg)
+ VERIFY( w2(c) == 1 );
+ // wrapped 3 times
+ w1 = std::move(w2);
+ VERIFY( w1(c) == 2 );
+ // wrapped 4 times
+ w2 = std::move(w1);
+ VERIFY( w2(c) == 2 );
+ // wrapped 5 times
+ w1 = std::move(w2);
+ VERIFY( w1(c) == 3 );
+}
+
+void
+test06()
+{
+ // No special interoperability with std::function
+ auto f = [](CountedArg const& arg) noexcept { return arg.counter; };
+ std::function<int(CountedArg)> f1(f);
+ std::move_only_function<int(CountedArg) const> m1(std::move(f1));
+ VERIFY( m1(c) == 2 );
+}
+
+int main()
+{
+ test01();
+ test02();
+ test03();
+ test04();
+ test05();
+ test06();
+}
diff --git a/libstdc++-v3/testsuite/20_util/move_only_function/move.cc b/libstdc++-v3/testsuite/20_util/move_only_function/move.cc
index 51e31a6..6da02c9 100644
--- a/libstdc++-v3/testsuite/20_util/move_only_function/move.cc
+++ b/libstdc++-v3/testsuite/20_util/move_only_function/move.cc
@@ -32,6 +32,12 @@ test01()
VERIFY( m1().copy == 1 );
VERIFY( m1().move == 0 );
+  // The standard specifies move assignment as move-construct-and-swap
+ m1 = std::move(m1);
+ VERIFY( m1 != nullptr );
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
// This will move construct a new target object and destroy the old one:
auto m2 = std::move(m1);
VERIFY( m1 == nullptr && m2 != nullptr );
@@ -80,6 +86,11 @@ test02()
VERIFY( m1().copy == 1 );
VERIFY( m1().move == 0 );
+ m1 = std::move(m1);
+ VERIFY( m1 != nullptr );
+ VERIFY( m1().copy == 1 );
+ VERIFY( m1().move == 0 );
+
// The target object is on the heap so this just moves a pointer:
auto m2 = std::move(m1);
VERIFY( m1 == nullptr && m2 != nullptr );
diff --git a/libstdc++-v3/testsuite/std/format/arguments/args.cc b/libstdc++-v3/testsuite/std/format/arguments/args.cc
index 4c50bc7..6029675 100644
--- a/libstdc++-v3/testsuite/std/format/arguments/args.cc
+++ b/libstdc++-v3/testsuite/std/format/arguments/args.cc
@@ -164,24 +164,6 @@ void test_visited_as_handle()
#endif
}
-template<typename E, typename S>
-void test_visited_as()
-{
- auto v = static_cast<S>(1.0);
- auto store = std::make_format_args(v);
- std::format_args args = store;
-
- auto is_expected_val = [v](auto arg) {
- if constexpr (std::is_same_v<decltype(arg), E>)
- return arg == static_cast<E>(v);
- return false;
- };
- VERIFY( std::visit_format_arg(is_expected_val, args.get(0)) );
-#if __cpp_lib_format >= 202306L // C++26 adds std::basic_format_arg::visit
- VERIFY( args.get(0).visit(is_expected_val) );
-#endif
-}
-
template<typename T>
concept can_format = std::is_default_constructible_v<std::formatter<T, char>>;
@@ -195,30 +177,31 @@ int main()
test_visited_as_handle<__int128>();
test_visited_as_handle<unsigned __int128>();
#endif
-// TODO: This should be visited as handle.
-#ifdef __STDCPP_FLOAT16_T__
- if constexpr (can_format<_Float16>)
- test_visited_as<float, _Float16>();
-#endif
-#ifdef __STDCPP_BFLOAT16_T__
+#ifdef __BFLT16_DIG__
if constexpr (can_format<__gnu_cxx::__bfloat16_t>)
- test_visited_as<float, __gnu_cxx::__bfloat16_t>();
+ test_visited_as_handle<__gnu_cxx::__bfloat16_t>();
+#endif
+#ifdef __FLT16_DIG__
+ if constexpr (can_format<_Float16>)
+ test_visited_as_handle<_Float16>();
#endif
#ifdef __FLT32_DIG__
if constexpr (can_format<_Float32>)
- test_visited_as<float, _Float32>();
+ test_visited_as_handle<_Float32>();
#endif
#ifdef __FLT64_DIG__
if constexpr (can_format<_Float64>)
- test_visited_as<double, _Float64>();
+ test_visited_as_handle<_Float64>();
#endif
#ifdef __FLT128_DIG__
if constexpr (can_format<_Float128>)
-# ifdef _GLIBCXX_LDOUBLE_IS_IEEE_BINARY128
- test_visited_as<long double, _Float128>();
-# else
test_visited_as_handle<_Float128>();
-# endif
+#endif
+#ifdef __SIZEOF_FLOAT128__
+  // __ieee128 is the same type as __float128, and may be long double
+ if constexpr (!std::is_same_v<__float128, long double>)
+ if constexpr (can_format<__float128>)
+ test_visited_as_handle<__float128>();
#endif
#ifdef _GLIBCXX_LONG_DOUBLE_ALT128_COMPAT
if constexpr (!std::is_same_v<__ieee128, long double>)
diff --git a/libstdc++-v3/testsuite/std/format/parse_ctx.cc b/libstdc++-v3/testsuite/std/format/parse_ctx.cc
index b5dd7cd..adafc58 100644
--- a/libstdc++-v3/testsuite/std/format/parse_ctx.cc
+++ b/libstdc++-v3/testsuite/std/format/parse_ctx.cc
@@ -443,6 +443,8 @@ test_custom()
}
#if __cpp_lib_format >= 202305
+#include <stdfloat>
+
struct X { };
template<>
@@ -458,13 +460,20 @@ struct std::formatter<X, char>
if (spec == "int")
{
pc.check_dynamic_spec_integral(pc.next_arg_id());
- integer = true;
+ type = Type::integral;
}
else if (spec == "str")
{
pc.check_dynamic_spec_string(pc.next_arg_id());
- integer = false;
+ type = Type::string;
+ }
+ else if (spec == "float")
+ {
+ pc.check_dynamic_spec<float, double, long double>(pc.next_arg_id());
+ type = Type::floating;
}
+ else if (spec == "other")
+ type = Type::other;
else
throw std::format_error("invalid format-spec");
return pc.begin() + spec.size();
@@ -474,13 +483,44 @@ struct std::formatter<X, char>
format(X, std::format_context& c) const
{
std::visit_format_arg([this]<typename T>(T) { // { dg-warning "deprecated" "" { target c++26 } }
- if (is_integral_v<T> != this->integer)
- throw std::format_error("invalid argument type");
+ constexpr bool is_handle
+ = std::is_same_v<std::basic_format_arg<std::format_context>::handle, T>;
+ constexpr bool is_integral
+ = std::is_same_v<int, T> || std::is_same_v<unsigned int, T>
+ || is_same_v<long long, T> || std::is_same_v<unsigned long long, T>;
+ constexpr bool is_string
+ = std::is_same_v<const char*, T> || std::is_same_v<std::string_view, T>;
+ constexpr bool is_floating
+ = std::is_same_v<float, T> || std::is_same_v<double, T>
+ || std::is_same_v<long double, T>;
+ switch (this->type)
+ {
+ case Type::other:
+ if (is_handle) return;
+ break;
+ case Type::integral:
+ if (is_integral) return;
+ break;
+ case Type::string:
+ if (is_string) return;
+ break;
+ case Type::floating:
+ if (is_floating) return;
+ break;
+ }
+ throw std::format_error("invalid argument type");
}, c.arg(1));
return c.out();
}
private:
- bool integer = false;
+ enum class Type
+ {
+ other,
+ integral,
+ string,
+ floating,
+ };
+ Type type = Type::other;
};
#endif
@@ -497,6 +537,28 @@ test_dynamic_type_check()
(void) std::format("{:int}", X{}, 42L);
(void) std::format("{:str}", X{}, "H2G2");
+ (void) std::format("{:float}", X{}, 10.0);
+
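+  // The extended floating-point types below are not among basic_format_arg's
+  // built-in alternatives, so they are expected to be stored as handle and
+  // thus satisfy the "other" spec.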
+#ifdef __STDCPP_BFLOAT16_T__
+ if constexpr (std::formattable<std::bfloat16_t, char>)
+ (void) std::format("{:other}", X{}, 10.0bf16);
+#endif
+#ifdef __STDCPP_FLOAT16_T__
+ if constexpr (std::formattable<std::float16_t, char>)
+ (void) std::format("{:other}", X{}, 10.0f16);
+#endif
+#ifdef __STDCPP_FLOAT32_T__
+ if constexpr (std::formattable<std::float32_t, char>)
+ (void) std::format("{:other}", X{}, 10.0f32);
+#endif
+#ifdef __STDCPP_FLOAT64_T__
+ if constexpr (std::formattable<std::float64_t, char>)
+ (void) std::format("{:other}", X{}, 10.0f64);
+#endif
+#ifdef __STDCPP_FLOAT128_T__
+ if constexpr (std::formattable<std::float128_t, char>)
+ (void) std::format("{:other}", X{}, 10.0f128);
+#endif
#endif
}