128 files changed, 2384 insertions, 1799 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 9167c84..19c28f1 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,61 @@ +2025-10-13 Shreya Munnangi <smunnangi1@ventanamicro.com> + + PR target/120811 + * config/riscv/riscv.cc (synthesize_add): Exchange constant terms when + generating addi pairs. + (synthesize_addsi): Similarly. + * config/riscv/riscv.md (addptr<mode>3): New define_expand. + (*add<mode>3_const_sum_of_two_s12): Remove pattern. + +2025-10-13 Jeff Law <jlaw@ventanamicro.com> + + PR target/120674 + * config/riscv/riscv.cc (riscv_dwarf_poly_indeterminite_value): Do not + set FACTOR to zero, for that case use one instead. + +2025-10-13 Pan Li <pan2.li@intel.com> + + * match.pd: Add simplifed pattern for widen_mul based unsigned + SAT_MUL. + +2025-10-13 Jan Hubicka <hubicka@ucw.cz> + + * ipa-inline.cc (max_count): Remove. + (has_nonzero_ipa_profile): New. + (inline_small_functions): Update. + (dump_inline_stats): Update. + +2025-10-13 Robin Dapp <rdapp@ventanamicro.com> + + PR target/118019 + * internal-fn.cc (get_supported_else_vals): Exit at invalid + index. + (internal_strided_fn_supported_p): New funtion. + * internal-fn.h (internal_strided_fn_supported_p): Declare. + * tree-vect-stmts.cc (vector_vector_composition_type): + Add vector_only argument. + (vect_use_grouped_gather): New function. + (vect_get_store_rhs): Adjust docs of + vector_vector_composition_type. + (get_load_store_type): Try grouped gather. + (vectorizable_store): Use punned vectype. + (vectorizable_load): Ditto. + * tree-vectorizer.h (struct vect_load_store_data): Add punned + vectype. + +2025-10-13 Avinash Jayakar <avinashd@linux.ibm.com> + + PR tree-optimization/122213 + * match.pd: Canonicalize unsigned pow2 div only for trunk, floor and + exact div. + +2025-10-13 Richard Biener <rguenther@suse.de> + + * tree-vect-patterns.cc (integer_type_for_mask): Add optional + output dt argument. + (vect_recog_bool_pattern): Make sure to not apply the bitwise + binary pattern to an external operand. + 2025-10-11 Bohan Lei <garthlei@linux.alibaba.com> PR target/119587 diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP index 1a4c009..75df97b 100644 --- a/gcc/DATESTAMP +++ b/gcc/DATESTAMP @@ -1 +1 @@ -20251013 +20251014 diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog index feffdf1..07ea6aa 100644 --- a/gcc/c-family/ChangeLog +++ b/gcc/c-family/ChangeLog @@ -1,3 +1,8 @@ +2025-10-13 Iain Sandoe <iain@sandoe.co.uk> + + * c.opt: Enable Wignored-attributes for Objective-C and + Objective-C++. + 2025-10-09 David Faust <david.faust@oracle.com> * c-attribs.cc (c_common_attribute_table): Add btf_decl_tag and diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt index a7fd14a..b7ce67a 100644 --- a/gcc/c-family/c.opt +++ b/gcc/c-family/c.opt @@ -774,7 +774,7 @@ C++ ObjC++ Var(warn_extra_semi) Init(-1) Warning Warn about semicolon after in-class function definition. Wflex-array-member-not-at-end -C C++ Var(warn_flex_array_member_not_at_end) Warning +C ObjC C++ ObjC++ Var(warn_flex_array_member_not_at_end) Warning Warn when a structure containing a C99 flexible array member as the last field is not at the end of another structure. @@ -866,7 +866,7 @@ C ObjC C++ ObjC++ Var(warn_if_not_aligned) Init(1) Warning Warn when the field in a struct is not aligned. Wignored-qualifiers -C C++ Var(warn_ignored_qualifiers) Warning EnabledBy(Wextra) +C ObjC C++ ObjC++ Var(warn_ignored_qualifiers) Warning EnabledBy(Wextra) Warn whenever type qualifiers are ignored. 
Wignored-attributes @@ -1013,7 +1013,7 @@ C ObjC C++ ObjC++ Var(warn_memset_transposed_args) Warning LangEnabledBy(C ObjC Warn about suspicious calls to memset where the third argument is constant literal zero and the second is not. Wmisleading-indentation -C C++ Common Var(warn_misleading_indentation) Warning LangEnabledBy(C C++,Wall) +C ObjC C++ ObjC++ Common Var(warn_misleading_indentation) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall) Warn when the indentation of the code does not reflect the block structure. Wmismatched-dealloc @@ -1187,7 +1187,7 @@ C ObjC Var(warn_old_style_definition) Init(-1) Warning Warn if an old-style parameter definition is used. Wopenacc-parallelism -C C++ Var(warn_openacc_parallelism) Warning +C ObjC C++ ObjC++ Var(warn_openacc_parallelism) Warning Warn about potentially suboptimal choices related to OpenACC parallelism. Wopenmp @@ -1195,7 +1195,7 @@ C ObjC C++ ObjC++ Warning Var(warn_openmp) Init(1) Warn about suspicious OpenMP code. Wopenmp-simd -C C++ Var(warn_openmp_simd) Warning LangEnabledBy(C C++,Wall) +C ObjC C++ ObjC++ Var(warn_openmp_simd) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall) Warn if a simd directive is overridden by the vectorizer cost model. Woverlength-strings @@ -1243,11 +1243,11 @@ C++ ObjC++ Var(warn_pessimizing_move) Warning LangEnabledBy(C++ ObjC++, Wall) Warn about calling std::move on a local object in a return statement preventing copy elision. Wplacement-new -C++ Warning Alias(Wplacement-new=, 1, 0) +C++ ObjC++ Warning Alias(Wplacement-new=, 1, 0) Warn for placement new expressions with undefined behavior. Wplacement-new= -C++ Joined RejectNegative UInteger Var(warn_placement_new) Init(-1) Warning IntegerRange(0, 2) +C++ ObjC++ Joined RejectNegative UInteger Var(warn_placement_new) Init(-1) Warning IntegerRange(0, 2) Warn for placement new expressions with undefined behavior. Wpmf-conversions @@ -1417,7 +1417,7 @@ C ObjC C++ ObjC++ LangEnabledBy(C ObjC C++ ObjC++,Wall, 3, 0) IntegerRange(0, 3) ; Wstrict-flex-arrays -C C++ Var(warn_strict_flex_arrays) Warning +C ObjC C++ ObjC++ Var(warn_strict_flex_arrays) Warning Warn about improper usages of flexible array members according to the level of -fstrict-flex-arrays. @@ -1495,7 +1495,7 @@ C ObjC C++ ObjC++ Var(warn_switch_outside_range) Warning Init(1) Warn about switch values that are outside of the switch's type range. Wsync-nand -C C++ Var(warn_sync_nand) Init(1) Warning +C ObjC C++ ObjC++ Var(warn_sync_nand) Init(1) Warning Warn when __sync_fetch_and_nand and __sync_nand_and_fetch built-in functions are used. Wsynth @@ -1900,35 +1900,35 @@ EnumValue Enum(on_off) String(on) Value(1) fcontract-assumption-mode= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-assumption-mode=[on|off] Enable or disable treating axiom level contracts as assumptions (default on). fcontract-build-level= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-build-level=[off|default|audit] Specify max contract level to generate runtime checks for. fcontract-strict-declarations= -C++ Var(flag_contract_strict_declarations) Enum(on_off) Joined Init(0) RejectNegative +C++ ObjC++ Var(flag_contract_strict_declarations) Enum(on_off) Joined Init(0) RejectNegative -fcontract-strict-declarations=[on|off] Enable or disable warnings on generalized redeclaration of functions with contracts (default off). 
fcontract-mode= -C++ Var(flag_contract_mode) Enum(on_off) Joined Init(1) RejectNegative +C++ ObjC++ Var(flag_contract_mode) Enum(on_off) Joined Init(1) RejectNegative -fcontract-mode=[on|off] Enable or disable all contract facilities (default on). fcontract-continuation-mode= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-continuation-mode=[on|off] Enable or disable contract continuation mode (default off). fcontract-role= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-role=<name>:<semantics> Specify the semantics for all levels in a role (default, review), or a custom contract role with given semantics (ex: opt:assume,assume,assume). fcontract-semantic= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-semantic=<level>:<semantic> Specify the concrete semantics for level. fcoroutines -C++ LTO Var(flag_coroutines) +C++ ObjC++ LTO Var(flag_coroutines) Enable C++ coroutines (experimental). fdebug-cpp @@ -2130,23 +2130,23 @@ C ObjC Var(warn_compare_distinct_pointer_types) Warning Init(1) Warn if pointers of distinct types are compared without a cast. flang-info-include-translate -C++ Var(note_include_translate_yes) +C++ ObjC++ Var(note_include_translate_yes) Note #include directives translated to import declarations. flang-info-include-translate-not -C++ Var(note_include_translate_no) +C++ ObjC++ Var(note_include_translate_no) Note #include directives not translated to import declarations, and not known to be textual. flang-info-include-translate= -C++ Joined RejectNegative MissingArgError(missing header name) +C++ ObjC++ Joined RejectNegative MissingArgError(missing header name) Note a #include translation of a specific header. flang-info-module-cmi -C++ Var(note_module_cmi_yes) +C++ ObjC++ Var(note_module_cmi_yes) Note Compiled Module Interface pathnames. flang-info-module-cmi= -C++ Joined RejectNegative MissingArgError(missing module name) +C++ ObjC++ Joined RejectNegative MissingArgError(missing module name) Note Compiled Module Interface pathname of a specific module or header-unit. fmax-include-depth= @@ -2357,10 +2357,10 @@ C++ ObjC++ Var(flag_sized_deallocation) Init(-1) Enable C++14 sized deallocation support. fstrict-flex-arrays -C C++ Common Alias(fstrict-flex-arrays=,3,0) +C ObjC C++ ObjC++ Common Alias(fstrict-flex-arrays=,3,0) fstrict-flex-arrays= -C C++ Common Joined RejectNegative UInteger Var(flag_strict_flex_arrays) Init(0) IntegerRange(0,3) +C ObjC C++ ObjC++ Common Joined RejectNegative UInteger Var(flag_strict_flex_arrays) Init(0) IntegerRange(0,3) -fstrict-flex-arrays=<level> Control when to treat the trailing array of a structure as a flexible array member for the purposes of accessing the elements of such an array. The default is treating all trailing arrays of structures as flexible array members. 
fsquangle diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h index 3f29f17..4efa2c0 100644 --- a/gcc/common/config/i386/cpuinfo.h +++ b/gcc/common/config/i386/cpuinfo.h @@ -1024,8 +1024,6 @@ get_available_features (struct __processor_model *cpu_model, set_feature (FEATURE_AMX_AVX512); if (eax & bit_AMX_TF32) set_feature (FEATURE_AMX_TF32); - if (eax & bit_AMX_TRANSPOSE) - set_feature (FEATURE_AMX_TRANSPOSE); if (eax & bit_AMX_FP8) set_feature (FEATURE_AMX_FP8); if (eax & bit_AMX_MOVRS) diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc index 9e807e4..d3509e1 100644 --- a/gcc/common/config/i386/i386-common.cc +++ b/gcc/common/config/i386/i386-common.cc @@ -134,8 +134,6 @@ along with GCC; see the file COPYING3. If not see (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_AVX512) #define OPTION_MASK_ISA2_AMX_TF32_SET \ (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_TF32) -#define OPTION_MASK_ISA2_AMX_TRANSPOSE_SET \ - (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_TRANSPOSE) #define OPTION_MASK_ISA2_AMX_FP8_SET \ (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_FP8) #define OPTION_MASK_ISA2_MOVRS_SET OPTION_MASK_ISA2_MOVRS @@ -303,8 +301,8 @@ along with GCC; see the file COPYING3. If not see (OPTION_MASK_ISA2_AMX_TILE | OPTION_MASK_ISA2_AMX_INT8_UNSET \ | OPTION_MASK_ISA2_AMX_BF16_UNSET | OPTION_MASK_ISA2_AMX_FP16_UNSET \ | OPTION_MASK_ISA2_AMX_COMPLEX_UNSET | OPTION_MASK_ISA2_AMX_AVX512_UNSET \ - | OPTION_MASK_ISA2_AMX_TF32_UNSET | OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET \ - | OPTION_MASK_ISA2_AMX_FP8_UNSET | OPTION_MASK_ISA2_AMX_MOVRS_UNSET) + | OPTION_MASK_ISA2_AMX_TF32_UNSET | OPTION_MASK_ISA2_AMX_FP8_UNSET \ + | OPTION_MASK_ISA2_AMX_MOVRS_UNSET) #define OPTION_MASK_ISA2_AMX_INT8_UNSET OPTION_MASK_ISA2_AMX_INT8 #define OPTION_MASK_ISA2_AMX_BF16_UNSET OPTION_MASK_ISA2_AMX_BF16 #define OPTION_MASK_ISA2_UINTR_UNSET OPTION_MASK_ISA2_UINTR @@ -330,7 +328,6 @@ along with GCC; see the file COPYING3. 
If not see #define OPTION_MASK_ISA2_AVX10_2_UNSET OPTION_MASK_ISA2_AVX10_2 #define OPTION_MASK_ISA2_AMX_AVX512_UNSET OPTION_MASK_ISA2_AMX_AVX512 #define OPTION_MASK_ISA2_AMX_TF32_UNSET OPTION_MASK_ISA2_AMX_TF32 -#define OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET OPTION_MASK_ISA2_AMX_TRANSPOSE #define OPTION_MASK_ISA2_AMX_FP8_UNSET OPTION_MASK_ISA2_AMX_FP8 #define OPTION_MASK_ISA2_MOVRS_UNSET OPTION_MASK_ISA2_MOVRS #define OPTION_MASK_ISA2_AMX_MOVRS_UNSET OPTION_MASK_ISA2_AMX_MOVRS @@ -1396,20 +1393,6 @@ ix86_handle_option (struct gcc_options *opts, } return true; - case OPT_mamx_transpose: - if (value) - { - opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AMX_TRANSPOSE_SET; - opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AMX_TRANSPOSE_SET; - } - else - { - opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET; - opts->x_ix86_isa_flags2_explicit |= - OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET; - } - return true; - case OPT_mamx_fp8: if (value) { diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h index c73a87d..0e75626 100644 --- a/gcc/common/config/i386/i386-cpuinfo.h +++ b/gcc/common/config/i386/i386-cpuinfo.h @@ -269,8 +269,7 @@ enum processor_features FEATURE_AVX10_2 = 116, FEATURE_AMX_AVX512, FEATURE_AMX_TF32, - FEATURE_AMX_TRANSPOSE, - FEATURE_AMX_FP8, + FEATURE_AMX_FP8 = 120, FEATURE_MOVRS, FEATURE_AMX_MOVRS, CPU_FEATURE_MAX diff --git a/gcc/common/config/i386/i386-isas.h b/gcc/common/config/i386/i386-isas.h index 379bb34..fcd3ab2 100644 --- a/gcc/common/config/i386/i386-isas.h +++ b/gcc/common/config/i386/i386-isas.h @@ -188,8 +188,6 @@ ISA_NAMES_TABLE_START ISA_NAMES_TABLE_ENTRY("amx-avx512", FEATURE_AMX_AVX512, P_NONE, "-mamx-avx512") ISA_NAMES_TABLE_ENTRY("amx-tf32", FEATURE_AMX_TF32, P_NONE, "-mamx-tf32") - ISA_NAMES_TABLE_ENTRY("amx-transpose", FEATURE_AMX_TRANSPOSE, - P_NONE, "-mamx-transpose") ISA_NAMES_TABLE_ENTRY("amx-fp8", FEATURE_AMX_FP8, P_NONE, "-mamx-fp8") ISA_NAMES_TABLE_ENTRY("movrs", FEATURE_MOVRS, P_NONE, "-mmovrs") ISA_NAMES_TABLE_ENTRY("amx-movrs", FEATURE_AMX_MOVRS, P_NONE, "-mamx-movrs") diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc index efa2a45..adfd220 100644 --- a/gcc/common/config/riscv/riscv-common.cc +++ b/gcc/common/config/riscv/riscv-common.cc @@ -1404,6 +1404,47 @@ fail: return NULL; } +/* Get the profile that best matches the current architecture string, + where best is defined as the most expansive profile. */ + +const char * +riscv_subset_list::get_profile_name () const +{ + const char *best_profile = NULL; + int max_ext_count = -1; + + for (int i = 0; riscv_profiles_table[i].profile_name != nullptr; ++i) + { + riscv_subset_list *subset_list = riscv_subset_list::parse ( + riscv_profiles_table[i].profile_string, NULL); + if (!subset_list) + continue; + if (subset_list->xlen () == this->xlen ()) + { + int ext_count = 0; + bool all_found = true; + for (riscv_subset_t *p = subset_list->m_head; p != NULL; + p = p->next, ++ext_count) + { + if (!this->lookup (p->name.c_str (), + p->major_version, + p->minor_version)) + { + all_found = false; + break; + } + } + if (all_found && ext_count > max_ext_count) + { + max_ext_count = ext_count; + best_profile = riscv_profiles_table[i].profile_name; + } + } + delete subset_list; + } + return best_profile; +} + /* Clone whole subset list. 
*/ riscv_subset_list * diff --git a/gcc/config.gcc b/gcc/config.gcc index a73bf95..2f478e2 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -464,8 +464,8 @@ i[34567]86-*-* | x86_64-*-*) avx10_2mediaintrin.h avx10_2convertintrin.h avx10_2bf16intrin.h avx10_2satcvtintrin.h avx10_2minmaxintrin.h avx10_2copyintrin.h - amxavx512intrin.h amxtf32intrin.h amxtransposeintrin.h - amxfp8intrin.h movrsintrin.h amxmovrsintrin.h" + amxavx512intrin.h amxtf32intrin.h amxfp8intrin.h + movrsintrin.h amxmovrsintrin.h" ;; ia64-*-*) extra_headers=ia64intrin.h diff --git a/gcc/config/i386/amxmovrsintrin.h b/gcc/config/i386/amxmovrsintrin.h index 97969f8..019adcf 100644 --- a/gcc/config/i386/amxmovrsintrin.h +++ b/gcc/config/i386/amxmovrsintrin.h @@ -59,53 +59,6 @@ __asm__ volatile \ #pragma GCC pop_options #endif /* __DISABLE_AMX_MOVRS__ */ -#if !defined(__AMX_MOVRS__) || !defined (__AMX_TRANSPOSE__) -#pragma GCC push_options -#pragma GCC target("amx-movrs,amx-transpose") -#define __DISABLE_AMX_MOVRS_TRANSPOSE__ -#endif /* __AMX_MOVRS_TRANSPOSE__ */ - -#define _tile_2rpntlvwz0rs_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz0rs\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz0rs\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz0rst1_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz0rst1\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz0rst1\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz1rs_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz1rs\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz1rs\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz1rst1_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz1rst1\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz1rst1\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz0rs(tdst, base, stride) \ - _tile_2rpntlvwz0rs_internal(tdst, base, stride) - -#define _tile_2rpntlvwz0rst1(tdst, base, stride) \ - _tile_2rpntlvwz0rst1_internal(tdst, base, stride) - -#define _tile_2rpntlvwz1rs(tdst, base, stride) \ - _tile_2rpntlvwz1rs_internal(tdst, base, stride) - -#define _tile_2rpntlvwz1rst1(tdst, base, stride) \ - _tile_2rpntlvwz1rst1_internal(tdst, base, stride) - -#ifdef __DISABLE_AMX_MOVRS_TRANSPOSE__ -#undef __DISABLE_AMX_MOVRS_TRANSPOSE__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_MOVRS_TRANSPOSE__ */ - #endif /* __x86_64__ */ #endif /* _AMX_MOVRSINTRIN_H_INCLUDED */ diff --git a/gcc/config/i386/amxtransposeintrin.h b/gcc/config/i386/amxtransposeintrin.h index f06603e..6409db3 100644 --- a/gcc/config/i386/amxtransposeintrin.h +++ b/gcc/config/i386/amxtransposeintrin.h @@ -21,157 +21,4 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ -#if !defined _IMMINTRIN_H_INCLUDED -#error "Never use <amxtransposeintrin.h> directly; include <immintrin.h> instead." 
-#endif - -#ifndef _AMXTRANSPOSEINTRIN_H_INCLUDED -#define _AMXTRANSPOSEINTRIN_H_INCLUDED - -#if !defined(__AMX_TRANSPOSE__) -#pragma GCC push_options -#pragma GCC target("amx-transpose") -#define __DISABLE_AMX_TRANSPOSE__ -#endif /* __AMX_TRANSPOSE__ */ - -#if defined(__x86_64__) -#define _tile_transposed_internal(dst,src) \ - __asm__ volatile\ - ("{ttransposed\t%%tmm"#src", %%tmm"#dst"|ttransposed\t%%tmm"#dst", %%tmm"#src"}" ::) - -#define _tile_2rpntlvwz0_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz0\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz0\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz0t1_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz0t1\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz0t1\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*)(base)), "r" ((long)(stride))) - -#define _tile_2rpntlvwz1_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz1\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz1\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*)(base)), "r" ((long)(stride))) - -#define _tile_2rpntlvwz1t1_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz1t1\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz1t1\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*)(base)), "r" ((long)(stride))) - -#define _tile_transposed(dst,src) \ - _tile_transposed_internal (dst, src) - -#define _tile_2rpntlvwz0(dst,base,stride) \ - _tile_2rpntlvwz0_internal (dst, base, stride) - -#define _tile_2rpntlvwz0t1(dst,base,stride) \ - _tile_2rpntlvwz0t1_internal (dst, base, stride) - -#define _tile_2rpntlvwz1(dst,base,stride) \ - _tile_2rpntlvwz1_internal (dst, base, stride) - -#define _tile_2rpntlvwz1t1(dst,base,stride) \ - _tile_2rpntlvwz1t1_internal (dst, base, stride) - -#if !defined(__AMX_BF16__) -#pragma GCC push_options -#pragma GCC target("amx-bf16") -#define __DISABLE_AMX_BF16__ -#endif /* __AMX_BF16__ */ - -#define _tile_tdpbf16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttdpbf16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttdpbf16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tdpbf16ps(src1_dst,src2,src3) \ - _tile_tdpbf16ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_BF16__ -#undef __DISABLE_AMX_BF16__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_BF16__ */ - -#if !defined(__AMX_FP16__) -#pragma GCC push_options -#pragma GCC target("amx-fp16") -#define __DISABLE_AMX_FP16__ -#endif /* __AMX_FP16__ */ - -#define _tile_tdpfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttdpfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttdpfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tdpfp16ps(src1_dst,src2,src3) \ - _tile_tdpfp16ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_FP16__ -#undef __DISABLE_AMX_FP16__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_FP16__ */ - -#if !defined(__AMX_COMPLEX__) -#pragma GCC push_options -#pragma GCC target("amx-complex") -#define __DISABLE_AMX_COMPLEX__ -#endif /* __AMX_COMPLEX__ */ - -#define _tile_conjtcmmimfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{tconjtcmmimfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|tconjtcmmimfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_conjtfp16_internal(dst,src) \ - __asm__ volatile\ - ("{tconjtfp16\t%%tmm"#src", %%tmm"#dst"|tconjtfp16\t%%tmm"#dst", %%tmm"#src"}" ::) - -#define _tile_tcmmimfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttcmmimfp16ps\t%%tmm"#src3", %%tmm"#src2", 
%%tmm"#src1_dst"|ttcmmimfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tcmmrlfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttcmmrlfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttcmmrlfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_conjtcmmimfp16ps(src1_dst,src2,src3) \ - _tile_conjtcmmimfp16ps_internal (src1_dst, src2, src3) - -#define _tile_conjtfp16(dst,src) \ - _tile_conjtfp16_internal (dst, src) - -#define _tile_tcmmimfp16ps(src1_dst,src2,src3) \ - _tile_tcmmimfp16ps_internal (src1_dst, src2, src3) - -#define _tile_tcmmrlfp16ps(src1_dst,src2,src3) \ - _tile_tcmmrlfp16ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_COMPLEX__ -#undef __DISABLE_AMX_COMPLEX__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_COMPLEX__ */ - -#if !defined(__AMX_TF32__) -#pragma GCC push_options -#pragma GCC target("amx-tf32") -#define __DISABLE_AMX_TF32__ -#endif /* __AMX_TF32__ */ - -#define _tile_tmmultf32ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttmmultf32ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttmmultf32ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tmmultf32ps(src1_dst,src2,src3) \ - _tile_tmmultf32ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_TF32__ -#undef __DISABLE_AMX_TF32__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_TF32__ */ - -#endif /* __x86_64__ */ - -#ifdef __DISABLE_AMX_TRANSPOSE__ -#undef __DISABLE_AMX_TRANSPOSE__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_TRANSPOSE__ */ - -#endif /* _AMXTRANSPOSEINTRIN_H_INCLUDED */ +#error "AMX-TRANSPOSE support has been removed since GCC 16." diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h index 25e2835..04149c1 100644 --- a/gcc/config/i386/cpuid.h +++ b/gcc/config/i386/cpuid.h @@ -170,7 +170,6 @@ /* AMX sub leaf (%eax == 0x1e, %ecx == 1) */ /* %eax */ #define bit_AMX_FP8 (1 << 4) -#define bit_AMX_TRANSPOSE (1 << 5) #define bit_AMX_TF32 (1 << 6) #define bit_AMX_AVX512 (1 << 7) #define bit_AMX_MOVRS (1 << 8) diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc index 457aa05..0037465 100644 --- a/gcc/config/i386/i386-c.cc +++ b/gcc/config/i386/i386-c.cc @@ -743,8 +743,6 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag, def_or_undef (parse_in, "__AMX_AVX512__"); if (isa_flag2 & OPTION_MASK_ISA2_AMX_TF32) def_or_undef (parse_in, "__AMX_TF32__"); - if (isa_flag2 & OPTION_MASK_ISA2_AMX_TRANSPOSE) - def_or_undef (parse_in, "__AMX_TRANSPOSE__"); if (isa_flag2 & OPTION_MASK_ISA2_AMX_FP8) def_or_undef (parse_in, "__AMX_FP8__"); if (isa_flag2 & OPTION_MASK_ISA2_MOVRS) diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def index 6fa601d..a1d994c 100644 --- a/gcc/config/i386/i386-isa.def +++ b/gcc/config/i386/i386-isa.def @@ -122,7 +122,6 @@ DEF_PTA(AVX10_1) DEF_PTA(AVX10_2) DEF_PTA(AMX_AVX512) DEF_PTA(AMX_TF32) -DEF_PTA(AMX_TRANSPOSE) DEF_PTA(AMX_FP8) DEF_PTA(MOVRS) DEF_PTA(AMX_MOVRS) diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc index cad4019..35cba3f 100644 --- a/gcc/config/i386/i386-options.cc +++ b/gcc/config/i386/i386-options.cc @@ -264,7 +264,6 @@ static struct ix86_target_opts isa2_opts[] = { "-mavx10.2", OPTION_MASK_ISA2_AVX10_2 }, { "-mamx-avx512", OPTION_MASK_ISA2_AMX_AVX512 }, { "-mamx-tf32", OPTION_MASK_ISA2_AMX_TF32 }, - { "-mamx-transpose", OPTION_MASK_ISA2_AMX_TRANSPOSE }, { "-mamx-fp8", OPTION_MASK_ISA2_AMX_FP8 }, { "-mmovrs", OPTION_MASK_ISA2_MOVRS }, { "-mamx-movrs", OPTION_MASK_ISA2_AMX_MOVRS } @@ 
-1123,7 +1122,6 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[], IX86_ATTR_ISA ("avx10.2", OPT_mavx10_2), IX86_ATTR_ISA ("amx-avx512", OPT_mamx_avx512), IX86_ATTR_ISA ("amx-tf32", OPT_mamx_tf32), - IX86_ATTR_ISA ("amx-transpose", OPT_mamx_transpose), IX86_ATTR_ISA ("amx-fp8", OPT_mamx_fp8), IX86_ATTR_ISA ("movrs", OPT_mmovrs), IX86_ATTR_ISA ("amx-movrs", OPT_mamx_movrs), diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index f4c89f0..fbd8d9a 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -2485,8 +2485,8 @@ constexpr wide_int_bitmask PTA_PANTHERLAKE = constexpr wide_int_bitmask PTA_DIAMONDRAPIDS = PTA_GRANITERAPIDS_D | PTA_AVXIFMA | PTA_AVXNECONVERT | PTA_AVXVNNIINT16 | PTA_AVXVNNIINT8 | PTA_CMPCCXADD | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_AVX10_2 - | PTA_APX_F | PTA_AMX_AVX512 | PTA_AMX_FP8 | PTA_AMX_TF32 | PTA_AMX_TRANSPOSE - | PTA_MOVRS | PTA_AMX_MOVRS | PTA_USER_MSR; + | PTA_APX_F | PTA_AMX_AVX512 | PTA_AMX_FP8 | PTA_AMX_TF32 | PTA_MOVRS + | PTA_AMX_MOVRS | PTA_USER_MSR; constexpr wide_int_bitmask PTA_BDVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_POPCNT | PTA_LZCNT diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt index 1192176..8449450 100644 --- a/gcc/config/i386/i386.opt +++ b/gcc/config/i386/i386.opt @@ -1362,10 +1362,6 @@ mamx-tf32 Target Mask(ISA2_AMX_TF32) Var(ix86_isa_flags2) Save Support AMX-TF32 built-in functions and code generation. -mamx-transpose -Target Mask(ISA2_AMX_TRANSPOSE) Var(ix86_isa_flags2) Save -Support AMX-TRANSPOSE built-in functions and code generation. - mamx-fp8 Target Mask(ISA2_AMX_FP8) Var(ix86_isa_flags2) Save Support AMX-FP8 built-in functions and code generation. diff --git a/gcc/config/i386/i386.opt.urls b/gcc/config/i386/i386.opt.urls index cce524c..a9bbac0 100644 --- a/gcc/config/i386/i386.opt.urls +++ b/gcc/config/i386/i386.opt.urls @@ -605,9 +605,6 @@ UrlSuffix(gcc/x86-Options.html#index-mamx-avx512) mamx-tf32 UrlSuffix(gcc/x86-Options.html#index-mamx-tf32) -mamx-transpose -UrlSuffix(gcc/x86-Options.html#index-mamx-transpose) - mamx-fp8 UrlSuffix(gcc/x86-Options.html#index-mamx-fp8) diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h index b195fe5..f5a11ff 100644 --- a/gcc/config/i386/immintrin.h +++ b/gcc/config/i386/immintrin.h @@ -136,8 +136,6 @@ #include <amxtf32intrin.h> -#include <amxtransposeintrin.h> - #include <amxfp8intrin.h> #include <prfchwintrin.h> diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc index 4fc0528..d497326 100644 --- a/gcc/config/riscv/riscv-c.cc +++ b/gcc/config/riscv/riscv-c.cc @@ -165,6 +165,15 @@ riscv_cpu_cpp_builtins (cpp_reader *pfile) if (!subset_list) return; + /* Define profile macro if a profile was used. */ + const char *profile_name = subset_list->get_profile_name (); + if (profile_name) + { + char *profile_macro = (char *)alloca (strlen (profile_name) + 10); + sprintf (profile_macro, "__riscv_%s", profile_name); + builtin_define (profile_macro); + } + size_t max_ext_len = 0; /* Figure out the max length of extension name for reserving buffer. 
*/ diff --git a/gcc/config/riscv/riscv-subset.h b/gcc/config/riscv/riscv-subset.h index 4cd860f..1887ed7 100644 --- a/gcc/config/riscv/riscv-subset.h +++ b/gcc/config/riscv/riscv-subset.h @@ -105,6 +105,8 @@ public: unsigned xlen () const {return m_xlen;}; + const char *get_profile_name () const; + riscv_subset_list *clone () const; static riscv_subset_list *parse (const char *, location_t *); diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index aeec007..4e41b69 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,16 @@ +2025-10-13 Jakub Jelinek <jakub@redhat.com> + + PR c++/122228 + * decl.cc (cp_make_fname_decl): When not using fname_as_decl, + attempt to translate name into ordinary literal encoding. + +2025-10-13 Jakub Jelinek <jakub@redhat.com> + + * decl.cc (omp_declare_variant_finalize_one): If !nbase_args + and TREE_TYPE (decl) has TYPE_NO_NAMED_ARGS_STDARG_P bit set + and varg is NULL, temporarily set TYPE_NO_NAMED_ARGS_STDARG_P + on TREE_TYPE (variant). + 2025-10-12 Nathaniel Shead <nathanieloshead@gmail.com> PR c++/122163 diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc index 1ed2f37..9280632 100644 --- a/gcc/cp/parser.cc +++ b/gcc/cp/parser.cc @@ -19091,7 +19091,7 @@ cp_parser_mem_initializer_id (cp_parser* parser) return cp_parser_class_name (parser, /*typename_keyword_p=*/true, /*template_keyword_p=*/template_p, - typename_type, + class_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/true); diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi index a12855d..94b76b7 100644 --- a/gcc/doc/extend.texi +++ b/gcc/doc/extend.texi @@ -6750,11 +6750,6 @@ Enable/disable the generation of the AMX-AVX512 instructions. @itemx no-amx-tf32 Enable/disable the generation of the AMX-TF32 instructions. -@cindex @code{target("amx-transpose")} function attribute, x86 -@item amx-transpose -@itemx no-amx-transpose -Enable/disable the generation of the AMX-TRANSPOSE instructions. - @cindex @code{target("amx-fp8")} function attribute, x86 @item amx-fp8 @itemx no-amx-fp8 diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index 8559b73..6bd5128 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -1506,8 +1506,7 @@ See RS/6000 and PowerPC Options. -mamx-tile -mamx-int8 -mamx-bf16 -muintr -mhreset -mavxvnni -mamx-fp8 -mavx512fp16 -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mapxf --musermsr -mavx10.1 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mmovrs --mamx-movrs +-musermsr -mavx10.1 -mavx10.2 -mamx-avx512 -mamx-tf32 -mmovrs -mamx-movrs -mcldemote -mms-bitfields -mno-align-stringops -minline-all-stringops -minline-stringops-dynamically -mstringop-strategy=@var{alg} -mkl -mwidekl @@ -36154,9 +36153,6 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}. @opindex mamx-tf32 @itemx -mamx-tf32 @need 200 -@opindex mamx-transpose -@itemx -mamx-transpose -@need 200 @itemx -mamx-fp8 @opindex mamx-fp8 @need 200 @@ -36175,9 +36171,9 @@ WAITPKG, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B, AVX512BF16, ENQCMD, AVX512VPOPCNTDQ, AVX512VNNI, SERIALIZE, UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16, AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AMX-FP16, PREFETCHI, RAOINT, AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, -SM4, APX_F, USER_MSR, AVX10.1, AVX10.2, AMX-AVX512, AMX-TF32, AMX-TRANSPOSE, -AMX-FP8, MOVRS, AMX-MOVRS or CLDEMOTE extended instruction sets. 
Each has a -corresponding @option{-mno-} option to disable use of these instructions. +SM4, APX_F, USER_MSR, AVX10.1, AVX10.2, AMX-AVX512, AMX-TF32, AMX-FP8, MOVRS, +AMX-MOVRS or CLDEMOTE extended instruction sets. Each has a corresponding +@option{-mno-} option to disable use of these instructions. These extensions are also available as built-in functions: see @ref{x86 Built-in Functions}, for details of the functions enabled and diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi index c001e8e..29742e2 100644 --- a/gcc/doc/sourcebuild.texi +++ b/gcc/doc/sourcebuild.texi @@ -2698,9 +2698,6 @@ Target supports the execution of @code{amx-movrs} instructions. @item amx_tf32 Target supports the execution of @code{amx-tf32} instructions. -@item amx_transpose -Target supports the execution of @code{amx-transpose} instructions. - @item amx_fp8 Target supports the execution of @code{amx-fp8} instructions. diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog index 914341d..b84ce2f 100644 --- a/gcc/fortran/ChangeLog +++ b/gcc/fortran/ChangeLog @@ -1,3 +1,11 @@ +2025-10-13 Paul Thomas <pault@gcc.gnu.org> + + PR fortran/121191 + * trans-array.cc (has_parameterized_comps): New function which + checks if a derived type has parameterized components. + ( gfc_deallocate_pdt_comp): Use it to prevent deallocation of + PDTs if there are no parameterized components. + 2025-10-12 Paul Thomas <pault@gcc.gnu.org> PR fortran/95543 diff --git a/gcc/m2/ChangeLog b/gcc/m2/ChangeLog index 316e0ba..8efab9e 100644 --- a/gcc/m2/ChangeLog +++ b/gcc/m2/ChangeLog @@ -1,3 +1,67 @@ +2025-10-13 Gaius Mulley <gaiusmod2@gmail.com> + + PR modula2/122241 + * gm2-compiler/M2Quads.mod (BuildSizeFunction): Improve + error message. + (BuildTSizeFunction): Improve error message. + * gm2-compiler/P3Build.bnf (ProgramModule): New variable + namet. + Pass namet to P3EndBuildProgModule. + (ImplementationModule): New variable namet. + Pass namet to P3EndBuildImpModule. + (ModuleDeclaration): New variable namet. + Pass namet to P3EndBuildInnerModule. + (DefinitionModule): New variable namet. + Pass namet to P3EndBuildDefModule. + * gm2-compiler/P3SymBuild.def (P3EndBuildDefModule): New + parameter tokno. + (P3EndBuildImpModule): Ditto. + (P3EndBuildProgModule): Ditto. + (EndBuildInnerModule): Ditto. + * gm2-compiler/P3SymBuild.mod (P3EndBuildDefModule): New + parameter tokno. + Pass tokno to CheckForUnknownInModule. + (P3EndBuildImpModule): Ditto. + (P3EndBuildProgModule): Ditto. + (EndBuildInnerModule): Ditto. + * gm2-compiler/PCBuild.bnf (ProgramModule): New variable + namet. + Pass namet to PCEndBuildProgModule. + (ImplementationModule): New variable namet. + Pass namet to PCEndBuildImpModule. + (ModuleDeclaration): New variable namet. + Pass namet to PCEndBuildInnerModule. + (DefinitionModule): New variable namet. + Pass namet to PCEndBuildDefModule. + * gm2-compiler/PCSymBuild.def (PCEndBuildDefModule): New + parameter tokno. + (PCEndBuildImpModule): Ditto. + (PCEndBuildProgModule): Ditto. + (PCEndBuildInnerModule): Ditto. + * gm2-compiler/PCSymBuild.mod (PCEndBuildDefModule): New + parameter tokno. + Pass tokno to CheckForUnknownInModule. + (PCEndBuildImpModule): Ditto. + (PCEndBuildProgModule): Ditto. + (PCEndBuildInnerModule): Ditto. + * gm2-compiler/PHBuild.bnf (DefinitionModule): New variable + namet. + Pass namet to PHEndBuildDefModule. + (ModuleDeclaration): New variable namet. + Pass namet to PHEndBuildProgModule. + (ImplementationModule): New variable namet. + Pass namet to PHEndBuildImpModule. 
+ (ModuleDeclaration): New variable namet. + Pass namet to PHEndBuildInnerModule. + (DefinitionModule): New variable namet. + Pass namet to PHEndBuildDefModule. + * gm2-compiler/SymbolTable.def (CheckForUnknownInModule): Add + tokno parameter. + * gm2-compiler/SymbolTable.mod (CheckForUnknownInModule): Add + tokno parameter. + Pass tokno to CheckForUnknowns. + (CheckForUnknowns): Reimplement. + 2025-10-11 Gaius Mulley <gaiusmod2@gmail.com> PR modula2/122241 diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index dd5ccac..7b857ec 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,49 @@ +2025-10-13 Eric Botcazou <ebotcazou@adacore.com> + + * gcc.dg/cpp/cpp.exp: Process .i files. + * gcc.dg/cpp/pr36674.i: Pass -Wno-implicit-int. + * gcc.dg/cpp/escape-3.i: New test. + +2025-10-13 Shreya Munnangi <smunnangi1@ventanamicro.com> + + PR target/120811 + * gcc.target/riscv/add-synthesis-1.c: Adjust const to fit in range. + * gcc.target/riscv/pr120811.c: Add new test case. + * gcc.target/riscv/sum-of-two-s12-const-1.c: Adjust const to fit in range. + +2025-10-13 Jeff Law <jlaw@ventanamicro.com> + + PR target/120674 + * gcc.target/riscv/pr120674.c: New test. + +2025-10-13 Jakub Jelinek <jakub@redhat.com> + + PR c++/122228 + * g++.dg/cpp1y/func_constexpr3.C: New test. + +2025-10-13 Gaius Mulley <gaiusmod2@gmail.com> + + PR modula2/122241 + * gm2/iso/fail/badconst.mod: New test. + * gm2/iso/fail/badtype.mod: New test. + * gm2/iso/fail/badvar.mod: New test. + +2025-10-13 Alex Coplan <alex.coplan@arm.com> + + PR tree-optimization/121772 + * gcc.target/aarch64/torture/pr121772.c: Add -fchecking to + dg-options. + +2025-10-13 Robin Dapp <rdapp@ventanamicro.com> + + PR target/118019 + * gcc.target/riscv/rvv/autovec/pr118019-2.c: New test. + +2025-10-13 Paul Thomas <pault@gcc.gnu.org> + + PR fortran/121191 + * gfortran.dg/pdt_59.f03: New test. 
+ 2025-10-12 Nathaniel Shead <nathanieloshead@gmail.com> PR c++/122163 diff --git a/gcc/testsuite/g++.dg/other/i386-2.C b/gcc/testsuite/g++.dg/other/i386-2.C index 88252ad..d4c73f5 100644 --- a/gcc/testsuite/g++.dg/other/i386-2.C +++ b/gcc/testsuite/g++.dg/other/i386-2.C @@ -1,5 +1,5 @@ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */ -/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-skip-if "requires hosted libstdc++ for cstdlib malloc" { ! hostedlib } } */ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h, diff --git a/gcc/testsuite/g++.dg/other/i386-3.C b/gcc/testsuite/g++.dg/other/i386-3.C index a234e4f..e925607 100644 --- a/gcc/testsuite/g++.dg/other/i386-3.C +++ b/gcc/testsuite/g++.dg/other/i386-3.C @@ -1,5 +1,5 @@ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */ -/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-skip-if "requires hosted libstdc++ for cstdlib malloc" { ! 
hostedlib } } */ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h, diff --git a/gcc/testsuite/g++.dg/template/dependent-base6.C b/gcc/testsuite/g++.dg/template/dependent-base6.C index b4bc5c2..9f2a7a2 100644 --- a/gcc/testsuite/g++.dg/template/dependent-base6.C +++ b/gcc/testsuite/g++.dg/template/dependent-base6.C @@ -8,5 +8,7 @@ struct A { struct S1 : A::B { }; // OK -template<class T> struct S2 : T::B { }; // OK, used to fail +template<class T> struct S2 : T::B { // OK, used to fail + S2() : T::B() { } // Also OK +}; template struct S2<A>; diff --git a/gcc/testsuite/g++.dg/tree-ssa/cselim-1.C b/gcc/testsuite/g++.dg/tree-ssa/cselim-1.C new file mode 100644 index 0000000..a621945 --- /dev/null +++ b/gcc/testsuite/g++.dg/tree-ssa/cselim-1.C @@ -0,0 +1,37 @@ +/* { dg-do compile { target c++11 } } */ +/* { dg-options "-O2 -fdump-tree-phiopt1-details -fdump-tree-optimized" } */ +/* PR tree-optimization/122178 */ +/* cselim/cselim-limited should be able to handle clobbers. */ + +#include <new> + +struct s1 +{ + bool t; +}; + +void f(s1 *a, bool b) +{ + if (b) + { + a = new(a)s1{1}; + } + else + { + a = new(a)s1{0}; + } +} + +/* + The above should be optimized in phiopt1 to: + *a = {CLOBBER(bob)}; + a->t = b; + */ + + +/* { dg-final { scan-tree-dump-times "factoring out stores" 1 "phiopt1" } } */ +/* { dg-final { scan-tree-dump-times "factoring out clobber" 1 "phiopt1" } } */ +/* { dg-final { scan-tree-dump-times " converted to straightline code" 1 "phiopt1" } } */ +/* { dg-final { scan-tree-dump-not "if " "phiopt1" } } */ +/* { dg-final { scan-tree-dump-not "if " "optimized" } } */ + diff --git a/gcc/testsuite/gcc.dg/vect/pr120687-1.c b/gcc/testsuite/gcc.dg/vect/pr120687-1.c index ce9cf63..ac684c0 100644 --- a/gcc/testsuite/gcc.dg/vect/pr120687-1.c +++ b/gcc/testsuite/gcc.dg/vect/pr120687-1.c @@ -11,6 +11,6 @@ frd (unsigned *p, unsigned *lastone) return sum; } -/* { dg-final { scan-tree-dump "reduction: detected reduction chain" "vect" } } */ +/* { dg-final { scan-tree-dump "Starting SLP discovery of reduction chain" "vect" } } */ /* { dg-final { scan-tree-dump-not "SLP discovery of reduction chain failed" "vect" } } */ /* { dg-final { scan-tree-dump "optimized: loop vectorized" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr120687-2.c b/gcc/testsuite/gcc.dg/vect/pr120687-2.c index dfc6dc7..25f0355 100644 --- a/gcc/testsuite/gcc.dg/vect/pr120687-2.c +++ b/gcc/testsuite/gcc.dg/vect/pr120687-2.c @@ -12,6 +12,6 @@ frd (float *p, float *lastone) return sum; } -/* { dg-final { scan-tree-dump "reduction: detected reduction chain" "vect" } } */ +/* { dg-final { scan-tree-dump "Starting SLP discovery of reduction chain" "vect" } } */ /* { dg-final { scan-tree-dump-not "SLP discovery of reduction chain failed" "vect" } } */ /* { dg-final { scan-tree-dump "optimized: loop vectorized" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr120687-3.c b/gcc/testsuite/gcc.dg/vect/pr120687-3.c index f20a66a..31a6c94 100644 --- a/gcc/testsuite/gcc.dg/vect/pr120687-3.c +++ b/gcc/testsuite/gcc.dg/vect/pr120687-3.c @@ -11,6 +11,6 @@ frd (float *p, float *lastone) return sum; } -/* { dg-final { scan-tree-dump "reduction: detected reduction chain" "vect" } } */ +/* { dg-final { scan-tree-dump "Starting SLP discovery of reduction chain" "vect" } } */ /* { dg-final { scan-tree-dump-not "SLP discovery of reduction chain failed" "vect" } } */ /* { dg-final { scan-tree-dump "optimized: loop vectorized" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr121949_1.c 
b/gcc/testsuite/gcc.dg/vect/pr121949_1.c new file mode 100644 index 0000000..9e8d41e --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr121949_1.c @@ -0,0 +1,45 @@ +#ifndef TYPE +#define TYPE short +#define MAX 16 +#define IV_TYPE char +#endif + +#include "tree-vect.h" + +__attribute__((noipa)) +void f(TYPE* acc) +{ + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +__attribute__((noipa)) +void g(TYPE* acc) +{ +#pragma GCC novector + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +int main () +{ + + check_vect (); + + TYPE acc1[MAX] = {}; + TYPE acc2[MAX] = {}; +#pragma GCC novector + for (int i = 0; i < MAX; i++) + acc1[i] = acc2[i] = i; + + f (acc1); + f (acc2); + +#pragma GCC novector + for (int i = 0; i < MAX; i++) + if (acc1[i] != acc2[i]) + __builtin_abort (); +} + +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_var_shift && vect_int } } } } */ +/* { dg-final { scan-tree-dump "vect_recog_over_widening_pattern: detected" "vect" { target { vect_var_shift && vect_int } } } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr121949_2.c b/gcc/testsuite/gcc.dg/vect/pr121949_2.c new file mode 100644 index 0000000..f448eb6 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr121949_2.c @@ -0,0 +1,45 @@ +#ifndef TYPE +#define TYPE int +#define MAX 32 +#define IV_TYPE short +#endif + +#include "tree-vect.h" + +__attribute__((noipa)) +void f(TYPE* acc) +{ + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +__attribute__((noipa)) +void g(TYPE* acc) +{ +#pragma GCC novector + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +int main () +{ + + check_vect (); + + TYPE acc1[MAX] = {}; + TYPE acc2[MAX] = {}; +#pragma GCC novector + for (int i = 0; i < MAX; i++) + acc1[i] = acc2[i] = i; + + f (acc1); + f (acc2); + +#pragma GCC novector + for (int i = 0; i < MAX; i++) + if (acc1[i] != acc2[i]) + __builtin_abort (); +} + +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_var_shift && vect_int } } } } */ +/* { dg-final { scan-tree-dump-not "vect_recog_over_widening_pattern: detected" "vect" { target { vect_var_shift && vect_int } } } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr121949_3.c b/gcc/testsuite/gcc.dg/vect/pr121949_3.c new file mode 100644 index 0000000..b7e6a3d --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr121949_3.c @@ -0,0 +1,45 @@ +#ifndef TYPE +#define TYPE long long +#define MAX 64 +#define IV_TYPE int +#endif + +#include "tree-vect.h" + +__attribute__((noipa)) +void f(TYPE* acc) +{ + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +__attribute__((noipa)) +void g(TYPE* acc) +{ +#pragma GCC novector + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +int main () +{ + + check_vect (); + + TYPE acc1[MAX] = {}; + TYPE acc2[MAX] = {}; +#pragma GCC novector + for (int i = 0; i < MAX; i++) + acc1[i] = acc2[i] = i; + + f (acc1); + f (acc2); + +#pragma GCC novector + for (int i = 0; i < MAX; i++) + if (acc1[i] != acc2[i]) + __builtin_abort (); +} + +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_var_shift && vect_int } } } } */ +/* { dg-final { scan-tree-dump "vect_recog_vector_vector_shift_pattern: detected" "vect" { target { vect_var_shift && vect_int } } } } */ diff --git a/gcc/testsuite/gcc.target/i386/amx-check.h b/gcc/testsuite/gcc.target/i386/amx-check.h index 0addb5b..c43a955 100644 --- a/gcc/testsuite/gcc.target/i386/amx-check.h +++ 
b/gcc/testsuite/gcc.target/i386/amx-check.h @@ -260,9 +260,6 @@ main () #ifdef AMX_TF32 && __builtin_cpu_supports ("amx-tf32") #endif -#ifdef AMX_TRANSPOSE - && __builtin_cpu_supports ("amx-transpose") -#endif #ifdef AMX_FP8 && __builtin_cpu_supports ("amx-fp8") #endif diff --git a/gcc/testsuite/gcc.target/i386/amxmovrs-2rpntlvwrs-2.c b/gcc/testsuite/gcc.target/i386/amxmovrs-2rpntlvwrs-2.c deleted file mode 100644 index 0093ef7..0000000 --- a/gcc/testsuite/gcc.target/i386/amxmovrs-2rpntlvwrs-2.c +++ /dev/null @@ -1,58 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_movrs } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-movrs -mamx-transpose -mavx512fp16 -mavx512bf16" } */ -#define AMX_MOVRS -#define AMX_TRANSPOSE -#define DO_TEST test_amx_movrs_t2rpntlvwrs -void test_amx_movrs_t2rpntlvwrs (); -#include "amx-helper.h" - -#define init_pair_tile_reg_and_src_z_t(tmm_num, src, buffer, ztype, wtype) \ -{ \ - init_pair_tile_src (tmm_num, &src, buffer, ztype); \ - _tile_2rpntlvwz##ztype##rs##wtype (tmm_num, buffer, _STRIDE); \ -} - -void test_amx_movrs_t2rpntlvwrs () -{ - __tilecfg_u cfg; - __tilepair src; - __tile ref_0, ref_1; - uint8_t buffer[2048]; - int i; - - init_tile_config (&cfg); - - for (i = 0; i < 2048; i++) - buffer[i] = i % 256; - - /* Check t2rpntlvwz0rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz0t1rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1t1rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c b/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c index d99a97f..339550b 100644 --- a/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c +++ b/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c @@ -1,11 +1,7 @@ /* { dg-do compile { target { ! 
ia32 } } } */ -/* { dg-options "-O2 -mamx-movrs -mamx-transpose" } */ +/* { dg-options "-O2 -mamx-movrs" } */ /* { dg-final { scan-assembler "tileloaddrs\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ /* { dg-final { scan-assembler "tileloaddrst1\[ \\t]+\[^\n\]*\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rs\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rst1\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rs\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rst1\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ #include <immintrin.h> extern const void* base; @@ -20,8 +16,4 @@ void TEST() { _tile_loaddrs (TMM1, base, stride); _tile_loaddrst1 (TMM1, base, stride); - _tile_2rpntlvwz0rs (TMM0, base, stride); - _tile_2rpntlvwz0rst1 (TMM1, base, stride); - _tile_2rpntlvwz1rs (TMM2, base, stride); - _tile_2rpntlvwz1rst1 (TMM3, base, stride); } diff --git a/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c b/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c index 6a33986..6a522b5 100644 --- a/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c +++ b/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c @@ -1,12 +1,8 @@ /* { dg-do compile { target { ! ia32 } } } */ /* { dg-require-effective-target masm_intel } */ -/* { dg-options "-O2 -mamx-movrs -mamx-transpose -masm=intel" } */ +/* { dg-options "-O2 -mamx-movrs -masm=intel" } */ /* { dg-final { scan-assembler-times "tileloaddrs\[ \\t]%tmm\[0-9\]" 1 } } */ /* { dg-final { scan-assembler-times "tileloaddrst1\[ \\t]%tmm\[0-9\]" 1 } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rs\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rst1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rs\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rst1\[ \\t]%tmm\[0-9\]" } } */ #include <immintrin.h> extern const void* base; @@ -21,8 +17,4 @@ void TEST() { _tile_loaddrs (TMM1, base, stride); _tile_loaddrst1 (TMM1, base, stride); - _tile_2rpntlvwz0rs (TMM0, base, stride); - _tile_2rpntlvwz0rst1 (TMM1, base, stride); - _tile_2rpntlvwz1rs (TMM2, base, stride); - _tile_2rpntlvwz1rst1 (TMM3, base, stride); } diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-2rpntlvw-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-2rpntlvw-2.c deleted file mode 100644 index 2d01827..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-2rpntlvw-2.c +++ /dev/null @@ -1,54 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-options "-O2 -mamx-transpose" } */ -#define AMX_TRANSPOSE -#define DO_TEST test_amx_transpose_t2rpntlvw -void test_amx_transpose_t2rpntlvw (); -#include "amx-helper.h" -#define init_pair_tile_reg_and_src_z_t(tmm_num, src, buffer, ztype, wtype) \ -{ \ - init_pair_tile_src (tmm_num, &src, buffer, ztype); \ - _tile_2rpntlvwz##ztype##wtype (tmm_num, buffer, _STRIDE); \ -} - -void test_amx_transpose_t2rpntlvw () -{ - __tilecfg_u cfg; - __tilepair src; - __tile ref_0, ref_1; - uint8_t buffer[2048]; - int i; - - init_tile_config (&cfg); - - for (i = 0; i < 2048; i++) - buffer[i] = i % 256; - - /* Check t2rpntlvwz0. 
*/ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz0t1. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1t1. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-asmatt-1.c b/gcc/testsuite/gcc.target/i386/amxtranspose-asmatt-1.c deleted file mode 100644 index a970f5d..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-asmatt-1.c +++ /dev/null @@ -1,39 +0,0 @@ -/* { dg-do compile { target { ! ia32 } } } */ -/* { dg-options "-O2 -mamx-transpose -mamx-bf16 -mamx-complex -mamx-fp16 -mamx-tf32" } */ -/* { dg-final { scan-assembler "ttdpbf16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttdpfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttransposed\[ \\t]+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0t1\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1t1\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "tconjtcmmimfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "tconjtfp16\[ \\t]+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttcmmimfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttcmmrlfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttmmultf32ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -#include <immintrin.h> - -extern const void* base; -extern const int stride; - -#define TMM0 0 -#define TMM1 1 -#define TMM2 2 -#define TMM3 3 - -void TEST() -{ - _tile_tdpbf16ps (TMM1, TMM2, TMM3); - _tile_tdpfp16ps (TMM1, TMM2, TMM3); - _tile_transposed (TMM1, TMM2); - _tile_2rpntlvwz0 (TMM0, base, stride); - _tile_2rpntlvwz0t1 (TMM1, base, stride); - _tile_2rpntlvwz1 (TMM2, base, stride); - _tile_2rpntlvwz1t1 (TMM3, base, stride); - _tile_conjtcmmimfp16ps (TMM1, TMM2, TMM3); - _tile_conjtfp16 (TMM1, TMM2); - _tile_tcmmimfp16ps (TMM1, TMM2, TMM3); - _tile_tcmmrlfp16ps (TMM1, TMM2, TMM3); - _tile_tmmultf32ps (TMM1, TMM2, TMM3); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-asmintel-1.c b/gcc/testsuite/gcc.target/i386/amxtranspose-asmintel-1.c deleted file mode 100644 index 2cf73ae..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-asmintel-1.c +++ /dev/null @@ -1,35 +0,0 @@ -/* { dg-do 
compile { target { ! ia32 } } } */ -/* { dg-require-effective-target masm_intel } */ -/* { dg-options "-O2 -mamx-transpose -mamx-bf16 -mamx-complex -mamx-fp16 -mamx-tf32 -masm=intel" } */ -/* { dg-final { scan-assembler "ttdpbf16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttdpfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttransposed\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0t1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1t1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "tconjtcmmimfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "tconjtfp16\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2" } } */ -/* { dg-final { scan-assembler "ttcmmimfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttcmmrlfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttmmultf32ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -#include <immintrin.h> - -extern const void* base; -extern const int stride; - -void TEST() -{ - _tile_tdpbf16ps (1, 2, 3); - _tile_tdpfp16ps (1, 2, 3); - _tile_transposed (1, 2); - _tile_2rpntlvwz0 (5, base, stride); - _tile_2rpntlvwz0t1 (4, base, stride); - _tile_2rpntlvwz1 (3, base, stride); - _tile_2rpntlvwz1t1 (2, base, stride); - _tile_conjtcmmimfp16ps (1, 2, 3); - _tile_conjtfp16 (1, 2); - _tile_tcmmimfp16ps (1, 2, 3); - _tile_tcmmrlfp16ps (1, 2, 3); - _tile_tmmultf32ps (1, 2, 3); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtcmmimfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-conjtcmmimfp16ps-2.c deleted file mode 100644 index 159867d..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtcmmimfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_conjtcmmimfp16ps -void test_amx_transpose_conjtcmmimfp16ps (); -#include "amx-helper.h" - -void calc_matrix_conjtcmmimfp16ps (__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t + 1])) - - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t])); -} - -void test_amx_transpose_conjtcmmimfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_conjtcmmimfp16ps (&dst, &src1, &src2); - - _tile_conjtcmmimfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtfp16-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-conjtfp16-2.c deleted file mode 100644 index 710d76a..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtfp16-2.c +++ /dev/null @@ -1,48 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_conjtfp16 -void test_amx_transpose_conjtfp16 (); -#include "amx-helper.h" - -void calc_matrix_conjtfp16 (__tile *dst, __tile *src) -{ - uint16_t *src_buf = (uint16_t *) src->buf; - float *dst_buf = (float *) dst->buf; - - int M = dst->rows; - int N = dst->colsb / 4; - int i, j, t; - - for (i = 0; i < M; i++) - for (j = 0; j < N; j++) - for (t = 0; t < 2; t+=2) - { - dst_buf[i * 2 * N + 2 * j + t] = src_buf[j * 2 * M + 2 * i + t]; - dst_buf[i * 2 * N + 2 * j + t + 1] = -src_buf[j * 2 * M + 2 * i + t + 1]; - } -} - -void test_amx_transpose_conjtfp16 () -{ - __tilecfg_u cfg; - __tile src, dst, ref; - uint8_t tmp_dst_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (2, src, tmp_dst_buf); - - /* Check tconjtfp16. 
*/ - calc_matrix_conjtfp16 (&dst, &src); - _tile_conjtfp16 (1, 2); - _tile_stored (1, ref.buf, _STRIDE); - - if (!check_tile_register (&ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmimfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmimfp16ps-2.c deleted file mode 100644 index e2a0f10..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmimfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_tcmmimfp16ps -void test_amx_transpose_tcmmimfp16ps (); -#include "amx-helper.h" - -void calc_matrix_tcmmimfp16ps (__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t + 1])) + - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t])); -} - -void test_amx_transpose_tcmmimfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tcmmimfp16ps (&dst, &src1, &src2); - - _tile_tcmmimfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmrlfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmrlfp16ps-2.c deleted file mode 100644 index b09186c..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmrlfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_tcmmrlfp16ps -void test_amx_transpose_tcmmrlfp16ps (); -#include "amx-helper.h" - -void calc_matrix_tcmmrlfp16ps (__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t])) - - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t + 1])); -} - -void test_amx_transpose_tcmmrlfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tcmmrlfp16ps (&dst, &src1, &src2); - - _tile_tcmmrlfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpbf16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tdpbf16ps-2.c deleted file mode 100644 index 6a3226b..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpbf16ps-2.c +++ /dev/null @@ -1,53 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_bf16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-bf16 -mavx512bf16" } */ -#define AMX_TRANSPOSE -#define AMX_BF16 -#define DO_TEST test_amx_transpose_tdpbf16ps -void test_amx_transpose_tdpbf16ps (); -#include "amx-helper.h" - -void calc_matrix_tdpbf16ps(__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_bf16_f32 (src1_buf[k * 2 * M + 2 * m + t]) * - make_bf16_f32 (src2_buf[k * 2 * N + 2 * n + t])) + - (make_bf16_f32 (src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_bf16_f32 (src2_buf[k * 2 * N + 2 * n + t + 1])); -} - -void test_amx_transpose_tdpbf16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024]; - - init_bf16_max_tile_buffer (tmp_dst_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tdpbf16ps (&dst, &src1, &src2); - - _tile_tdpbf16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_float_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tdpfp16ps-2.c deleted file mode 100644 index 83c3715..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_fp16 } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-fp16 -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_FP16 -#define DO_TEST test_amx_transpose_tdpfp16ps -void test_amx_transpose_tdpfp16ps (); -#include "amx-helper.h" - -void calc_matrix_tdpfp16ps(__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32 (src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32 (src2_buf[k * 2 * N + 2 * n + t])) + - (make_fp16_f32 (src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32 (src2_buf[k * 2 * N + 2 * n + t + 1])); -} - -void test_amx_transpose_tdpfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer(tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tdpfp16ps (&dst, &src1, &src2); - - _tile_tdpfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_float_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tmmultf32ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tmmultf32ps-2.c deleted file mode 100644 index 44166c1..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tmmultf32ps-2.c +++ /dev/null @@ -1,51 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_tf32 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-tf32" } */ -#define AMX_TRANSPOSE -#define AMX_TF32 -#define DO_TEST test_amx_transpose_tmmultf32ps -void test_amx_transpose_tmmultf32ps(); -#include "amx-helper.h" - -void calc_matrix_tmmultf32ps(__tile *dst, __tile *src1, __tile *src2) -{ - float *src1_buf = (float *) src1->buf; - float *src2_buf = (float *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, n, k; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - dst_buf[m * N + n] += - zero_lower_mantissa_bits_fp32 (silence_snan_fp32 (src1_buf[k * M + m])) * - zero_lower_mantissa_bits_fp32 (silence_snan_fp32 (src2_buf[k * N + n])); - -} - -void test_amx_transpose_tmmultf32ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024]; - - init_fp32_max_tile_buffer (tmp_dst_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tmmultf32ps (&dst, &src1, &src2); - - _tile_tmmultf32ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-transposed-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-transposed-2.c deleted file mode 100644 index 73c709c..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-transposed-2.c +++ /dev/null @@ -1,39 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-options "-O2 -mamx-transpose" } */ -#define AMX_TRANSPOSE -#define DO_TEST test_amx_transpose_transposed -void test_amx_transpose_transposed (); -#include "amx-helper.h" - -void calc_matrix_ttransposed (__tile *dst, __tile *src) -{ - uint32_t *src_buf = (uint32_t *) src->buf; - uint32_t *dst_buf = (uint32_t *) dst->buf; - - int M = src->rows; - int N = src->colsb / 4; - int i, j; - - for (i = 0; i < M; i++) - for (j = 0; j < N; j++) - dst_buf[j * M + i] = (uint32_t) src_buf[i * N + j]; -} - -void test_amx_transpose_transposed () -{ - __tilecfg_u cfg; - __tile src, dst, ref; - - init_tile_config (&cfg); - init_tile_reg_and_src (1, dst); - init_tile_reg_and_src (2, src); - - /* Check ttransposed. 
*/ - calc_matrix_ttransposed (&dst, &src); - _tile_transposed (1, 2); - _tile_stored (1, ref.buf, _STRIDE); - - if (!check_tile_register (&ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc index e462ead..3d9af7a 100644 --- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc +++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc @@ -90,7 +90,6 @@ extern void test_user_msr (void) __attribute__((__target__("usermsr"))); extern void test_avx10_2 (void) __attribute__((__target__("avx10.2"))); extern void test_amx_avx512 (void) __attribute__((__target__("amx-avx512"))); extern void test_amx_tf32 (void) __attribute__((__target__("amx-tf32"))); -extern void test_amx_transpose (void) __attribute__((__target__("amx-transpose"))); extern void test_amx_fp8 (void) __attribute__((__target__("amx-fp8"))); extern void test_movrs (void) __attribute__((__target__("movrs"))); extern void test_amx_movrs (void) __attribute__((__target__("amx-movrs"))); @@ -185,7 +184,6 @@ extern void test_no_user_msr (void) __attribute__((__target__("no-usermsr"))); extern void test_no_avx10_2 (void) __attribute__((__target__("no-avx10.2"))); extern void test_no_amx_avx512 (void) __attribute__((__target__("no-amx-avx512"))); extern void test_no_amx_tf32 (void) __attribute__((__target__("no-amx-tf32"))); -extern void test_no_amx_transpose (void) __attribute__((__target__("no-amx-transpose"))); extern void test_no_amx_fp8 (void) __attribute__((__target__("no-amx-fp8"))); extern void test_no_movrs (void) __attribute__((__target__("no-movrs"))); extern void test_no_amx_movrs (void) __attribute__((__target__("no-amx-movrs"))); diff --git a/gcc/testsuite/gcc.target/i386/sse-12.c b/gcc/testsuite/gcc.target/i386/sse-12.c index cabccb0..fc406b0 100644 --- a/gcc/testsuite/gcc.target/i386/sse-12.c +++ b/gcc/testsuite/gcc.target/i386/sse-12.c @@ -3,7 +3,7 @@ popcntintrin.h gfniintrin.h and mm_malloc.h are usable with -O -std=c89 -pedantic-errors. 
*/ /* { dg-do compile } */ -/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ #include <x86intrin.h> diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c index 189e19e..7541956 100644 --- a/gcc/testsuite/gcc.target/i386/sse-13.c +++ b/gcc/testsuite/gcc.target/i386/sse-13.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-add-options bind_pic_locally } */ #include <mm_malloc.h> diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c index f3b7c112..1e86c75 100644 --- a/gcc/testsuite/gcc.target/i386/sse-14.c +++ b/gcc/testsuite/gcc.target/i386/sse-14.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha 
-mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-add-options bind_pic_locally } */ #include <mm_malloc.h> diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c index 0cb0368..418fe23 100644 --- a/gcc/testsuite/gcc.target/i386/sse-22.c +++ b/gcc/testsuite/gcc.target/i386/sse-22.c @@ -103,7 +103,7 @@ #ifndef DIFFERENT_PRAGMAS -#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-transpose,amx-fp8,movrs,amx-movrs") +#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-fp8,movrs,amx-movrs") #endif /* Following intrinsics require immediate arguments. 
They @@ -220,7 +220,7 @@ test_4 (_mm_cmpestrz, int, __m128i, int, __m128i, int, 1) /* immintrin.h (AVX/AVX2/RDRND/FSGSBASE/F16C/RTM/AVX512F/SHA) */ #ifdef DIFFERENT_PRAGMAS -#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-transpose,amx-fp8,movrs,amx-movrs") +#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-fp8,movrs,amx-movrs") #endif #include <immintrin.h> test_1 (_cvtss_sh, unsigned short, float, 1) diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c index 95db1f7..f9b0613 100644 --- a/gcc/testsuite/gcc.target/i386/sse-23.c +++ b/gcc/testsuite/gcc.target/i386/sse-23.c @@ -895,6 +895,6 @@ #define __builtin_ia32_minmaxsh_mask_round(A, B, C, D, E, F) __builtin_ia32_minmaxsh_mask_round (A, B, 100, D, E, 4) #define __builtin_ia32_minmaxss_mask_round(A, B, C, D, E, F) __builtin_ia32_minmaxss_mask_round (A, B, 100, D, E, 4) -#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-transpose,amx-fp8,movrs,amx-movrs") +#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-fp8,movrs,amx-movrs") #include <x86intrin.h> diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-1.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-1.c new file mode 100644 index 0000000..5fc17ab --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-1.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvi20u64 -mabi=lp64" } */ + +int main () { + +#ifndef __riscv_rvi20u64 +#error "__riscv_rvi20u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-2.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-2.c new file mode 100644 index 0000000..86f2771 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-2.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvi20u32 -mabi=ilp32" } */ + +int main () { + +#ifndef __riscv_rvi20u32 +#error "__riscv_rvi20u32" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-3.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-3.c new file mode 100644 index 0000000..7787549 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-3.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva20u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva20u64 +#error "__riscv_rva20u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-4.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-4.c new file mode 100644 index 0000000..abb20b7 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-4.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva22u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva22u64 +#error "__riscv_rva22u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-5.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-5.c new file mode 100644 index 0000000..0840cdc --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-5.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva23u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva23u64 +#error "__riscv_rva23u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-6.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-6.c new file mode 100644 index 0000000..7159780 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-6.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva23s64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva23s64 +#error "__riscv_rva23s64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-7.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-7.c new file mode 100644 index 0000000..1366159 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-7.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvb23u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rvb23u64 +#error "__riscv_rvb23u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-8.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-8.c new file mode 100644 index 0000000..c0c5003 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-8.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvb23s64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rvb23s64 +#error "__riscv_rvb23s64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp index 1acfb37..4ee8d12 100644 --- a/gcc/testsuite/lib/target-supports.exp +++ b/gcc/testsuite/lib/target-supports.exp @@ -11238,17 +11238,6 @@ proc check_effective_target_amx_tf32 { } { } "-mamx-tf32" ] } -# Return 1 if amx-transpose instructions can be compiled. -proc check_effective_target_amx_transpose { } { - return [check_no_compiler_messages amx_transpose object { - void - foo () - { - __asm__ volatile ("ttransposed\t%%tmm1, %%tmm2" ::); - } - } "-mamx-transpose" ] -} - # Return 1 if amx-fp8 instructions can be compiled. proc check_effective_target_amx_fp8 { } { return [check_no_compiler_messages amx_fp8 object { diff --git a/gcc/tree-ssa-dom.cc b/gcc/tree-ssa-dom.cc index b1ac35e..087d842 100644 --- a/gcc/tree-ssa-dom.cc +++ b/gcc/tree-ssa-dom.cc @@ -2040,11 +2040,6 @@ cprop_operand (gimple *stmt, use_operand_p op_p, range_query *query) if (val && val != op) { - /* Do not replace hard register operands in asm statements. */ - if (gimple_code (stmt) == GIMPLE_ASM - && !may_propagate_copy_into_asm (op)) - return; - /* Certain operands are not allowed to be copy propagated due to their interaction with exception handling and some GCC extensions. */ diff --git a/gcc/tree-ssa-phiopt.cc b/gcc/tree-ssa-phiopt.cc index 3d6673c..031184d 100644 --- a/gcc/tree-ssa-phiopt.cc +++ b/gcc/tree-ssa-phiopt.cc @@ -3648,16 +3648,20 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb, if (then_assign == NULL || !gimple_assign_single_p (then_assign) - || gimple_clobber_p (then_assign) - || gimple_has_volatile_ops (then_assign) || else_assign == NULL || !gimple_assign_single_p (else_assign) - || gimple_clobber_p (else_assign) - || gimple_has_volatile_ops (else_assign) || stmt_references_abnormal_ssa_name (then_assign) || stmt_references_abnormal_ssa_name (else_assign)) return false; + /* Allow both being clobbers but no other volatile operations. */ + if (gimple_clobber_p (then_assign) + && gimple_clobber_p (else_assign)) + ; + else if (gimple_has_volatile_ops (then_assign) + || gimple_has_volatile_ops (else_assign)) + return false; + lhs = gimple_assign_lhs (then_assign); if (!operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0)) return false; @@ -3674,7 +3678,14 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb, if (!is_gimple_reg_type (TREE_TYPE (lhs))) { - if (!operand_equal_p (then_rhs, else_rhs)) + /* Handle clobbers seperately as operand_equal_p does not check + the kind of the clobbers being the same. */ + if (TREE_CLOBBER_P (then_rhs) && TREE_CLOBBER_P (else_rhs)) + { + if (CLOBBER_KIND (then_rhs) != CLOBBER_KIND (else_rhs)) + return false; + } + else if (!operand_equal_p (then_rhs, else_rhs)) return false; /* Currently only handle commoning of `= {}`. 
*/ if (TREE_CODE (then_rhs) != CONSTRUCTOR) @@ -3683,7 +3694,10 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb, if (dump_file && (dump_flags & TDF_DETAILS)) { - fprintf(dump_file, "factoring out stores:\n\tthen:\n"); + if (TREE_CLOBBER_P (then_rhs)) + fprintf(dump_file, "factoring out clobber:\n\tthen:\n"); + else + fprintf(dump_file, "factoring out stores:\n\tthen:\n"); print_gimple_stmt (dump_file, then_assign, 0, TDF_VOPS|TDF_MEMSYMS); fprintf(dump_file, "\telse:\n"); @@ -4555,8 +4569,8 @@ pass_phiopt::execute (function *) hoist_adjacent_loads (bb, bb1, bb2, bb3); /* Try to see if there are only store in each side of the if - and try to remove that. */ - if (EDGE_COUNT (bb3->preds) == 2) + and try to remove that; don't do this for -Og. */ + if (EDGE_COUNT (bb3->preds) == 2 && !optimize_debug) while (cond_if_else_store_replacement_limited (bb1, bb2, bb3)) ; } @@ -4572,7 +4586,8 @@ pass_phiopt::execute (function *) /* Factor out operations from the phi if possible. */ if (single_pred_p (bb1) - && EDGE_COUNT (merge->preds) == 2) + && EDGE_COUNT (merge->preds) == 2 + && !optimize_debug) { for (gsi = gsi_start (phis); !gsi_end_p (gsi); ) { diff --git a/gcc/tree-ssa-propagate.cc b/gcc/tree-ssa-propagate.cc index 872f881..f02b10d 100644 --- a/gcc/tree-ssa-propagate.cc +++ b/gcc/tree-ssa-propagate.cc @@ -578,10 +578,6 @@ substitute_and_fold_engine::replace_uses_in (gimple *stmt) if (val == tuse || val == NULL_TREE) continue; - if (gimple_code (stmt) == GIMPLE_ASM - && !may_propagate_copy_into_asm (tuse)) - continue; - if (!may_propagate_copy (tuse, val)) continue; @@ -1142,15 +1138,6 @@ may_propagate_copy_into_stmt (gimple *dest, tree orig) return true; } -/* Similarly, but we know that we're propagating into an ASM_EXPR. */ - -bool -may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED) -{ - return true; -} - - /* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME). Use this version when not const/copy propagating values. For example, diff --git a/gcc/tree-ssa-propagate.h b/gcc/tree-ssa-propagate.h index 200fc73..7819c0c 100644 --- a/gcc/tree-ssa-propagate.h +++ b/gcc/tree-ssa-propagate.h @@ -67,7 +67,6 @@ extern void move_ssa_defining_stmt_for_defs (gimple *, gimple *); extern bool stmt_makes_single_store (gimple *); extern bool may_propagate_copy (tree, tree, bool = false); extern bool may_propagate_copy_into_stmt (gimple *, tree); -extern bool may_propagate_copy_into_asm (tree); extern void propagate_value (use_operand_p, tree); extern void replace_exp (use_operand_p, tree); extern void propagate_tree_value (tree *, tree); diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc index 73398e5..97c1bf0 100644 --- a/gcc/tree-vect-loop.cc +++ b/gcc/tree-vect-loop.cc @@ -161,7 +161,7 @@ along with GCC; see the file COPYING3. If not see static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *, unsigned *); static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info, - gphi **, bool *, bool); + gphi **); /* Function vect_is_simple_iv_evolution. @@ -341,8 +341,7 @@ vect_phi_first_order_recurrence_p (loop_vec_info loop_vinfo, class loop *loop, slp analyses or not. 
*/ static void -vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, - bool slp) +vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop) { basic_block bb = loop->header; auto_vec<stmt_vec_info, 64> worklist; @@ -425,19 +424,15 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); gphi *double_reduc; - bool reduc_chain; stmt_vec_info reduc_stmt_info - = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc, - &reduc_chain, slp); + = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc); if (reduc_stmt_info && double_reduc) { - bool inner_chain; stmt_vec_info inner_phi_info = loop_vinfo->lookup_stmt (double_reduc); /* ??? Pass down flag we're the inner loop of a double reduc. */ stmt_vec_info inner_reduc_info - = vect_is_simple_reduction (loop_vinfo, inner_phi_info, - NULL, &inner_chain, slp); + = vect_is_simple_reduction (loop_vinfo, inner_phi_info, NULL); if (inner_reduc_info) { STMT_VINFO_REDUC_DEF (stmt_vinfo) = reduc_stmt_info; @@ -478,12 +473,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def; - /* Store the reduction cycles for possible vectorization in - loop-aware SLP if it was not detected as reduction - chain. */ - if (! reduc_chain) - LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push - (reduc_stmt_info); + LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt_info); } } else if (vect_phi_first_order_recurrence_p (loop_vinfo, loop, phi)) @@ -518,11 +508,11 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, a[i] = i; */ static void -vect_analyze_scalar_cycles (loop_vec_info loop_vinfo, bool slp) +vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) { class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - vect_analyze_scalar_cycles_1 (loop_vinfo, loop, slp); + vect_analyze_scalar_cycles_1 (loop_vinfo, loop); /* When vectorizing an outer-loop, the inner-loop is executed sequentially. Reductions in such inner-loop therefore have different properties than @@ -534,87 +524,7 @@ vect_analyze_scalar_cycles (loop_vec_info loop_vinfo, bool slp) current checks are too strict. */ if (loop->inner) - vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner, slp); -} - -/* Transfer group and reduction information from STMT_INFO to its - pattern stmt. */ - -static void -vect_fixup_reduc_chain (stmt_vec_info stmt_info) -{ - stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info); - stmt_vec_info stmtp; - gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp) - && REDUC_GROUP_FIRST_ELEMENT (stmt_info)); - REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info); - do - { - stmtp = STMT_VINFO_RELATED_STMT (stmt_info); - gcc_checking_assert (STMT_VINFO_DEF_TYPE (stmtp) - == STMT_VINFO_DEF_TYPE (stmt_info)); - REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp; - stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info); - if (stmt_info) - REDUC_GROUP_NEXT_ELEMENT (stmtp) - = STMT_VINFO_RELATED_STMT (stmt_info); - } - while (stmt_info); -} - -/* Fixup scalar cycles that now have their stmts detected as patterns. 
*/ - -static void -vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo) -{ - stmt_vec_info first; - unsigned i; - - FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first) - { - stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first); - while (next) - { - if ((STMT_VINFO_IN_PATTERN_P (next) - != STMT_VINFO_IN_PATTERN_P (first)) - || STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (next)) == -1) - break; - next = REDUC_GROUP_NEXT_ELEMENT (next); - } - /* If all reduction chain members are well-formed patterns adjust - the group to group the pattern stmts instead. */ - if (! next - && STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (first)) != -1) - { - if (STMT_VINFO_IN_PATTERN_P (first)) - { - vect_fixup_reduc_chain (first); - LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i] - = STMT_VINFO_RELATED_STMT (first); - } - } - /* If not all stmt in the chain are patterns or if we failed - to update STMT_VINFO_REDUC_IDX dissolve the chain and handle - it as regular reduction instead. */ - else - { - stmt_vec_info vinfo = first; - stmt_vec_info last = NULL; - while (vinfo) - { - next = REDUC_GROUP_NEXT_ELEMENT (vinfo); - REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL; - REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL; - last = vinfo; - vinfo = next; - } - STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize (first)) - = vect_internal_def; - loop_vinfo->reductions.safe_push (vect_stmt_to_vectorize (last)); - LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).unordered_remove (i); - --i; - } - } + vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); } /* Function vect_get_loop_niters. @@ -2264,12 +2174,10 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, /* Classify all cross-iteration scalar data-flow cycles. Cross-iteration cycles caused by virtual phis are analyzed separately. */ - vect_analyze_scalar_cycles (loop_vinfo, !force_single_lane); + vect_analyze_scalar_cycles (loop_vinfo); vect_pattern_recog (loop_vinfo); - vect_fixup_scalar_cycles_with_patterns (loop_vinfo); - /* Analyze the access patterns of the data-refs in the loop (consecutive, complex, etc.). FORNOW: Only handle consecutive access pattern. */ @@ -2678,10 +2586,6 @@ again: if (applying_suggested_uf) return ok; - /* If there are reduction chains re-trying will fail anyway. */ - if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) - return ok; - /* Likewise if the grouped loads or stores in the SLP cannot be handled via interleaving or lane instructions. */ slp_instance instance; @@ -3756,7 +3660,7 @@ check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi, static stmt_vec_info vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, - gphi **double_reduc, bool *reduc_chain_p, bool slp) + gphi **double_reduc) { gphi *phi = as_a <gphi *> (phi_info->stmt); gimple *phi_use_stmt = NULL; @@ -3768,7 +3672,6 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, bool inner_loop_of_double_reduc = double_reduc == NULL; if (double_reduc) *double_reduc = NULL; - *reduc_chain_p = false; STMT_VINFO_REDUC_TYPE (phi_info) = TREE_CODE_REDUCTION; tree phi_name = PHI_RESULT (phi); @@ -3918,12 +3821,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, if (code == COND_EXPR && !nested_in_vect_loop) STMT_VINFO_REDUC_TYPE (phi_info) = COND_REDUCTION; - /* Fill in STMT_VINFO_REDUC_IDX and gather stmts for an SLP - reduction chain for which the additional restriction is that - all operations in the chain are the same. 
*/ - auto_vec<stmt_vec_info, 8> reduc_chain; + /* Fill in STMT_VINFO_REDUC_IDX. */ unsigned i; - bool is_slp_reduc = !nested_in_vect_loop && code != COND_EXPR; for (i = path.length () - 1; i >= 1; --i) { gimple *stmt = USE_STMT (path[i].second); @@ -3940,39 +3839,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, STMT_VINFO_REDUC_IDX (stmt_info) = path[i].second->use - gimple_call_arg_ptr (call, 0); } - bool leading_conversion = (CONVERT_EXPR_CODE_P (op.code) - && (i == 1 || i == path.length () - 1)); - if ((op.code != code && !leading_conversion) - /* We can only handle the final value in epilogue - generation for reduction chains. */ - || (i != 1 && !has_single_use (gimple_get_lhs (stmt)))) - is_slp_reduc = false; - /* For reduction chains we support a trailing/leading - conversions. We do not store those in the actual chain. */ - if (leading_conversion) - continue; - reduc_chain.safe_push (stmt_info); } - if (slp && is_slp_reduc && reduc_chain.length () > 1) - { - for (unsigned i = 0; i < reduc_chain.length () - 1; ++i) - { - REDUC_GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0]; - REDUC_GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1]; - } - REDUC_GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0]; - REDUC_GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL; - - /* Save the chain for further analysis in SLP detection. */ - LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]); - REDUC_GROUP_SIZE (reduc_chain[0]) = reduc_chain.length (); - - *reduc_chain_p = true; - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "reduction: detected reduction chain\n"); - } - else if (dump_enabled_p ()) + if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "reduction: detected reduction\n"); @@ -5390,7 +5258,6 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, tree new_temp = NULL_TREE, new_name, new_scalar_dest; gimple *epilog_stmt = NULL; gimple *exit_phi; - tree bitsize; tree def; tree orig_name, scalar_result; imm_use_iterator imm_iter; @@ -5405,8 +5272,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, # b1 = phi <b2, b0> a2 = operation (a1) b2 = operation (b1) */ - const bool slp_reduc - = SLP_INSTANCE_KIND (slp_node_instance) != slp_inst_kind_reduc_chain; + const bool slp_reduc = !reduc_info->is_reduc_chain; tree induction_index = NULL_TREE; unsigned int group_size = SLP_TREE_LANES (slp_node); @@ -5608,7 +5474,6 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, scalar_results.truncate (0); scalar_results.reserve_exact (group_size); new_scalar_dest = vect_create_destination_var (scalar_dest, NULL); - bitsize = TYPE_SIZE (scalar_type); /* True if we should implement SLP_REDUC using native reduction operations instead of scalar operations. 
*/ @@ -6030,6 +5895,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, if (reduce_with_shift && (!slp_reduc || group_size == 1)) { + tree bitsize = TYPE_SIZE (TREE_TYPE (vectype1)); int element_bitsize = tree_to_uhwi (bitsize); /* Enforced by vectorizable_reduction, which disallows SLP reductions for variable-length vectors and also requires direct target support @@ -6098,9 +5964,10 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, dump_printf_loc (MSG_NOTE, vect_location, "Reduce using scalar code.\n"); + tree compute_type = TREE_TYPE (vectype1); + tree bitsize = TYPE_SIZE (compute_type); int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1)); int element_bitsize = tree_to_uhwi (bitsize); - tree compute_type = TREE_TYPE (vectype); gimple_seq stmts = NULL; FOR_EACH_VEC_ELT (reduc_inputs, i, vec_temp) { @@ -6956,8 +6823,6 @@ vectorizable_reduction (loop_vec_info loop_vinfo, bool single_defuse_cycle = false; tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE; tree cond_reduc_val = NULL_TREE; - const bool reduc_chain - = SLP_INSTANCE_KIND (slp_node_instance) == slp_inst_kind_reduc_chain; /* Make sure it was already recognized as a reduction computation. */ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def @@ -7019,6 +6884,7 @@ vectorizable_reduction (loop_vec_info loop_vinfo, double_reduc = true; } + const bool reduc_chain = reduc_info->is_reduc_chain; slp_node_instance->reduc_phis = slp_node; /* ??? We're leaving slp_node to point to the PHIs, we only need it to get at the number of vector stmts which wasn't @@ -7030,33 +6896,28 @@ vectorizable_reduction (loop_vec_info loop_vinfo, /* Verify following REDUC_IDX from the latch def leads us back to the PHI and compute the reduction chain length. Discover the real - reduction operation stmt on the way (stmt_info and slp_for_stmt_info). */ - tree reduc_def - = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi, loop_latch_edge (loop)); + reduction operation stmt on the way (slp_for_stmt_info). */ unsigned reduc_chain_length = 0; - bool only_slp_reduc_chain = true; stmt_info = NULL; slp_tree slp_for_stmt_info = NULL; slp_tree vdef_slp = slp_node_instance->root; - /* For double-reductions we start SLP analysis at the inner loop LC PHI - which is the def of the outer loop live stmt. */ - if (double_reduc) - vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[0]; - while (reduc_def != PHI_RESULT (reduc_def_phi)) + while (vdef_slp != slp_node) { - stmt_vec_info def = loop_vinfo->lookup_def (reduc_def); - stmt_vec_info vdef = vect_stmt_to_vectorize (def); - int reduc_idx = STMT_VINFO_REDUC_IDX (vdef); - if (STMT_VINFO_REDUC_IDX (vdef) == -1 - || SLP_TREE_REDUC_IDX (vdef_slp) == -1) + int reduc_idx = SLP_TREE_REDUC_IDX (vdef_slp); + if (reduc_idx == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction chain broken by patterns.\n"); return false; } - if (!REDUC_GROUP_FIRST_ELEMENT (vdef)) - only_slp_reduc_chain = false; + stmt_vec_info vdef = SLP_TREE_REPRESENTATIVE (vdef_slp); + if (is_a <gphi *> (vdef->stmt)) + { + vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[reduc_idx]; + /* Do not count PHIs towards the chain length. */ + continue; + } gimple_match_op op; if (!gimple_extract_op (vdef->stmt, &op)) { @@ -7080,11 +6941,8 @@ vectorizable_reduction (loop_vec_info loop_vinfo, else { /* First non-conversion stmt. 
*/ - if (!stmt_info) - { - stmt_info = vdef; - slp_for_stmt_info = vdef_slp; - } + if (!slp_for_stmt_info) + slp_for_stmt_info = vdef_slp; if (lane_reducing_op_p (op.code)) { @@ -7116,29 +6974,15 @@ vectorizable_reduction (loop_vec_info loop_vinfo, } else if (!vectype_in) vectype_in = SLP_TREE_VECTYPE (slp_node); - if (!REDUC_GROUP_FIRST_ELEMENT (vdef)) - { - gcc_assert (reduc_idx == SLP_TREE_REDUC_IDX (vdef_slp)); - vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[reduc_idx]; - } + vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[reduc_idx]; } - - reduc_def = op.ops[reduc_idx]; reduc_chain_length++; } + stmt_info = SLP_TREE_REPRESENTATIVE (slp_for_stmt_info); + /* PHIs should not participate in patterns. */ gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info)); - /* STMT_VINFO_REDUC_DEF doesn't point to the first but the last - element. */ - if (REDUC_GROUP_FIRST_ELEMENT (stmt_info)) - { - gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (stmt_info)); - stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info); - } - if (REDUC_GROUP_FIRST_ELEMENT (stmt_info)) - gcc_assert (REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info); - /* 1. Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop, unless it's a reduction chain. */ @@ -7453,8 +7297,7 @@ vectorizable_reduction (loop_vec_info loop_vinfo, { /* When vectorizing a reduction chain w/o SLP the reduction PHI is not directy used in stmt. */ - if (!only_slp_reduc_chain - && reduc_chain_length != 1) + if (reduc_chain_length != 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -7789,22 +7632,18 @@ vectorizable_reduction (loop_vec_info loop_vinfo, /* All but single defuse-cycle optimized and fold-left reductions go through their own vectorizable_* routines. */ + stmt_vec_info tem + = SLP_TREE_REPRESENTATIVE (SLP_INSTANCE_TREE (slp_node_instance)); if (!single_defuse_cycle && reduction_type != FOLD_LEFT_REDUCTION) + STMT_VINFO_DEF_TYPE (tem) = vect_internal_def; + else { - stmt_vec_info tem - = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info)); - if (REDUC_GROUP_FIRST_ELEMENT (tem)) - { - gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (tem)); - tem = REDUC_GROUP_FIRST_ELEMENT (tem); - } - STMT_VINFO_DEF_TYPE (vect_orig_stmt (tem)) = vect_internal_def; - STMT_VINFO_DEF_TYPE (tem) = vect_internal_def; + STMT_VINFO_DEF_TYPE (tem) = vect_reduction_def; + if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) + vect_reduction_update_partial_vector_usage (loop_vinfo, reduc_info, + slp_node, op.code, op.type, + vectype_in); } - else if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) - vect_reduction_update_partial_vector_usage (loop_vinfo, reduc_info, - slp_node, op.code, op.type, - vectype_in); return true; } @@ -8238,8 +8077,6 @@ vect_transform_cycle_phi (loop_vec_info loop_vinfo, int i; bool nested_cycle = false; int vec_num; - const bool reduc_chain - = SLP_INSTANCE_KIND (slp_node_instance) == slp_inst_kind_reduc_chain; if (nested_in_vect_loop_p (loop, stmt_info)) { @@ -8308,7 +8145,7 @@ vect_transform_cycle_phi (loop_vec_info loop_vinfo, vec<stmt_vec_info> &stmts = SLP_TREE_SCALAR_STMTS (slp_node); unsigned int num_phis = stmts.length (); - if (reduc_chain) + if (reduc_info->is_reduc_chain) num_phis = 1; initial_values.reserve (num_phis); for (unsigned int i = 0; i < num_phis; ++i) diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc index 74a9a19..becee62 100644 --- a/gcc/tree-vect-patterns.cc +++ b/gcc/tree-vect-patterns.cc @@ -1022,13 +1022,11 @@ vect_reassociating_reduction_p (vec_info *vinfo, 
if (loop && nested_in_vect_loop_p (loop, stmt_info)) return false; - if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) - { - if (needs_fold_left_reduction_p (TREE_TYPE (gimple_assign_lhs (assign)), - code)) - return false; - } - else if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) == NULL) + if (!vect_is_reduction (stmt_info)) + return false; + + if (needs_fold_left_reduction_p (TREE_TYPE (gimple_assign_lhs (assign)), + code)) return false; *op0_out = gimple_assign_rhs1 (assign); @@ -4087,10 +4085,13 @@ vect_recog_vector_vector_shift_pattern (vec_info *vinfo, != TYPE_PRECISION (TREE_TYPE (oprnd0))) return NULL; - stmt_vec_info def_vinfo = vect_get_internal_def (vinfo, oprnd1); - if (!def_vinfo) + stmt_vec_info def_vinfo = vinfo->lookup_def (oprnd1); + if (!def_vinfo || STMT_VINFO_DEF_TYPE (def_vinfo) == vect_external_def) return NULL; + def_vinfo = vect_stmt_to_vectorize (def_vinfo); + gcc_assert (def_vinfo); + *type_out = get_vectype_for_scalar_type (vinfo, TREE_TYPE (oprnd0)); if (*type_out == NULL_TREE) return NULL; diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc index f553e8f..13a2995 100644 --- a/gcc/tree-vect-slp.cc +++ b/gcc/tree-vect-slp.cc @@ -53,6 +53,9 @@ along with GCC; see the file COPYING3. If not see #include "sreal.h" #include "predict.h" +#define REDUC_GROUP_FIRST_ELEMENT(S) \ + (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) + static bool vect_transform_slp_perm_load_1 (vec_info *, slp_tree, load_permutation_t &, const vec<tree> &, @@ -4187,41 +4190,60 @@ vect_build_slp_instance (vec_info *vinfo, Return FALSE if SLP build fails. */ static bool -vect_analyze_slp_reduc_chain (vec_info *vinfo, +vect_analyze_slp_reduc_chain (loop_vec_info vinfo, scalar_stmts_to_slp_tree_map_t *bst_map, - stmt_vec_info stmt_info, + stmt_vec_info scalar_stmt, unsigned max_tree_size, unsigned *limit) { - vec<stmt_vec_info> scalar_stmts; + vec<stmt_vec_info> scalar_stmts = vNULL; - /* Collect the reduction stmts and store them in scalar_stmts. */ - scalar_stmts.create (REDUC_GROUP_SIZE (stmt_info)); - stmt_vec_info next_info = stmt_info; - while (next_info) + bool fail = false; + /* ??? We could leave operation code checking to SLP discovery. */ + code_helper code = STMT_VINFO_REDUC_CODE (STMT_VINFO_REDUC_DEF + (vect_orig_stmt (scalar_stmt))); + bool first = true; + stmt_vec_info next_stmt = scalar_stmt; + do { - scalar_stmts.quick_push (vect_stmt_to_vectorize (next_info)); - next_info = REDUC_GROUP_NEXT_ELEMENT (next_info); + stmt_vec_info stmt = next_stmt; + gimple_match_op op; + if (!gimple_extract_op (STMT_VINFO_STMT (stmt), &op)) + gcc_unreachable (); + tree reduc_def = gimple_arg (STMT_VINFO_STMT (stmt), + STMT_VINFO_REDUC_IDX (stmt)); + next_stmt = vect_stmt_to_vectorize (vinfo->lookup_def (reduc_def)); + gcc_assert (is_a <gphi *> (STMT_VINFO_STMT (next_stmt)) + || STMT_VINFO_REDUC_IDX (next_stmt) != -1); + if (!gimple_extract_op (STMT_VINFO_STMT (vect_orig_stmt (stmt)), &op)) + gcc_unreachable (); + if (CONVERT_EXPR_CODE_P (op.code) + && (first + || is_a <gphi *> (STMT_VINFO_STMT (next_stmt)))) + ; + else if (code != op.code) + { + fail = true; + break; + } + else + scalar_stmts.safe_push (stmt); + first = false; } - /* Mark the first element of the reduction chain as reduction to properly - transform the node. In the reduction analysis phase only the last - element of the chain is marked as reduction. 
*/ - STMT_VINFO_DEF_TYPE (stmt_info) - = STMT_VINFO_DEF_TYPE (scalar_stmts.last ()); - STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info)) - = STMT_VINFO_REDUC_DEF (vect_orig_stmt (scalar_stmts.last ())); + while (!is_a <gphi *> (STMT_VINFO_STMT (next_stmt))); + if (fail || scalar_stmts.length () <= 1) + return false; + + scalar_stmts.reverse (); + stmt_vec_info reduc_phi_info = next_stmt; /* Build the tree for the SLP instance. */ vec<stmt_vec_info> root_stmt_infos = vNULL; vec<tree> remain = vNULL; - /* If there's no budget left bail out early. */ - if (*limit == 0) - return false; - if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, - "Starting SLP discovery for\n"); + "Starting SLP discovery of reduction chain for\n"); for (unsigned i = 0; i < scalar_stmts.length (); ++i) dump_printf_loc (MSG_NOTE, vect_location, " %G", scalar_stmts[i]->stmt); @@ -4233,136 +4255,195 @@ vect_analyze_slp_reduc_chain (vec_info *vinfo, poly_uint64 max_nunits = 1; unsigned tree_size = 0; + /* ??? We need this only for SLP discovery. */ + for (unsigned i = 0; i < scalar_stmts.length (); ++i) + REDUC_GROUP_FIRST_ELEMENT (scalar_stmts[i]) = scalar_stmts[0]; + slp_tree node = vect_build_slp_tree (vinfo, scalar_stmts, group_size, &max_nunits, matches, limit, &tree_size, bst_map); + + for (unsigned i = 0; i < scalar_stmts.length (); ++i) + REDUC_GROUP_FIRST_ELEMENT (scalar_stmts[i]) = NULL; + if (node != NULL) { - /* Calculate the unrolling factor based on the smallest type. */ - poly_uint64 unrolling_factor - = calculate_unrolling_factor (max_nunits, group_size); + /* Create a new SLP instance. */ + slp_instance new_instance = XNEW (class _slp_instance); + SLP_INSTANCE_TREE (new_instance) = node; + SLP_INSTANCE_LOADS (new_instance) = vNULL; + SLP_INSTANCE_ROOT_STMTS (new_instance) = root_stmt_infos; + SLP_INSTANCE_REMAIN_DEFS (new_instance) = remain; + SLP_INSTANCE_KIND (new_instance) = slp_inst_kind_reduc_chain; + new_instance->reduc_phis = NULL; + new_instance->cost_vec = vNULL; + new_instance->subgraph_entries = vNULL; - if (maybe_ne (unrolling_factor, 1U) - && is_a <bb_vec_info> (vinfo)) + vect_reduc_info reduc_info = info_for_reduction (vinfo, node); + reduc_info->is_reduc_chain = true; + + if (dump_enabled_p ()) + dump_printf_loc (MSG_NOTE, vect_location, + "SLP size %u vs. limit %u.\n", + tree_size, max_tree_size); + + /* Fixup SLP reduction chains. If this is a reduction chain with + a conversion in front amend the SLP tree with a node for that. */ + gimple *scalar_def = STMT_VINFO_REDUC_DEF (reduc_phi_info)->stmt; + if (is_gimple_assign (scalar_def) + && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (scalar_def))) + { + stmt_vec_info conv_info = vect_stmt_to_vectorize + (STMT_VINFO_REDUC_DEF (reduc_phi_info)); + scalar_stmts = vNULL; + scalar_stmts.create (group_size); + for (unsigned i = 0; i < group_size; ++i) + scalar_stmts.quick_push (conv_info); + slp_tree conv = vect_create_new_slp_node (scalar_stmts, 1); + SLP_TREE_VECTYPE (conv) + = get_vectype_for_scalar_type (vinfo, + TREE_TYPE + (gimple_assign_lhs (scalar_def)), + group_size); + SLP_TREE_REDUC_IDX (conv) = 0; + conv->cycle_info.id = node->cycle_info.id; + SLP_TREE_CHILDREN (conv).quick_push (node); + SLP_INSTANCE_TREE (new_instance) = conv; + } + /* Fill the backedge child of the PHI SLP node. The + general matching code cannot find it because the + scalar code does not reflect how we vectorize the + reduction. 
*/ + use_operand_p use_p; + imm_use_iterator imm_iter; + class loop *loop = LOOP_VINFO_LOOP (vinfo); + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, + gimple_get_lhs (scalar_def)) + /* There are exactly two non-debug uses, the reduction + PHI and the loop-closed PHI node. */ + if (!is_gimple_debug (USE_STMT (use_p)) + && gimple_bb (USE_STMT (use_p)) == loop->header) + { + auto_vec<stmt_vec_info, 64> phis (group_size); + stmt_vec_info phi_info = vinfo->lookup_stmt (USE_STMT (use_p)); + for (unsigned i = 0; i < group_size; ++i) + phis.quick_push (phi_info); + slp_tree *phi_node = bst_map->get (phis); + unsigned dest_idx = loop_latch_edge (loop)->dest_idx; + SLP_TREE_CHILDREN (*phi_node)[dest_idx] + = SLP_INSTANCE_TREE (new_instance); + SLP_INSTANCE_TREE (new_instance)->refcnt++; + } + + vinfo->slp_instances.safe_push (new_instance); + + /* ??? We've replaced the old SLP_INSTANCE_GROUP_SIZE with + the number of scalar stmts in the root in a few places. + Verify that assumption holds. */ + gcc_assert (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (new_instance)) + .length () == group_size); + + if (dump_enabled_p ()) { - unsigned HOST_WIDE_INT const_max_nunits; - if (!max_nunits.is_constant (&const_max_nunits) - || const_max_nunits > group_size) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: store group " - "size not a multiple of the vector size " - "in basic block SLP\n"); - vect_free_slp_tree (node); - return false; - } - /* Fatal mismatch. */ - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "SLP discovery succeeded but node needs " - "splitting\n"); - memset (matches, true, group_size); - matches[group_size / const_max_nunits * const_max_nunits] = false; - vect_free_slp_tree (node); + dump_printf_loc (MSG_NOTE, vect_location, + "Final SLP tree for instance %p:\n", + (void *) new_instance); + vect_print_slp_graph (MSG_NOTE, vect_location, + SLP_INSTANCE_TREE (new_instance)); } - else - { - /* Create a new SLP instance. */ - slp_instance new_instance = XNEW (class _slp_instance); - SLP_INSTANCE_TREE (new_instance) = node; - SLP_INSTANCE_LOADS (new_instance) = vNULL; - SLP_INSTANCE_ROOT_STMTS (new_instance) = root_stmt_infos; - SLP_INSTANCE_REMAIN_DEFS (new_instance) = remain; - SLP_INSTANCE_KIND (new_instance) = slp_inst_kind_reduc_chain; - new_instance->reduc_phis = NULL; - new_instance->cost_vec = vNULL; - new_instance->subgraph_entries = vNULL; - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "SLP size %u vs. limit %u.\n", - tree_size, max_tree_size); + return true; + } - /* Fixup SLP reduction chains. If this is a reduction chain with - a conversion in front amend the SLP tree with a node for that. */ - gimple *scalar_def - = vect_orig_stmt (scalar_stmts[group_size - 1])->stmt; - if (STMT_VINFO_DEF_TYPE (scalar_stmts[0]) != vect_reduction_def) - { - /* Get at the conversion stmt - we know it's the single use - of the last stmt of the reduction chain. 
*/ - use_operand_p use_p; - bool r = single_imm_use (gimple_assign_lhs (scalar_def), - &use_p, &scalar_def); - gcc_assert (r); - stmt_vec_info next_info = vinfo->lookup_stmt (scalar_def); - next_info = vect_stmt_to_vectorize (next_info); - scalar_stmts = vNULL; - scalar_stmts.create (group_size); - for (unsigned i = 0; i < group_size; ++i) - scalar_stmts.quick_push (next_info); - slp_tree conv = vect_create_new_slp_node (scalar_stmts, 1); - SLP_TREE_VECTYPE (conv) - = get_vectype_for_scalar_type (vinfo, - TREE_TYPE - (gimple_assign_lhs (scalar_def)), - group_size); - SLP_TREE_REDUC_IDX (conv) = 0; - conv->cycle_info.id = node->cycle_info.id; - SLP_TREE_CHILDREN (conv).quick_push (node); - SLP_INSTANCE_TREE (new_instance) = conv; - /* We also have to fake this conversion stmt as SLP reduction - group so we don't have to mess with too much code - elsewhere. */ - REDUC_GROUP_FIRST_ELEMENT (next_info) = next_info; - REDUC_GROUP_NEXT_ELEMENT (next_info) = NULL; - } - /* Fill the backedge child of the PHI SLP node. The - general matching code cannot find it because the - scalar code does not reflect how we vectorize the - reduction. */ - use_operand_p use_p; - imm_use_iterator imm_iter; - class loop *loop = LOOP_VINFO_LOOP (as_a <loop_vec_info> (vinfo)); - FOR_EACH_IMM_USE_FAST (use_p, imm_iter, - gimple_get_lhs (scalar_def)) - /* There are exactly two non-debug uses, the reduction - PHI and the loop-closed PHI node. */ - if (!is_gimple_debug (USE_STMT (use_p)) - && gimple_bb (USE_STMT (use_p)) == loop->header) - { - auto_vec<stmt_vec_info, 64> phis (group_size); - stmt_vec_info phi_info - = vinfo->lookup_stmt (USE_STMT (use_p)); - for (unsigned i = 0; i < group_size; ++i) - phis.quick_push (phi_info); - slp_tree *phi_node = bst_map->get (phis); - unsigned dest_idx = loop_latch_edge (loop)->dest_idx; - SLP_TREE_CHILDREN (*phi_node)[dest_idx] - = SLP_INSTANCE_TREE (new_instance); - SLP_INSTANCE_TREE (new_instance)->refcnt++; - } + /* Failed to SLP. */ + scalar_stmts.release (); + if (dump_enabled_p ()) + dump_printf_loc (MSG_NOTE, vect_location, + "SLP discovery of reduction chain failed\n"); + return false; +} - vinfo->slp_instances.safe_push (new_instance); +/* Analyze an SLP instance starting from SCALAR_STMTS which are a group + of KIND. Return true if successful. */ - /* ??? We've replaced the old SLP_INSTANCE_GROUP_SIZE with - the number of scalar stmts in the root in a few places. - Verify that assumption holds. */ - gcc_assert (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (new_instance)) - .length () == group_size); +static bool +vect_analyze_slp_reduction (loop_vec_info vinfo, + stmt_vec_info scalar_stmt, + unsigned max_tree_size, unsigned *limit, + scalar_stmts_to_slp_tree_map_t *bst_map, + bool force_single_lane) +{ + slp_instance_kind kind = slp_inst_kind_reduc_group; - if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Final SLP tree for instance %p:\n", - (void *) new_instance); - vect_print_slp_graph (MSG_NOTE, vect_location, - SLP_INSTANCE_TREE (new_instance)); - } + /* If there's no budget left bail out early. */ + if (*limit == 0) + return false; - return true; + /* Try to gather a reduction chain. */ + if (! 
force_single_lane + && STMT_VINFO_DEF_TYPE (scalar_stmt) == vect_reduction_def + && vect_analyze_slp_reduc_chain (vinfo, bst_map, scalar_stmt, + max_tree_size, limit)) + return true; + + vec<stmt_vec_info> scalar_stmts; + scalar_stmts.create (1); + scalar_stmts.quick_push (scalar_stmt); + + if (dump_enabled_p ()) + { + dump_printf_loc (MSG_NOTE, vect_location, + "Starting SLP discovery for\n"); + for (unsigned i = 0; i < scalar_stmts.length (); ++i) + dump_printf_loc (MSG_NOTE, vect_location, + " %G", scalar_stmts[i]->stmt); + } + + /* Build the tree for the SLP instance. */ + unsigned int group_size = scalar_stmts.length (); + bool *matches = XALLOCAVEC (bool, group_size); + poly_uint64 max_nunits = 1; + unsigned tree_size = 0; + + slp_tree node = vect_build_slp_tree (vinfo, scalar_stmts, group_size, + &max_nunits, matches, limit, + &tree_size, bst_map); + if (node != NULL) + { + /* Create a new SLP instance. */ + slp_instance new_instance = XNEW (class _slp_instance); + SLP_INSTANCE_TREE (new_instance) = node; + SLP_INSTANCE_LOADS (new_instance) = vNULL; + SLP_INSTANCE_ROOT_STMTS (new_instance) = vNULL; + SLP_INSTANCE_REMAIN_DEFS (new_instance) = vNULL; + SLP_INSTANCE_KIND (new_instance) = kind; + new_instance->reduc_phis = NULL; + new_instance->cost_vec = vNULL; + new_instance->subgraph_entries = vNULL; + + if (dump_enabled_p ()) + dump_printf_loc (MSG_NOTE, vect_location, + "SLP size %u vs. limit %u.\n", + tree_size, max_tree_size); + + vinfo->slp_instances.safe_push (new_instance); + + /* ??? We've replaced the old SLP_INSTANCE_GROUP_SIZE with + the number of scalar stmts in the root in a few places. + Verify that assumption holds. */ + gcc_assert (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (new_instance)) + .length () == group_size); + + if (dump_enabled_p ()) + { + dump_printf_loc (MSG_NOTE, vect_location, + "Final SLP tree for instance %p:\n", + (void *) new_instance); + vect_print_slp_graph (MSG_NOTE, vect_location, + SLP_INSTANCE_TREE (new_instance)); } + + return true; } /* Failed to SLP. */ @@ -5256,40 +5337,6 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size, if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo)) { - /* Find SLP sequences starting from reduction chains. */ - FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element) - if (! STMT_VINFO_RELEVANT_P (first_element) - && ! STMT_VINFO_LIVE_P (first_element)) - ; - else if (force_single_lane - || ! vect_analyze_slp_reduc_chain (vinfo, bst_map, - first_element, - max_tree_size, &limit)) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "SLP discovery of reduction chain failed\n"); - /* Dissolve reduction chain group. */ - stmt_vec_info vinfo = first_element; - stmt_vec_info last = NULL; - while (vinfo) - { - stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (vinfo); - REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL; - REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL; - last = vinfo; - vinfo = next; - } - STMT_VINFO_DEF_TYPE (first_element) = vect_internal_def; - /* ??? When there's a conversion around the reduction - chain 'last' isn't the entry of the reduction. */ - if (STMT_VINFO_DEF_TYPE (last) != vect_reduction_def) - return opt_result::failure_at (vect_location, - "SLP build failed.\n"); - /* It can be still vectorized as part of an SLP reduction. */ - loop_vinfo->reductions.safe_push (last); - } - /* Find SLP sequences starting from groups of reductions. 
*/ if (loop_vinfo->reductions.length () > 0) { @@ -5315,23 +5362,13 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size, if (!force_single_lane && !lane_reducing_stmt_p (STMT_VINFO_STMT (next_info))) scalar_stmts.quick_push (next_info); - else - { - /* Do SLP discovery for single-lane reductions. */ - vec<stmt_vec_info> stmts; - vec<stmt_vec_info> roots = vNULL; - vec<tree> remain = vNULL; - stmts.create (1); - stmts.quick_push (next_info); - if (! vect_build_slp_instance (vinfo, - slp_inst_kind_reduc_group, - stmts, roots, remain, - max_tree_size, &limit, - bst_map, - force_single_lane)) - return opt_result::failure_at (vect_location, - "SLP build failed.\n"); - } + /* Do SLP discovery for single-lane reductions. */ + else if (! vect_analyze_slp_reduction (loop_vinfo, next_info, + max_tree_size, &limit, + bst_map, + force_single_lane)) + return opt_result::failure_at (vect_location, + "SLP build failed.\n"); } } /* Save for re-processing on failure. */ @@ -5349,20 +5386,13 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size, scalar_stmts.release (); /* Do SLP discovery for single-lane reductions. */ for (auto stmt_info : saved_stmts) - { - vec<stmt_vec_info> stmts; - vec<stmt_vec_info> roots = vNULL; - vec<tree> remain = vNULL; - stmts.create (1); - stmts.quick_push (vect_stmt_to_vectorize (stmt_info)); - if (! vect_build_slp_instance (vinfo, - slp_inst_kind_reduc_group, - stmts, roots, remain, - max_tree_size, &limit, - bst_map, force_single_lane)) - return opt_result::failure_at (vect_location, - "SLP build failed.\n"); - } + if (! vect_analyze_slp_reduction (loop_vinfo, + vect_stmt_to_vectorize + (stmt_info), + max_tree_size, &limit, + bst_map, force_single_lane)) + return opt_result::failure_at (vect_location, + "SLP build failed.\n"); } saved_stmts.release (); } diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc index dcb2522..83acbb3 100644 --- a/gcc/tree-vect-stmts.cc +++ b/gcc/tree-vect-stmts.cc @@ -2062,16 +2062,13 @@ vector_vector_composition_type (tree vtype, poly_uint64 nelts, tree *ptype, VECTYPE is the vector type that the vectorized statements will use. If ELSVALS is nonzero the supported else values will be stored in the - vector ELSVALS points to. - - For loads PERM_OK indicates whether we can code generate a - SLP_TREE_LOAD_PERMUTATION on the node. */ + vector ELSVALS points to. */ static bool get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype, slp_tree slp_node, bool masked_p, vec_load_store_type vls_type, - bool perm_ok, vect_load_store_data *ls) + vect_load_store_data *ls) { vect_memory_access_type *memory_access_type = &ls->memory_access_type; poly_int64 *poffset = &ls->poffset; @@ -2081,6 +2078,8 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, internal_fn *lanes_ifn = &ls->lanes_ifn; vec<int> *elsvals = &ls->elsvals; tree *ls_type = &ls->ls_type; + bool *slp_perm = &ls->slp_perm; + unsigned *n_perms = &ls->n_perms; loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; @@ -2093,6 +2092,15 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, *misalignment = DR_MISALIGNMENT_UNKNOWN; *poffset = 0; *ls_type = NULL_TREE; + *slp_perm = false; + *n_perms = -1U; + + bool perm_ok = true; + poly_int64 vf = loop_vinfo ? 
LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1; + + if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) + perm_ok = vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, + vf, true, n_perms); if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) { @@ -2534,7 +2542,7 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, poly_uint64 read_amount = vf * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) - read_amount *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info)); + read_amount *= group_size; auto target_alignment = DR_TARGET_ALIGNMENT (STMT_VINFO_DR_INFO (stmt_info)); @@ -2627,6 +2635,60 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, if (!loop_vinfo && *memory_access_type == VMAT_ELEMENTWISE) return false; + /* Some loads need to explicitly permute the loaded data if there + is a load permutation. Among those are: + - VMAT_ELEMENTWISE. + - VMAT_STRIDED_SLP. + - VMAT_GATHER_SCATTER: + - Strided gather (fallback for VMAT_STRIDED_SLP if #lanes == 1). + - Grouped strided gather (ditto but for #lanes > 1). + + For VMAT_ELEMENTWISE we can fold the load permutation into the + individual indices we access directly, eliding the permutation. + Strided gather only allows load permutations for the + single-element case. */ + + if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists () + && !(*memory_access_type == VMAT_ELEMENTWISE + || (mat_gather_scatter_p (*memory_access_type) + && SLP_TREE_LANES (slp_node) == 1 + && single_element_p))) + { + if (!loop_vinfo) + { + /* In BB vectorization we may not actually use a loaded vector + accessing elements in excess of DR_GROUP_SIZE. */ + stmt_vec_info group_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; + group_info = DR_GROUP_FIRST_ELEMENT (group_info); + unsigned HOST_WIDE_INT nunits; + unsigned j, k, maxk = 0; + FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (slp_node), j, k) + if (k > maxk) + maxk = k; + tree vectype = SLP_TREE_VECTYPE (slp_node); + if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits) + || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1))) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "BB vectorization with gaps at the end of " + "a load is not supported\n"); + return false; + } + } + + if (!perm_ok) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, + vect_location, + "unsupported load permutation\n"); + return false; + } + + *slp_perm = true; + } + return true; } @@ -8009,7 +8071,7 @@ vectorizable_store (vec_info *vinfo, vect_load_store_data &ls = slp_node->get_data (_ls_data); if (cost_vec && !get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask_node, - vls_type, false, &_ls_data)) + vls_type, &_ls_data)) return false; /* Temporary aliases to analysis data, should not be modified through these. 
*/ @@ -9454,7 +9516,6 @@ vectorizable_load (vec_info *vinfo, bool compute_in_loop = false; class loop *at_loop; int vec_num; - bool slp_perm = false; bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); poly_uint64 vf; tree aggr_type; @@ -9592,17 +9653,11 @@ vectorizable_load (vec_info *vinfo, else group_size = 1; - bool perm_ok = true; - unsigned n_perms = -1U; - if (cost_vec && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) - perm_ok = vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, vf, - true, &n_perms); - vect_load_store_data _ls_data{}; vect_load_store_data &ls = slp_node->get_data (_ls_data); if (cost_vec && !get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask_node, - VLS_LOAD, perm_ok, &ls)) + VLS_LOAD, &ls)) return false; /* Temporary aliases to analysis data, should not be modified through these. */ @@ -9623,56 +9678,6 @@ vectorizable_load (vec_info *vinfo, bool type_mode_padding_p = TYPE_PRECISION (scalar_type) < GET_MODE_PRECISION (GET_MODE_INNER (mode)); - /* ??? The following checks should really be part of - get_load_store_type. */ - if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists () - && !(memory_access_type == VMAT_ELEMENTWISE - || (mat_gather_scatter_p (memory_access_type) - && SLP_TREE_LANES (slp_node) == 1 - && (!grouped_load - || !DR_GROUP_NEXT_ELEMENT (first_stmt_info))))) - { - slp_perm = true; - - if (!loop_vinfo && cost_vec) - { - /* In BB vectorization we may not actually use a loaded vector - accessing elements in excess of DR_GROUP_SIZE. */ - stmt_vec_info group_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; - group_info = DR_GROUP_FIRST_ELEMENT (group_info); - unsigned HOST_WIDE_INT nunits; - unsigned j, k, maxk = 0; - FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (slp_node), j, k) - if (k > maxk) - maxk = k; - tree vectype = SLP_TREE_VECTYPE (slp_node); - if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits) - || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1))) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "BB vectorization with gaps at the end of " - "a load is not supported\n"); - return false; - } - } - - if (cost_vec) - { - if (!perm_ok) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, - vect_location, - "unsupported load permutation\n"); - return false; - } - ls.n_perms = n_perms; - } - else - n_perms = ls.n_perms; - } - if (slp_node->ldst_lanes && memory_access_type != VMAT_LOAD_STORE_LANES) { @@ -10027,7 +10032,7 @@ vectorizable_load (vec_info *vinfo, not only the number of vector stmts the permutation result fits in. 
*/ int ncopies; - if (slp_perm) + if (ls.slp_perm) { gcc_assert (memory_access_type != VMAT_ELEMENTWISE); /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for @@ -10135,18 +10140,18 @@ vectorizable_load (vec_info *vinfo, if (!costing_p) { - if (slp_perm) + if (ls.slp_perm) dr_chain.quick_push (gimple_assign_lhs (new_stmt)); else slp_node->push_vec_def (new_stmt); } } - if (slp_perm) + if (ls.slp_perm) { if (costing_p) { - gcc_assert (n_perms != -1U); - inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm, + gcc_assert (ls.n_perms != -1U); + inside_cost += record_stmt_cost (cost_vec, ls.n_perms, vec_perm, slp_node, 0, vect_body); } else @@ -10154,7 +10159,7 @@ vectorizable_load (vec_info *vinfo, unsigned n_perms2; vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf, false, &n_perms2); - gcc_assert (n_perms == n_perms2); + gcc_assert (ls.n_perms == n_perms2); } } @@ -10219,7 +10224,7 @@ vectorizable_load (vec_info *vinfo, instead the access is contiguous but it might be permuted. No gap adjustment is needed though. */ ; - else if (slp_perm + else if (ls.slp_perm && (group_size != scalar_lanes || !multiple_p (nunits, group_size))) { @@ -10568,7 +10573,7 @@ vectorizable_load (vec_info *vinfo, if (mat_gather_scatter_p (memory_access_type)) { - gcc_assert ((!grouped_load && !slp_perm) || ls.ls_type); + gcc_assert ((!grouped_load && !ls.slp_perm) || ls.ls_type); /* If we pun the original vectype the loads as well as costing, length, etc. is performed with the new type. After loading we VIEW_CONVERT @@ -10930,14 +10935,14 @@ vectorizable_load (vec_info *vinfo, /* Store vector loads in the corresponding SLP_NODE. */ if (!costing_p) { - if (slp_perm) + if (ls.slp_perm) dr_chain.quick_push (gimple_assign_lhs (new_stmt)); else slp_node->push_vec_def (new_stmt); } } - if (slp_perm) + if (ls.slp_perm) { if (costing_p) { @@ -11034,7 +11039,7 @@ vectorizable_load (vec_info *vinfo, stmt_info, bump); } - if (grouped_load || slp_perm) + if (grouped_load || ls.slp_perm) dr_chain.create (vec_num); gimple *new_stmt = NULL; @@ -11531,11 +11536,11 @@ vectorizable_load (vec_info *vinfo, /* Collect vector loads and later create their permutation in vect_transform_slp_perm_load. */ - if (!costing_p && (grouped_load || slp_perm)) + if (!costing_p && (grouped_load || ls.slp_perm)) dr_chain.quick_push (new_temp); /* Store vector loads in the corresponding SLP_NODE. */ - if (!costing_p && !slp_perm) + if (!costing_p && !ls.slp_perm) slp_node->push_vec_def (new_stmt); /* With SLP permutation we load the gaps as well, without @@ -11544,7 +11549,7 @@ vectorizable_load (vec_info *vinfo, group_elt += nunits; if (!costing_p && maybe_ne (group_gap_adj, 0U) - && !slp_perm + && !ls.slp_perm && known_eq (group_elt, group_size - group_gap_adj)) { poly_wide_int bump_val @@ -11561,7 +11566,7 @@ vectorizable_load (vec_info *vinfo, elements loaded for a permuted SLP load. */ if (!costing_p && maybe_ne (group_gap_adj, 0U) - && slp_perm) + && ls.slp_perm) { poly_wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj); @@ -11572,7 +11577,7 @@ vectorizable_load (vec_info *vinfo, stmt_info, bump); } - if (slp_perm) + if (ls.slp_perm) { /* For SLP we know we've seen all possible uses of dr_chain so direct vect_transform_slp_perm_load to DCE the unused parts. @@ -11580,9 +11585,9 @@ vectorizable_load (vec_info *vinfo, in PR101120 and friends. 
*/ if (costing_p) { - gcc_assert (n_perms != -1U); - if (n_perms != 0) - inside_cost = record_stmt_cost (cost_vec, n_perms, vec_perm, + gcc_assert (ls.n_perms != -1U); + if (ls.n_perms != 0) + inside_cost = record_stmt_cost (cost_vec, ls.n_perms, vec_perm, slp_node, 0, vect_body); } else @@ -11591,7 +11596,7 @@ vectorizable_load (vec_info *vinfo, bool ok = vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf, false, &n_perms2, nullptr, true); - gcc_assert (ok && n_perms == n_perms2); + gcc_assert (ok && ls.n_perms == n_perms2); } dr_chain.release (); } diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h index 52bc0d6..4785cbd 100644 --- a/gcc/tree-vectorizer.h +++ b/gcc/tree-vectorizer.h @@ -290,6 +290,8 @@ struct vect_load_store_data : vect_data { tree strided_offset_vectype; // VMAT_GATHER_SCATTER_IFN, originally strided tree ls_type; // VMAT_GATHER_SCATTER_IFN auto_vec<int> elsvals; + /* True if the load requires a load permutation. */ + bool slp_perm; // SLP_TREE_LOAD_PERMUTATION unsigned n_perms; // SLP_TREE_LOAD_PERMUTATION }; @@ -844,6 +846,9 @@ public: following land-reducing operation would be assigned to. */ unsigned int reduc_result_pos; + /* Whether this represents a reduction chain. */ + bool is_reduc_chain; + /* Whether we force a single cycle PHI during reduction vectorization. */ bool force_single_cycle; @@ -1066,10 +1071,6 @@ public: /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ auto_vec<stmt_vec_info> reductions; - /* All reduction chains in the loop, represented by the first - stmt in the chain. */ - auto_vec<stmt_vec_info> reduction_chains; - /* Defs that could not be analyzed such as OMP SIMD calls without a LHS. */ auto_vec<stmt_vec_info> alternate_defs; @@ -1290,7 +1291,6 @@ public: #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions -#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_EARLY_BREAKS(L) (L)->early_breaks @@ -1538,7 +1538,7 @@ public: /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; - /* Interleaving and reduction chains info. */ + /* Interleaving chains info. */ /* First element in the group. */ stmt_vec_info first_element; /* Pointer to the next element in the group. */ @@ -1711,13 +1711,6 @@ struct gather_scatter_info { #define DR_GROUP_GAP(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap) -#define REDUC_GROUP_FIRST_ELEMENT(S) \ - (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) -#define REDUC_GROUP_NEXT_ELEMENT(S) \ - (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element) -#define REDUC_GROUP_SIZE(S) \ - (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size) - #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog index 43d3f39..253812e 100644 --- a/libcpp/ChangeLog +++ b/libcpp/ChangeLog @@ -1,3 +1,8 @@ +2025-10-13 Pierre Marie de Rodat <derodat@adacore.com> + + * init.cc (read_original_directory): Attempt to decode escape + sequences with cpp_interpret_string_notranslate. + 2025-10-08 Jakub Jelinek <jakub@redhat.com> * makeucnid.cc (write_copyright): Adjust copyright year. 
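The tree-vect-stmts.cc hunks above fold the SLP load-permutation viability check into get_load_store_type and cache its outcome in vect_load_store_data (the new slp_perm flag and n_perms count), so vectorizable_load consumes the cached answer instead of redoing the check at transform time. What follows is a minimal standalone sketch of that analyze-once/reuse-later pattern; it is not GCC code, and the names LoadStoreData, analyze_load and transform_load are invented purely for illustration.

// Standalone sketch (not GCC internals): record at analysis time whether a
// load needs an explicit lane permutation and how many permute ops that
// implies, then reuse the cached result when "transforming".
#include <cassert>
#include <cstdio>
#include <numeric>
#include <vector>

struct LoadStoreData {
  bool slp_perm = false;   // does the load need an explicit permutation?
  unsigned n_perms = -1U;  // number of permute ops, only valid when slp_perm
};

// Analysis phase: decide once whether PERM is the identity; if not, record
// that a permutation is required and how many permute ops we expect to emit
// (here: one per output vector of WIDTH lanes).
static bool analyze_load(const std::vector<unsigned>& perm, unsigned width,
                         LoadStoreData* ls) {
  bool identity = true;
  for (unsigned i = 0; i < perm.size(); ++i)
    identity &= (perm[i] == i);
  if (!identity) {
    ls->slp_perm = true;
    ls->n_perms = (perm.size() + width - 1) / width;
  }
  return true;
}

// Transform phase: consume the cached answer instead of recomputing it.
static std::vector<int> transform_load(const std::vector<int>& lanes,
                                       const std::vector<unsigned>& perm,
                                       const LoadStoreData& ls) {
  if (!ls.slp_perm)
    return lanes;                 // contiguous load, no shuffle needed
  assert(ls.n_perms != -1U);      // analysis must have run first
  std::vector<int> out(perm.size());
  for (unsigned i = 0; i < perm.size(); ++i)
    out[i] = lanes[perm[i]];      // apply the load permutation
  return out;
}

int main() {
  std::vector<int> lanes(8);
  std::iota(lanes.begin(), lanes.end(), 0);
  std::vector<unsigned> perm = {1, 0, 3, 2, 5, 4, 7, 6};
  LoadStoreData ls;
  analyze_load(perm, 4, &ls);
  auto out = transform_load(lanes, perm, ls);
  std::printf("n_perms=%u first=%d\n", ls.n_perms, out[0]);
}

In the actual patch the same role is played by ls.slp_perm and ls.n_perms: the count is computed once during costing, and at transform time vect_transform_slp_perm_load recomputes it only to gcc_assert that it matches the cached value.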
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog index eb6493e..c1221fb 100644 --- a/libstdc++-v3/ChangeLog +++ b/libstdc++-v3/ChangeLog @@ -1,3 +1,25 @@ +2025-10-13 Jonathan Wakely <jwakely@redhat.com> + + * include/bits/stl_iterator_base_funcs.h (advance): Fix comment. + +2025-10-13 Yuao Ma <c8ef@outlook.com> + + * include/bits/atomic_base.h: Implement address(). + * include/bits/version.def: Bump version number. + * include/bits/version.h: Regenerate. + * testsuite/29_atomics/atomic_ref/address.cc: New test. + +2025-10-13 Jonathan Wakely <jwakely@redhat.com> + + * include/bits/unicode.h (_Utf_view::_M_read_reverse_utf16): + Fix check for high surrogate preceding low surrogate. + * testsuite/ext/unicode/view.cc: Check unpaired low surrogates. + +2025-10-13 Jonathan Wakely <jwakely@redhat.com> + + * include/bits/unicode.h (__is_single_code_unit): Fix check for + 7-bit ASCII characters. + 2025-10-11 Gerald Pfeifer <gerald@pfeifer.com> * doc/xml/manual/build_hacking.xml: Update Autoconf manual link. diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h index 0f3f6b1..7e4ad2b 100644 --- a/libstdc++-v3/include/bits/atomic_base.h +++ b/libstdc++-v3/include/bits/atomic_base.h @@ -1538,7 +1538,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _S_required_aligment() { if constexpr (is_floating_point_v<_Vt> || is_pointer_v<_Vt>) - return alignof(_Vt); + return __alignof__(_Vt); else if constexpr ((sizeof(_Vt) & (sizeof(_Vt) - 1)) || sizeof(_Vt) > 16) return alignof(_Vt); else diff --git a/libstdc++-v3/include/bits/chrono.h b/libstdc++-v3/include/bits/chrono.h index 8de8e75..7f505aa 100644 --- a/libstdc++-v3/include/bits/chrono.h +++ b/libstdc++-v3/include/bits/chrono.h @@ -1515,6 +1515,78 @@ _GLIBCXX_END_INLINE_ABI_NAMESPACE(_V2) } // namespace filesystem #endif // C++17 && HOSTED +#if defined _GLIBCXX_USE_NANOSLEEP || defined _GLIBCXX_USE_CLOCK_REALTIME \ + || defined _GLIBCXX_HAS_GTHREADS +namespace chrono +{ +/// @cond undocumented + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wc++17-extensions" + // Convert a chrono::duration to a relative time represented as timespec + // (e.g. for use with nanosleep). + template<typename _Rep, typename _Period> + [[__nodiscard__]] _GLIBCXX14_CONSTEXPR inline + struct ::timespec + __to_timeout_timespec(const duration<_Rep, _Period>& __d) + { + struct ::timespec __ts{}; + + if (__d < __d.zero()) // Negative timeouts don't make sense. + return __ts; + + if constexpr (ratio_greater<_Period, ratio<1>>::value + || treat_as_floating_point<_Rep>::value) + { + // Converting from e.g. chrono::hours::max() to chrono::seconds + // would evaluate LLONG_MAX * 3600 which would overflow. + // Limit to chrono::seconds::max(). + chrono::duration<double> __fmax(chrono::seconds::max()); + if (__d > __fmax) [[__unlikely__]] + return chrono::__to_timeout_timespec(chrono::seconds::max()); + } + + auto __s = chrono::duration_cast<chrono::seconds>(__d); + + if constexpr (is_integral<time_t>::value) // POSIX.1-2001 allows floating + { + // Also limit to time_t maximum (only relevant for 32-bit time_t). 
+ constexpr auto __tmax = numeric_limits<time_t>::max(); + if (__s.count() > __tmax) [[__unlikely__]] + { + __ts.tv_sec = __tmax; + return __ts; + } + } + + auto __ns = chrono::duration_cast<chrono::nanoseconds>(__d - __s); + + if constexpr (treat_as_floating_point<_Rep>::value) + if (__ns.count() > 999999999) [[__unlikely__]] + __ns = chrono::nanoseconds(999999999); + + __ts.tv_sec = static_cast<time_t>(__s.count()); + __ts.tv_nsec = static_cast<long>(__ns.count()); + return __ts; + } +#pragma GCC diagnostic pop + + // Convert a chrono::time_point to an absolute time represented as timespec. + // All times before the epoch get converted to the epoch, so this assumes + // that we only use it for clocks where that's true. + // It should be safe to use this for system_clock and steady_clock. + template<typename _Clock, typename _Dur> + [[__nodiscard__]] _GLIBCXX14_CONSTEXPR inline + struct ::timespec + __to_timeout_timespec(const time_point<_Clock, _Dur>& __t) + { + return chrono::__to_timeout_timespec(__t.time_since_epoch()); + } + +/// @endcond +} // namespace chrono +#endif // USE_NANOSLEEP || USE_CLOCK_REALTIME || HAS_GTHREADS + _GLIBCXX_END_NAMESPACE_VERSION } // namespace std diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h index b5a71f5..06cc51a 100644 --- a/libstdc++-v3/include/bits/hashtable.h +++ b/libstdc++-v3/include/bits/hashtable.h @@ -38,7 +38,7 @@ #include <bits/enable_special_members.h> #include <bits/stl_algobase.h> // fill_n, is_permutation #include <bits/stl_function.h> // __has_is_transparent_t -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED # include <bits/node_handle.h> #endif @@ -349,7 +349,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION using size_type = typename __hashtable_base::size_type; using difference_type = typename __hashtable_base::difference_type; -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = _Node_handle<_Key, _Value, __node_alloc_type>; using insert_return_type = _Node_insert_return<iterator, node_type>; #endif @@ -1931,7 +1931,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION -> const_iterator { return const_iterator(_M_locate(__k)); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Key, typename _Value, typename _Alloc, typename _ExtractKey, typename _Equal, typename _Hash, typename _RangeHash, typename _Unused, @@ -1979,7 +1979,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION std::size_t __bkt = _M_bucket_index(__code); return const_iterator(_M_find_node_tr(__bkt, __k, __code)); } -#endif +#endif // C++20 __glibcxx_generic_unordered_lookup template<typename _Key, typename _Value, typename _Alloc, typename _ExtractKey, typename _Equal, @@ -2007,7 +2007,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return __result; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Key, typename _Value, typename _Alloc, typename _ExtractKey, typename _Equal, typename _Hash, typename _RangeHash, typename _Unused, @@ -2052,7 +2052,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return __result; } -#endif +#endif // C++20 __glibcxx_generic_unordered_lookup template<typename _Key, typename _Value, typename _Alloc, typename _ExtractKey, typename _Equal, @@ -2102,7 +2102,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return { __beg, __ite }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Key, typename _Value, typename 
_Alloc, typename _ExtractKey, typename _Equal, typename _Hash, typename _RangeHash, typename _Unused, @@ -2190,7 +2190,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return { __beg, __ite }; } -#endif +#endif // C++20 __glibcxx_generic_unordered_lookup // Find the node before the one whose key compares equal to k in the bucket // bkt. Return nullptr if no node is found. @@ -2966,7 +2966,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION } #pragma GCC diagnostic pop -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED template<typename, typename, typename> class _Hash_merge_helper { }; #endif // C++17 diff --git a/libstdc++-v3/include/bits/shared_ptr_atomic.h b/libstdc++-v3/include/bits/shared_ptr_atomic.h index cc7841a..cbc4bf6 100644 --- a/libstdc++-v3/include/bits/shared_ptr_atomic.h +++ b/libstdc++-v3/include/bits/shared_ptr_atomic.h @@ -392,6 +392,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION class _Sp_atomic { using value_type = _Tp; + using element_type = typename _Tp::element_type; friend struct atomic<_Tp>; @@ -420,7 +421,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION ~_Atomic_count() { - auto __val = _M_val.load(memory_order_relaxed); + auto __val = _AtomicRef(_M_val).load(memory_order_relaxed); _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val); __glibcxx_assert(!(__val & _S_lock_bit)); if (auto __pi = reinterpret_cast<pointer>(__val)) @@ -442,18 +443,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { // To acquire the lock we flip the LSB from 0 to 1. - auto __current = _M_val.load(memory_order_relaxed); + _AtomicRef __aref(_M_val); + auto __current = __aref.load(memory_order_relaxed); while (__current & _S_lock_bit) { #if __glibcxx_atomic_wait __detail::__thread_relax(); #endif - __current = _M_val.load(memory_order_relaxed); + __current = __aref.load(memory_order_relaxed); } _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val); - while (!_M_val.compare_exchange_strong(__current, + while (!__aref.compare_exchange_strong(__current, __current | _S_lock_bit, __o, memory_order_relaxed)) @@ -474,7 +476,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION unlock(memory_order __o) const noexcept { _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val); - _M_val.fetch_sub(1, __o); + _AtomicRef(_M_val).fetch_sub(1, __o); _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val); } @@ -487,7 +489,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __o = memory_order_release; auto __x = reinterpret_cast<uintptr_t>(__c._M_pi); _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val); - __x = _M_val.exchange(__x, __o); + __x = _AtomicRef(_M_val).exchange(__x, __o); _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val); __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit); } @@ -495,19 +497,45 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION #if __glibcxx_atomic_wait // Precondition: caller holds lock! void - _M_wait_unlock(memory_order __o) const noexcept + _M_wait_unlock(const element_type* const& __ptr, memory_order __o) const noexcept { + auto __old_ptr = __ptr; _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val); - auto __v = _M_val.fetch_sub(1, memory_order_relaxed); + uintptr_t __old_pi + = _AtomicRef(_M_val).fetch_sub(1, memory_order_relaxed) - 1u; _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val); - _M_val.wait(__v & ~_S_lock_bit, __o); + + // Ensure that the correct value of _M_ptr is visible after locking, + // by upgrading relaxed or consume to acquire. 
+ auto __lo = __o; + if (__o != memory_order_seq_cst) + __lo = memory_order_acquire; + + std::__atomic_wait_address( + &_M_val, + [=, &__ptr, this](uintptr_t __new_pi) + { + if (__old_pi != (__new_pi & ~_S_lock_bit)) + // control block changed, we can wake up + return true; + + // control block is same, we need to check if ptr changed, + // the lock needs to be taken first, the value of pi may have + // also been updated in meantime, so reload it + __new_pi = reinterpret_cast<uintptr_t>(this->lock(__lo)); + auto __new_ptr = __ptr; + this->unlock(memory_order_relaxed); + // wake up if either of the values changed + return __new_pi != __old_pi || __new_ptr != __old_ptr; + }, + [__o, this] { return _AtomicRef(_M_val).load(__o); }); } void notify_one() noexcept { _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val); - _M_val.notify_one(); + _AtomicRef(_M_val).notify_one(); _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val); } @@ -515,17 +543,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION notify_all() noexcept { _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val); - _M_val.notify_all(); + _AtomicRef(_M_val).notify_all(); _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val); } #endif private: - mutable __atomic_base<uintptr_t> _M_val{0}; + using _AtomicRef = __atomic_ref<uintptr_t>; + alignas(_AtomicRef::required_alignment) mutable uintptr_t _M_val{0}; static constexpr uintptr_t _S_lock_bit{1}; }; - typename _Tp::element_type* _M_ptr = nullptr; + element_type* _M_ptr = nullptr; _Atomic_count _M_refcount; static typename _Atomic_count::pointer @@ -608,7 +637,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { auto __pi = _M_refcount.lock(memory_order_acquire); if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi) - _M_refcount._M_wait_unlock(__o); + _M_refcount._M_wait_unlock(_M_ptr, __o); else _M_refcount.unlock(memory_order_relaxed); } diff --git a/libstdc++-v3/include/bits/std_mutex.h b/libstdc++-v3/include/bits/std_mutex.h index 777097b..5f9f154 100644 --- a/libstdc++-v3/include/bits/std_mutex.h +++ b/libstdc++-v3/include/bits/std_mutex.h @@ -39,6 +39,7 @@ #else #include <errno.h> // EBUSY +#include <bits/chrono.h> #include <bits/functexcept.h> #include <bits/gthr.h> @@ -210,8 +211,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __gthread_cond_t _M_cond; #endif }; - /// @endcond +namespace chrono +{ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wc++17-extensions" + // Convert a time_point to an absolute time represented as __gthread_time_t + // (which is typically just a typedef for struct timespec). + template<typename _Clock, typename _Dur> + [[__nodiscard__]] _GLIBCXX14_CONSTEXPR inline + __gthread_time_t + __to_timeout_gthread_time_t(const time_point<_Clock, _Dur>& __t) + { + auto __ts = chrono::__to_timeout_timespec(__t.time_since_epoch()); + if constexpr (is_same<::timespec, __gthread_time_t>::value) + return __ts; + else if constexpr (is_convertible<::timespec, __gthread_time_t>::value) + return __ts; + else if constexpr (is_scalar<__gthread_time_t>::value) // Assume seconds: + return static_cast<__gthread_time_t>(__ts.tv_sec); + else // Assume this works and the members are in the correct order: + return __gthread_time_t{ __ts.tv_sec, __ts.tv_nsec }; + } +#pragma GCC diagnostic pop +} + /// @endcond #endif // _GLIBCXX_HAS_GTHREADS /// Do not acquire ownership of the mutex. 
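The chrono.h, std_mutex.h and this_thread_sleep.h hunks above centralize the duration-to-timespec conversion in the new __to_timeout_timespec helper: negative timeouts collapse to zero, values too large for seconds or for a narrow time_t saturate, and the remainder is split into whole seconds plus nanoseconds. Below is a compact standalone sketch of that conversion; it is not the libstdc++ implementation, to_timeout_ts is an invented name, and the saturation is applied unconditionally rather than gated with if constexpr on the rep/period as the real helper does.

// Standalone sketch (not libstdc++): convert a chrono duration to a timespec
// suitable for a relative timeout such as nanosleep's argument, clamping
// negative values and saturating values that would overflow time_t.
#include <chrono>
#include <cstdio>
#include <ctime>
#include <limits>

template <typename Rep, typename Period>
std::timespec to_timeout_ts(std::chrono::duration<Rep, Period> d) {
  using namespace std::chrono;
  std::timespec ts{};                   // zero-initialized: {0, 0}
  if (d <= d.zero())
    return ts;                          // negative or zero timeout -> no wait

  // Saturate at the largest whole-seconds value so the duration_cast below
  // cannot overflow for huge inputs (e.g. hours::max()).
  const auto max_secs = duration<double>(seconds::max());
  seconds s = d > max_secs ? seconds::max() : duration_cast<seconds>(d);

  // Also respect a narrower time_t (relevant for 32-bit time_t targets).
  constexpr auto tmax = std::numeric_limits<std::time_t>::max();
  if (s.count() > tmax) {
    ts.tv_sec = tmax;
    return ts;
  }

  nanoseconds ns = duration_cast<nanoseconds>(d - s);
  if (ns.count() > 999'999'999)         // guard against float rounding up
    ns = nanoseconds(999'999'999);

  ts.tv_sec = static_cast<std::time_t>(s.count());
  ts.tv_nsec = static_cast<long>(ns.count());
  return ts;
}

int main() {
  auto ts = to_timeout_ts(std::chrono::milliseconds(1500));
  std::printf("%lld s %ld ns\n", static_cast<long long>(ts.tv_sec), ts.tv_nsec);
}

This mirrors why sleep_for in this_thread_sleep.h can now build the timespec once and hand it either to nanosleep or to the __sleep_for fallback: the clamping and splitting logic lives in a single helper instead of being repeated at each call site.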
diff --git a/libstdc++-v3/include/bits/stl_map.h b/libstdc++-v3/include/bits/stl_map.h index 68c23b8..62d66ce 100644 --- a/libstdc++-v3/include/bits/stl_map.h +++ b/libstdc++-v3/include/bits/stl_map.h @@ -1259,7 +1259,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) { return _M_t.find(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto find(const _Kt& __x) -> decltype(_M_t._M_find_tr(__x)) @@ -1284,7 +1284,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_t.find(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto find(const _Kt& __x) const -> decltype(_M_t._M_find_tr(__x)) @@ -1305,7 +1305,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_t.find(__x) == _M_t.end() ? 0 : 1; } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto count(const _Kt& __x) const -> decltype(_M_t._M_count_tr(__x)) @@ -1348,7 +1348,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER lower_bound(const key_type& __x) { return _M_t.lower_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto lower_bound(const _Kt& __x) @@ -1373,7 +1373,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER lower_bound(const key_type& __x) const { return _M_t.lower_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto lower_bound(const _Kt& __x) const @@ -1393,7 +1393,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER upper_bound(const key_type& __x) { return _M_t.upper_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto upper_bound(const _Kt& __x) @@ -1413,7 +1413,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER upper_bound(const key_type& __x) const { return _M_t.upper_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto upper_bound(const _Kt& __x) const @@ -1442,7 +1442,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) { return _M_t.equal_range(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto equal_range(const _Kt& __x) @@ -1471,7 +1471,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_t.equal_range(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto equal_range(const _Kt& __x) const @@ -1649,7 +1649,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER _GLIBCXX_END_NAMESPACE_CONTAINER -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED // Allow std::map access to internals of compatible maps. 
template<typename _Key, typename _Val, typename _Cmp1, typename _Alloc, typename _Cmp2> diff --git a/libstdc++-v3/include/bits/stl_multimap.h b/libstdc++-v3/include/bits/stl_multimap.h index 4ee4a84..b2ae2ba 100644 --- a/libstdc++-v3/include/bits/stl_multimap.h +++ b/libstdc++-v3/include/bits/stl_multimap.h @@ -891,7 +891,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) { return _M_t.find(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto find(const _Kt& __x) -> decltype(_M_t._M_find_tr(__x)) @@ -915,7 +915,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_t.find(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto find(const _Kt& __x) const -> decltype(_M_t._M_find_tr(__x)) @@ -933,7 +933,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_t.count(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto count(const _Kt& __x) const -> decltype(_M_t._M_count_tr(__x)) @@ -976,7 +976,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER lower_bound(const key_type& __x) { return _M_t.lower_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto lower_bound(const _Kt& __x) @@ -1001,7 +1001,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER lower_bound(const key_type& __x) const { return _M_t.lower_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto lower_bound(const _Kt& __x) const @@ -1021,7 +1021,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER upper_bound(const key_type& __x) { return _M_t.upper_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto upper_bound(const _Kt& __x) @@ -1041,7 +1041,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER upper_bound(const key_type& __x) const { return _M_t.upper_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto upper_bound(const _Kt& __x) const @@ -1068,7 +1068,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) { return _M_t.equal_range(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto equal_range(const _Kt& __x) @@ -1095,7 +1095,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_t.equal_range(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto equal_range(const _Kt& __x) const @@ -1272,7 +1272,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER _GLIBCXX_END_NAMESPACE_CONTAINER -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED // Allow std::multimap access to internals of compatible maps. 
template<typename _Key, typename _Val, typename _Cmp1, typename _Alloc, typename _Cmp2> diff --git a/libstdc++-v3/include/bits/stl_multiset.h b/libstdc++-v3/include/bits/stl_multiset.h index 31451ab..b6e1bfc 100644 --- a/libstdc++-v3/include/bits/stl_multiset.h +++ b/libstdc++-v3/include/bits/stl_multiset.h @@ -773,7 +773,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_t.count(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto count(const _Kt& __x) const -> decltype(_M_t._M_count_tr(__x)) @@ -822,7 +822,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_t.find(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto find(const _Kt& __x) @@ -857,7 +857,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER lower_bound(const key_type& __x) const { return _M_t.lower_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto lower_bound(const _Kt& __x) @@ -887,7 +887,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER upper_bound(const key_type& __x) const { return _M_t.upper_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto upper_bound(const _Kt& __x) @@ -926,7 +926,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_t.equal_range(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto equal_range(const _Kt& __x) @@ -1103,7 +1103,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER _GLIBCXX_END_NAMESPACE_CONTAINER -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED // Allow std::multiset access to internals of compatible sets. template<typename _Val, typename _Cmp1, typename _Alloc, typename _Cmp2> struct diff --git a/libstdc++-v3/include/bits/stl_set.h b/libstdc++-v3/include/bits/stl_set.h index b65d631..f03d9e5 100644 --- a/libstdc++-v3/include/bits/stl_set.h +++ b/libstdc++-v3/include/bits/stl_set.h @@ -794,7 +794,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_t.find(__x) == _M_t.end() ? 
0 : 1; } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto count(const _Kt& __x) const @@ -844,7 +844,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_t.find(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto find(const _Kt& __x) @@ -879,7 +879,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER lower_bound(const key_type& __x) const { return _M_t.lower_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto lower_bound(const _Kt& __x) @@ -909,7 +909,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER upper_bound(const key_type& __x) const { return _M_t.upper_bound(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto upper_bound(const _Kt& __x) @@ -948,7 +948,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_t.equal_range(__x); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt> auto equal_range(const _Kt& __x) @@ -1119,7 +1119,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER _GLIBCXX_END_NAMESPACE_CONTAINER -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED // Allow std::set access to internals of compatible sets. template<typename _Val, typename _Cmp1, typename _Alloc, typename _Cmp2> struct diff --git a/libstdc++-v3/include/bits/stl_tree.h b/libstdc++-v3/include/bits/stl_tree.h index 4b7f482..e78fa1d 100644 --- a/libstdc++-v3/include/bits/stl_tree.h +++ b/libstdc++-v3/include/bits/stl_tree.h @@ -1918,7 +1918,7 @@ namespace __rb_tree pair<const_iterator, const_iterator> equal_range(const key_type& __k) const; -#if __cplusplus >= 201402L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = __has_is_transparent_t<_Compare, _Kt>> iterator @@ -2007,7 +2007,7 @@ namespace __rb_tree ++__high; return { __low, __high }; } -#endif +#endif // __glibcxx_generic_associative_lookup // Debugging. 
bool diff --git a/libstdc++-v3/include/bits/this_thread_sleep.h b/libstdc++-v3/include/bits/this_thread_sleep.h index 57f89f8..01f25dd 100644 --- a/libstdc++-v3/include/bits/this_thread_sleep.h +++ b/libstdc++-v3/include/bits/this_thread_sleep.h @@ -36,6 +36,7 @@ #if __cplusplus >= 201103L #include <bits/chrono.h> // std::chrono::* +#include <ext/numeric_traits.h> // __int_traits #ifdef _GLIBCXX_USE_NANOSLEEP # include <cerrno> // errno, EINTR @@ -59,11 +60,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { #ifndef _GLIBCXX_NO_SLEEP -#ifndef _GLIBCXX_USE_NANOSLEEP - void - __sleep_for(chrono::seconds, chrono::nanoseconds); -#endif - /// this_thread::sleep_for template<typename _Rep, typename _Period> inline void @@ -71,18 +67,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { if (__rtime <= __rtime.zero()) return; - auto __s = chrono::duration_cast<chrono::seconds>(__rtime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__rtime - __s); + + struct timespec __ts = chrono::__to_timeout_timespec(__rtime); #ifdef _GLIBCXX_USE_NANOSLEEP - struct ::timespec __ts = - { - static_cast<std::time_t>(__s.count()), - static_cast<long>(__ns.count()) - }; while (::nanosleep(&__ts, &__ts) == -1 && errno == EINTR) { } #else - __sleep_for(__s, __ns); + using chrono::seconds; + using chrono::nanoseconds; + void __sleep_for(seconds __s, nanoseconds __ns); + __sleep_for(seconds(__ts.tv_sec), nanoseconds(__ts.tv_nsec)); #endif } diff --git a/libstdc++-v3/include/bits/unordered_map.h b/libstdc++-v3/include/bits/unordered_map.h index cc9e2c4..b9b2772 100644 --- a/libstdc++-v3/include/bits/unordered_map.h +++ b/libstdc++-v3/include/bits/unordered_map.h @@ -961,7 +961,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __x) -> decltype(_M_h._M_find_tr(__x)) @@ -972,7 +972,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __x) const -> decltype(_M_h._M_find_tr(__x)) @@ -994,7 +994,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_h.count(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto count(const _Kt& __x) const -> decltype(_M_h._M_count_tr(__x)) @@ -1034,7 +1034,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __x) @@ -1046,7 +1046,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __x) const @@ -2039,7 +2039,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __x) -> decltype(_M_h._M_find_tr(__x)) @@ -2050,7 +2050,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup 
// C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __x) const -> decltype(_M_h._M_find_tr(__x)) @@ -2068,7 +2068,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_h.count(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto count(const _Kt& __x) const -> decltype(_M_h._M_count_tr(__x)) @@ -2106,7 +2106,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __x) @@ -2118,7 +2118,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __x) const diff --git a/libstdc++-v3/include/bits/unordered_set.h b/libstdc++-v3/include/bits/unordered_set.h index 5649dd7..29bc49a 100644 --- a/libstdc++-v3/include/bits/unordered_set.h +++ b/libstdc++-v3/include/bits/unordered_set.h @@ -744,7 +744,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __k) @@ -756,7 +756,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __k) const @@ -779,7 +779,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_h.count(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto count(const _Kt& __k) const @@ -820,7 +820,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __k) @@ -832,7 +832,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __k) const @@ -1745,7 +1745,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __x) @@ -1757,7 +1757,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER find(const key_type& __x) const { return _M_h.find(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto find(const _Kt& __x) const @@ -1776,7 +1776,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER count(const key_type& __x) const { return _M_h.count(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto count(const _Kt& __x) const -> decltype(_M_h._M_count_tr(__x)) @@ -1814,7 +1814,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 
&& HOSTED template<typename _Kt> auto equal_range(const _Kt& __x) @@ -1826,7 +1826,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER equal_range(const key_type& __x) const { return _M_h.equal_range(__x); } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt> auto equal_range(const _Kt& __x) const diff --git a/libstdc++-v3/include/debug/map.h b/libstdc++-v3/include/debug/map.h index 985a7ac..30469b0 100644 --- a/libstdc++-v3/include/debug/map.h +++ b/libstdc++-v3/include/debug/map.h @@ -455,7 +455,7 @@ namespace __debug } #endif // C++17 -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; using insert_return_type = _Node_insert_return<iterator, node_type>; @@ -601,7 +601,7 @@ namespace __debug find(const key_type& __x) { return iterator(_Base::find(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -614,7 +614,7 @@ namespace __debug find(const key_type& __x) const { return const_iterator(_Base::find(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -629,7 +629,7 @@ namespace __debug lower_bound(const key_type& __x) { return iterator(_Base::lower_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -642,7 +642,7 @@ namespace __debug lower_bound(const key_type& __x) const { return const_iterator(_Base::lower_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -655,7 +655,7 @@ namespace __debug upper_bound(const key_type& __x) { return iterator(_Base::upper_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -668,7 +668,7 @@ namespace __debug upper_bound(const key_type& __x) const { return const_iterator(_Base::upper_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -686,7 +686,7 @@ namespace __debug iterator(__res.second, this)); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -707,7 +707,7 @@ namespace __debug const_iterator(__res.second, this)); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> diff --git a/libstdc++-v3/include/debug/multimap.h b/libstdc++-v3/include/debug/multimap.h index c187e51..db9e246 100644 --- a/libstdc++-v3/include/debug/multimap.h +++ b/libstdc++-v3/include/debug/multimap.h @@ -340,7 +340,7 @@ namespace __debug _Base::insert(__first, __last); } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; node_type @@ -483,7 +483,7 @@ namespace __debug 
find(const key_type& __x) { return iterator(_Base::find(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -496,7 +496,7 @@ namespace __debug find(const key_type& __x) const { return const_iterator(_Base::find(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -511,7 +511,7 @@ namespace __debug lower_bound(const key_type& __x) { return iterator(_Base::lower_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -524,7 +524,7 @@ namespace __debug lower_bound(const key_type& __x) const { return const_iterator(_Base::lower_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -537,7 +537,7 @@ namespace __debug upper_bound(const key_type& __x) { return iterator(_Base::upper_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -550,7 +550,7 @@ namespace __debug upper_bound(const key_type& __x) const { return const_iterator(_Base::upper_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -568,7 +568,7 @@ namespace __debug iterator(__res.second, this)); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -589,7 +589,7 @@ namespace __debug const_iterator(__res.second, this)); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> diff --git a/libstdc++-v3/include/debug/multiset.h b/libstdc++-v3/include/debug/multiset.h index 41bf78d..156378a 100644 --- a/libstdc++-v3/include/debug/multiset.h +++ b/libstdc++-v3/include/debug/multiset.h @@ -311,7 +311,7 @@ namespace __debug { _Base::insert(__l); } #endif -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; node_type @@ -457,7 +457,7 @@ namespace __debug find(const key_type& __x) const { return const_iterator(_Base::find(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -485,7 +485,7 @@ namespace __debug lower_bound(const key_type& __x) const { return const_iterator(_Base::lower_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -511,7 +511,7 @@ namespace __debug upper_bound(const key_type& __x) const { return const_iterator(_Base::upper_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename 
__has_is_transparent<_Compare, _Kt>::type> @@ -547,7 +547,7 @@ namespace __debug const_iterator(__res.second, this)); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> diff --git a/libstdc++-v3/include/debug/set.h b/libstdc++-v3/include/debug/set.h index 6ec8338..9b42862 100644 --- a/libstdc++-v3/include/debug/set.h +++ b/libstdc++-v3/include/debug/set.h @@ -319,7 +319,7 @@ namespace __debug { _Base::insert(__l); } #endif -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; using insert_return_type = _Node_insert_return<iterator, node_type>; @@ -468,7 +468,7 @@ namespace __debug find(const key_type& __x) const { return const_iterator(_Base::find(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -496,7 +496,7 @@ namespace __debug lower_bound(const key_type& __x) const { return const_iterator(_Base::lower_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -522,7 +522,7 @@ namespace __debug upper_bound(const key_type& __x) const { return const_iterator(_Base::upper_bound(__x), this); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> @@ -558,7 +558,7 @@ namespace __debug const_iterator(__res.second, this)); } -#if __cplusplus > 201103L +#ifdef __glibcxx_generic_associative_lookup // C++ >= 14 template<typename _Kt, typename _Req = typename __has_is_transparent<_Compare, _Kt>::type> diff --git a/libstdc++-v3/include/debug/unordered_map b/libstdc++-v3/include/debug/unordered_map index 7673db1..c90e44a 100644 --- a/libstdc++-v3/include/debug/unordered_map +++ b/libstdc++-v3/include/debug/unordered_map @@ -561,7 +561,7 @@ namespace __debug } #endif // C++17 -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; using insert_return_type = _Node_insert_return<iterator, node_type>; @@ -632,7 +632,7 @@ namespace __debug find(const key_type& __key) { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -645,7 +645,7 @@ namespace __debug find(const key_type& __key) const { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -666,7 +666,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -685,7 +685,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && 
HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -790,7 +790,7 @@ namespace __debug return __next; } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED node_type _M_extract(_Base_const_iterator __victim) { @@ -1362,7 +1362,7 @@ namespace __debug _M_check_rehashed(__bucket_count); } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; node_type @@ -1428,7 +1428,7 @@ namespace __debug find(const key_type& __key) { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1441,7 +1441,7 @@ namespace __debug find(const key_type& __key) const { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1462,7 +1462,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1481,7 +1481,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1587,7 +1587,7 @@ namespace __debug return __next; } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED node_type _M_extract(_Base_const_iterator __victim) { diff --git a/libstdc++-v3/include/debug/unordered_set b/libstdc++-v3/include/debug/unordered_set index 932600d..7fc4146 100644 --- a/libstdc++-v3/include/debug/unordered_set +++ b/libstdc++-v3/include/debug/unordered_set @@ -448,7 +448,7 @@ namespace __debug _M_check_rehashed(__bucket_count); } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; using insert_return_type = _Node_insert_return<iterator, node_type>; @@ -519,7 +519,7 @@ namespace __debug find(const key_type& __key) { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -532,7 +532,7 @@ namespace __debug find(const key_type& __key) const { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -554,7 +554,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -573,7 +573,7 @@ namespace __debug return { { 
__res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -672,7 +672,7 @@ namespace __debug return __next; } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED node_type _M_extract(_Base_const_iterator __victim) { @@ -1183,7 +1183,7 @@ namespace __debug _M_check_rehashed(__bucket_count); } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED using node_type = typename _Base::node_type; node_type @@ -1249,7 +1249,7 @@ namespace __debug find(const key_type& __key) { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1262,7 +1262,7 @@ namespace __debug find(const key_type& __key) const { return { _Base::find(__key), this }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1284,7 +1284,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1303,7 +1303,7 @@ namespace __debug return { { __res.first, this }, { __res.second, this } }; } -#if __cplusplus > 201703L +#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED template<typename _Kt, typename = std::__has_is_transparent_t<_Hash, _Kt>, typename = std::__has_is_transparent_t<_Pred, _Kt>> @@ -1400,7 +1400,7 @@ namespace __debug return __next; } -#if __cplusplus > 201402L +#ifdef __glibcxx_node_extract // >= C++17 && HOSTED node_type _M_extract(_Base_const_iterator __victim) { diff --git a/libstdc++-v3/include/std/condition_variable b/libstdc++-v3/include/std/condition_variable index 3525ff3..dcf0b92 100644 --- a/libstdc++-v3/include/std/condition_variable +++ b/libstdc++-v3/include/std/condition_variable @@ -193,15 +193,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __wait_until_impl(unique_lock<mutex>& __lock, const chrono::time_point<steady_clock, _Dur>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + __gthread_time_t __ts = chrono::__to_timeout_gthread_time_t(__atime); _M_cond.wait_until(*__lock.mutex(), CLOCK_MONOTONIC, __ts); return (steady_clock::now() < __atime @@ -214,15 +206,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __wait_until_impl(unique_lock<mutex>& __lock, const chrono::time_point<system_clock, _Dur>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + __gthread_time_t __ts = chrono::__to_timeout_gthread_time_t(__atime); _M_cond.wait_until(*__lock.mutex(), __ts); return 
(system_clock::now() < __atime diff --git a/libstdc++-v3/include/std/format b/libstdc++-v3/include/std/format index 281c038..1102ac8 100644 --- a/libstdc++-v3/include/std/format +++ b/libstdc++-v3/include/std/format @@ -105,6 +105,7 @@ namespace __format template<typename _CharT> class _Sink; template<typename _CharT> class _Fixedbuf_sink; template<typename _Out, typename _CharT> class _Padding_sink; + template<typename _Out, typename _CharT> class _Escaping_sink; // Output iterator that writes to a type-erase character sink. template<typename _CharT> @@ -1068,6 +1069,17 @@ namespace __format template<typename _Out, typename _CharT> _Out + __write_escape_seqs(_Out __out, basic_string_view<_CharT> __units) + { + using _UChar = make_unsigned_t<_CharT>; + for (_CharT __c : __units) + __out = __format::__write_escape_seq( + __out, static_cast<_UChar>(__c), _Escapes<_CharT>::_S_x()); + return __out; + } + + template<typename _Out, typename _CharT> + _Out __write_escaped_char(_Out __out, _CharT __c) { using _UChar = make_unsigned_t<_CharT>; @@ -1124,12 +1136,10 @@ namespace __format template<typename _CharT, typename _Out> _Out - __write_escaped_unicode(_Out __out, - basic_string_view<_CharT> __str, - _Term_char __term) + __write_escaped_unicode_part(_Out __out, basic_string_view<_CharT>& __str, + bool& __prev_esc, _Term_char __term) { using _Str_view = basic_string_view<_CharT>; - using _UChar = make_unsigned_t<_CharT>; using _Esc = _Escapes<_CharT>; static constexpr char32_t __replace = U'\uFFFD'; @@ -1143,10 +1153,10 @@ namespace __format }(); __unicode::_Utf_view<char32_t, _Str_view> __v(std::move(__str)); + __str = {}; + auto __first = __v.begin(); auto const __last = __v.end(); - - bool __prev_esc = true; while (__first != __last) { bool __esc_ascii = false; @@ -1185,15 +1195,32 @@ namespace __format __out = __format::__write_escaped_char(__out, *__first.base()); else if (__esc_unicode) __out = __format::__write_escape_seq(__out, *__first, _Esc::_S_u()); - else // __esc_replace - for (_CharT __c : _Str_view(__first.base(), __first._M_units())) - __out = __format::__write_escape_seq(__out, - static_cast<_UChar>(__c), - _Esc::_S_x()); + // __esc_replace + else if (_Str_view __units(__first.base(), __first._M_units()); + __units.end() != __last.base()) + __out = __format::__write_escape_seqs(__out, __units); + else + { + __str = __units; + return __out; + } + __prev_esc = true; ++__first; - } + + return __out; + } + + template<typename _CharT, typename _Out> + _Out + __write_escaped_unicode(_Out __out, basic_string_view<_CharT> __str, + _Term_char __term) + { + bool __prev_escape = true; + __out = __format::__write_escaped_unicode_part(__out, __str, + __prev_escape, __term); + __out = __format::__write_escape_seqs(__out, __str); return __out; } @@ -1399,7 +1426,6 @@ namespace __format _M_format_range(_Rg&& __rg, basic_format_context<_Out, _CharT>& __fc) const { using _Range = remove_reference_t<_Rg>; - using _String = basic_string<_CharT>; using _String_view = basic_string_view<_CharT>; if constexpr (!is_lvalue_reference_v<_Rg>) return _M_format_range<_Range&>(__rg, __fc); @@ -1412,55 +1438,28 @@ namespace __format size_t(ranges::distance(__rg))); return format(__str, __fc); } - else if (!_M_spec._M_debug) + else { + auto __handle_debug = [this, &__rg]<typename _NOut>(_NOut __nout) + { + if (!_M_spec._M_debug) + return ranges::copy(__rg, std::move(__nout)).out; + + _Escaping_sink<_NOut, _CharT> + __sink(std::move(__nout), _Term_quote); + ranges::copy(__rg, __sink.out()); + return 
__sink._M_finish(); + }; + const size_t __padwidth = _M_spec._M_get_width(__fc); if (__padwidth == 0 && _M_spec._M_prec_kind == _WP_none) - return ranges::copy(__rg, __fc.out()).out; + return __handle_debug(__fc.out()); - _Padding_sink<_Out, _CharT> __sink(__fc.out(), __padwidth, - _M_spec._M_get_precision(__fc)); - ranges::copy(__rg, __sink.out()); + _Padding_sink<_Out, _CharT> + __sink(__fc.out(), __padwidth, _M_spec._M_get_precision(__fc)); + __handle_debug(__sink.out()); return __sink._M_finish(_M_spec._M_align, _M_spec._M_fill); } - else if constexpr (ranges::forward_range<_Rg> || ranges::sized_range<_Rg>) - { - const size_t __n(ranges::distance(__rg)); - size_t __w = __n; - if constexpr (!__unicode::__literal_encoding_is_unicode<_CharT>()) - if (size_t __max = _M_spec._M_get_precision(__fc); __n > __max) - __w == __max; - - if (__w <= __format::__stackbuf_size<_CharT>) - { - _CharT __buf[__format::__stackbuf_size<_CharT>]; - ranges::copy_n(ranges::begin(__rg), __w, __buf); - return _M_format_escaped(_String_view(__buf, __n), __fc); - } - else if constexpr (ranges::random_access_range<_Rg>) - { - ranges::iterator_t<_Rg> __first = ranges::begin(__rg); - ranges::subrange __sub(__first, ranges::next(__first, __w)); - return _M_format_escaped(_String(from_range, __sub), __fc); - } - else if (__w <= __n) - { - ranges::subrange __sub( - counted_iterator(ranges::begin(__rg), __w), - default_sentinel); - return _M_format_escaped(_String(from_range, __sub), __fc); - } - else if constexpr (ranges::sized_range<_Rg>) - return _M_format_escaped(_String(from_range, __rg), __fc); - else - { - // N.B. preserve the computed size - ranges::subrange __sub(__rg, __n); - return _M_format_escaped(_String(from_range, __sub), __fc); - } - } - else - return _M_format_escaped(_String(from_range, __rg), __fc); } constexpr void @@ -3997,6 +3996,93 @@ namespace __format } }; + template<typename _Out, typename _CharT> + class _Escaping_sink : public _Buf_sink<_CharT> + { + using _Esc = _Escapes<_CharT>; + + _Out _M_out; + _Term_char _M_term : 2; + unsigned _M_prev_escape : 1; + unsigned _M_out_discards : 1; + + void + _M_sync_discarding() + { + if constexpr (is_same_v<_Out, _Sink_iter<_CharT>>) + _M_out_discards = _M_out._M_discarding(); + } + + void + _M_write() + { + span<_CharT> __bytes = this->_M_used(); + basic_string_view<_CharT> __str(__bytes.data(), __bytes.size()); + + size_t __rem = 0; + if constexpr (__unicode::__literal_encoding_is_unicode<_CharT>()) + { + bool __prev_escape = _M_prev_escape; + _M_out = __format::__write_escaped_unicode_part( + std::move(_M_out), __str, __prev_escape, _M_term); + _M_prev_escape = __prev_escape; + + __rem = __str.size(); + if (__rem > 0 && __str.data() != this->_M_buf) [[unlikely]] + ranges::move(__str, this->_M_buf); + } + else + _M_out = __format::__write_escaped_ascii( + std::move(_M_out), __str, _M_term); + + this->_M_reset(this->_M_buf, __rem); + _M_sync_discarding(); + } + + void + _M_overflow() override + { + if (_M_out_discards) + this->_M_rewind(); + else + _M_write(); + } + + bool + _M_discarding() const override + { return _M_out_discards; } + + public: + [[__gnu__::__always_inline__]] + explicit + _Escaping_sink(_Out __out, _Term_char __term) + : _M_out(std::move(__out)), _M_term(__term), + _M_prev_escape(true), _M_out_discards(false) + { + _M_out = __format::__write(std::move(_M_out), _Esc::_S_term(_M_term)); + _M_sync_discarding(); + } + + _Out + _M_finish() + { + if (_M_out_discards) + return std::move(_M_out); + + if (!this->_M_used().empty()) + { + 
_M_write(); + if constexpr (__unicode::__literal_encoding_is_unicode<_CharT>()) + if (auto __rem = this->_M_used(); !__rem.empty()) + { + basic_string_view<_CharT> __str(__rem.data(), __rem.size()); + _M_out = __format::__write_escape_seqs(std::move(_M_out), __str); + } + } + return __format::__write(std::move(_M_out), _Esc::_S_term(_M_term)); + } + }; + enum class _Arg_t : unsigned char { _Arg_none, _Arg_bool, _Arg_c, _Arg_i, _Arg_u, _Arg_ll, _Arg_ull, _Arg_flt, _Arg_dbl, _Arg_ldbl, _Arg_str, _Arg_sv, _Arg_ptr, _Arg_handle, diff --git a/libstdc++-v3/include/std/mutex b/libstdc++-v3/include/std/mutex index 631c380..d4fc4c6 100644 --- a/libstdc++-v3/include/std/mutex +++ b/libstdc++-v3/include/std/mutex @@ -179,14 +179,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_try_lock_until(const chrono::time_point<chrono::system_clock, _Duration>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + __gthread_time_t __ts = chrono::__to_timeout_gthread_time_t(__atime); return static_cast<_Derived*>(this)->_M_timedlock(__ts); } @@ -196,14 +189,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_try_lock_until(const chrono::time_point<chrono::steady_clock, _Duration>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + __gthread_time_t __ts = chrono::__to_timeout_gthread_time_t(__atime); return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC, __ts); } diff --git a/libstdc++-v3/include/std/shared_mutex b/libstdc++-v3/include/std/shared_mutex index 94c8532..a267ad7 100644 --- a/libstdc++-v3/include/std/shared_mutex +++ b/libstdc++-v3/include/std/shared_mutex @@ -520,15 +520,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION try_lock_until(const chrono::time_point<chrono::system_clock, _Duration>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + struct timespec __ts = chrono::__to_timeout_timespec(__atime); int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts); // On self-deadlock, we just fail to acquire the lock. Technically, // the program violated the precondition. @@ -546,15 +538,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION try_lock_until(const chrono::time_point<chrono::steady_clock, _Duration>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + struct timespec __ts = chrono::__to_timeout_timespec(__atime); int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC, &__ts); // On self-deadlock, we just fail to acquire the lock. 
Technically, @@ -596,14 +580,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION try_lock_shared_until(const chrono::time_point<chrono::system_clock, _Duration>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; + struct timespec __ts = chrono::__to_timeout_timespec(__atime); int __ret; // Unlike for lock(), we are not allowed to throw an exception so if @@ -636,15 +613,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION try_lock_shared_until(const chrono::time_point<chrono::steady_clock, _Duration>& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; - + struct timespec __ts = chrono::__to_timeout_timespec(__atime); int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC, &__ts); // On self-deadlock, we just fail to acquire the lock. Technically, diff --git a/libstdc++-v3/python/libstdcxx/v6/printers.py b/libstdc++-v3/python/libstdcxx/v6/printers.py index e5336b7..1822d42 100644 --- a/libstdc++-v3/python/libstdcxx/v6/printers.py +++ b/libstdc++-v3/python/libstdcxx/v6/printers.py @@ -287,7 +287,11 @@ class SharedPointerPrinter(printer_base): def _get_refcounts(self): if self._typename == 'std::atomic': # A tagged pointer is stored as uintptr_t. - ptr_val = self._val['_M_refcount']['_M_val']['_M_i'] + val = self._val['_M_refcount']['_M_val'] + if val.type.is_scalar: # GCC 16 stores uintptr_t + ptr_val = val + else: # GCC 12-15 stores std::atomic<uintptr_t> + ptr_val = val['_M_i'] ptr_val = ptr_val - (ptr_val % 2) # clear lock bit ptr_type = find_type(self._val['_M_refcount'].type, 'pointer') return ptr_val.cast(ptr_type) diff --git a/libstdc++-v3/src/c++11/thread.cc b/libstdc++-v3/src/c++11/thread.cc index 6c2ec29..5cfe564 100644 --- a/libstdc++-v3/src/c++11/thread.cc +++ b/libstdc++-v3/src/c++11/thread.cc @@ -231,10 +231,30 @@ namespace std _GLIBCXX_VISIBILITY(default) _GLIBCXX_BEGIN_NAMESPACE_VERSION namespace this_thread { +namespace +{ + // returns min(s, Dur::max()) + template<typename Dur> + inline chrono::seconds + limit(chrono::seconds s) + { + static_assert(ratio_equal<typename Dur::period, ratio<1>>::value, + "period must be seconds to avoid potential overflow"); + + if (s > Dur::max()) [[__unlikely__]] + s = chrono::duration_cast<chrono::seconds>(Dur::max()); + return s; + } +} + void __sleep_for(chrono::seconds __s, chrono::nanoseconds __ns) { #ifdef _GLIBCXX_USE_NANOSLEEP +#pragma GCC diagnostic ignored "-Wc++17-extensions" + if constexpr (is_integral<time_t>::value) // POSIX.1-2001 allows floating + __s = limit<chrono::duration<time_t>>(__s); + struct ::timespec __ts = { static_cast<std::time_t>(__s.count()), @@ -246,6 +266,8 @@ namespace this_thread const auto target = chrono::steady_clock::now() + __s + __ns; while (true) { + __s = limit<chrono::duration<unsigned>>(__s); + unsigned secs = __s.count(); if (__ns.count() > 0) { @@ -271,12 +293,28 @@ namespace this_thread break; __s = chrono::duration_cast<chrono::seconds>(target - now); __ns = chrono::duration_cast<chrono::nanoseconds>(target - (now + __s)); - } + } #elif defined(_GLIBCXX_USE_WIN32_SLEEP) - unsigned long ms = __ns.count() / 1000000; - if (__ns.count() > 0 && ms == 0) - ms = 1; - 
::Sleep(chrono::milliseconds(__s).count() + ms); + + // Can't use limit<chrono::milliseconds>(__s) here because it would + // multiply __s by 1000 which could overflow. + // Limit to milliseconds::max() and truncate to seconds: + chrono::milliseconds ms = chrono::milliseconds::max(); + if (__s < chrono::duration_cast<chrono::seconds>(ms)) + { + ms = __s; + ms += chrono::__detail::ceil<chrono::milliseconds>(__ns); + } + + // Use Sleep(DWORD millis) where DWORD is uint32_t. + constexpr chrono::milliseconds max_sleep(INFINITE - 1u); + while (ms > max_sleep) + { + ::Sleep(max_sleep.count()); + ms -= max_sleep; + } + + ::Sleep(ms.count()); #endif } } diff --git a/libstdc++-v3/src/c++17/fs_path.cc b/libstdc++-v3/src/c++17/fs_path.cc index 215afa0..03bb5ec 100644 --- a/libstdc++-v3/src/c++17/fs_path.cc +++ b/libstdc++-v3/src/c++17/fs_path.cc @@ -34,6 +34,7 @@ #include <filesystem> #include <algorithm> #include <array> +#include <new> #include <bits/stl_uninitialized.h> #include <ext/numeric_traits.h> // __gnu_cxx::__int_traits @@ -207,6 +208,10 @@ struct path::_List::_Impl _Impl(int cap) : _M_size(0), _M_capacity(cap) { } + ~_Impl() { clear(); } + + // Align the first member like the value_type so that we can store one or + // more objects of that type immediately after the memory occupied by *this. alignas(value_type) int _M_size; int _M_capacity; @@ -246,29 +251,67 @@ struct path::_List::_Impl unique_ptr<_Impl, _Impl_deleter> copy() const { const auto n = size(); - void* p = ::operator new(sizeof(_Impl) + n * sizeof(value_type)); - unique_ptr<_Impl, _Impl_deleter> newptr(::new (p) _Impl{n}); + // *this already has n elements so don't need to check if n overflows: + auto newptr = create_unchecked(n); std::uninitialized_copy_n(begin(), n, newptr->begin()); newptr->_M_size = n; return newptr; } + // We use the two least significant bits to store a _Type value so + // require memory aligned to at least 4 bytes: + static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 4); + // Require memory suitably aligned for an _Impl and its value types: + static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= alignof(value_type)); + // Clear the lowest two bits from the pointer (i.e. remove the _Type value) static _Impl* notype(_Impl* p) { constexpr uintptr_t mask = ~(uintptr_t)0x3; return reinterpret_cast<_Impl*>(reinterpret_cast<uintptr_t>(p) & mask); } + + // Create a new _Impl with capacity for n components. + static unique_ptr<_Impl, _Impl_deleter> + create(int n) + { + using __gnu_cxx::__int_traits; + // Nobody should need paths with this many components. + if (n >= __int_traits<int>::__max / 4) + std::__throw_bad_alloc(); + + if constexpr (__int_traits<int>::__max >= __int_traits<size_t>::__max) + { + // Check that the calculation in create_unchecked(n) won't overflow. + size_t bytes; + if (__builtin_mul_overflow(n, sizeof(value_type), &bytes) + || __builtin_add_overflow(sizeof(_Impl), bytes, &bytes)) + std::__throw_bad_alloc(); + } + // Otherwise, it can't overflow, even for 20-bit size_t on msp430. + + return create_unchecked(n); + } + + // pre: no overflow in Si + n * Sv + static unique_ptr<_Impl, _Impl_deleter> + create_unchecked(int n) + { + void* p = ::operator new(sizeof(_Impl) + n * sizeof(value_type)); + return std::unique_ptr<_Impl, _Impl_deleter>(::new(p) _Impl{n}); + } }; -void path::_List::_Impl_deleter::operator()(_Impl* p) const noexcept +// Destroy and deallocate an _Impl. 
+void +path::_List::_Impl_deleter::operator()(_Impl* p) const noexcept { p = _Impl::notype(p); if (p) { - __glibcxx_assert(p->_M_size <= p->_M_capacity); - p->clear(); - ::operator delete(p, sizeof(*p) + p->_M_capacity * sizeof(value_type)); + const auto n = p->_M_capacity; + p->~_Impl(); + ::operator delete(p, sizeof(_Impl) + n * sizeof(_Impl::value_type)); } } @@ -455,24 +498,7 @@ path::_List::reserve(int newcap, bool exact = false) newcap = nextcap; } - using __gnu_cxx::__int_traits; - // Nobody should need paths with this many components. - if (newcap >= __int_traits<int>::__max / 4) - std::__throw_bad_alloc(); - - size_t bytes; - if constexpr (__int_traits<int>::__max >= __int_traits<size_t>::__max) - { - size_t components; - if (__builtin_mul_overflow(newcap, sizeof(value_type), &components) - || __builtin_add_overflow(sizeof(_Impl), components, &bytes)) - std::__throw_bad_alloc(); - } - else // This won't overflow, even for 20-bit size_t on msp430. - bytes = sizeof(_Impl) + newcap * sizeof(value_type); - - void* p = ::operator new(bytes); - std::unique_ptr<_Impl, _Impl_deleter> newptr(::new(p) _Impl{newcap}); + auto newptr = _Impl::create(newcap); const int cursize = curptr ? curptr->size() : 0; if (cursize) { diff --git a/libstdc++-v3/src/c++20/atomic.cc b/libstdc++-v3/src/c++20/atomic.cc index 4120e1a..7978809 100644 --- a/libstdc++-v3/src/c++20/atomic.cc +++ b/libstdc++-v3/src/c++20/atomic.cc @@ -350,14 +350,7 @@ __platform_wait_until(const __platform_wait_t* __addr, __platform_wait_t __old, const __wait_clock_t::time_point& __atime) noexcept { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - struct timespec __rt = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; + struct timespec __rt = chrono::__to_timeout_timespec(__atime); if (syscall (SYS_futex, __addr, static_cast<int>(__futex_wait_flags::__wait_bitset_private), @@ -378,14 +371,7 @@ bool __cond_wait_until(__condvar& __cv, mutex& __mx, const __wait_clock_t::time_point& __atime) { - auto __s = chrono::time_point_cast<chrono::seconds>(__atime); - auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); - - __gthread_time_t __ts = - { - static_cast<std::time_t>(__s.time_since_epoch().count()), - static_cast<long>(__ns.count()) - }; + __gthread_time_t __ts = chrono::__to_timeout_gthread_time_t(__atime); #ifdef _GLIBCXX_USE_PTHREAD_COND_CLOCKWAIT if constexpr (is_same_v<chrono::steady_clock, __wait_clock_t>) diff --git a/libstdc++-v3/testsuite/20_util/shared_ptr/atomic/pr118757.cc b/libstdc++-v3/testsuite/20_util/shared_ptr/atomic/pr118757.cc new file mode 100644 index 0000000..d54abd8 --- /dev/null +++ b/libstdc++-v3/testsuite/20_util/shared_ptr/atomic/pr118757.cc @@ -0,0 +1,29 @@ +// { dg-do run { target c++20 } } +// { dg-require-gthreads "" } +// { dg-require-effective-target hosted } + +#include <memory> +#include <chrono> +#include <thread> +#include <barrier> + +std::shared_ptr<int> q = std::make_shared<int>(42); +std::atomic<std::shared_ptr<int>> p = q; + +std::barrier bar(2); + +void signaller() +{ + std::this_thread::sleep_for(std::chrono::seconds(1)); + p.store(std::shared_ptr<int>(q, nullptr)); + p.notify_one(); + bar.arrive_and_wait(); +} + +int main(int, char**) +{ + std::thread thr(signaller); + p.wait(q); + bar.arrive_and_wait(); + thr.join(); +} diff --git a/libstdc++-v3/testsuite/20_util/weak_ptr/pr118757.cc 
b/libstdc++-v3/testsuite/20_util/weak_ptr/pr118757.cc new file mode 100644 index 0000000..f048f13 --- /dev/null +++ b/libstdc++-v3/testsuite/20_util/weak_ptr/pr118757.cc @@ -0,0 +1,30 @@ +// { dg-do run { target c++20 } } +// { dg-require-gthreads "" } +// { dg-require-effective-target hosted } + +#include <memory> +#include <chrono> +#include <thread> +#include <barrier> + +std::shared_ptr<int> s = std::make_shared<int>(42); +std::weak_ptr<int> q = s; +std::atomic<std::weak_ptr<int>> p = q; + +std::barrier bar(2); + +void signaller() +{ + std::this_thread::sleep_for(std::chrono::seconds(1)); + p.store(std::shared_ptr<int>(s, nullptr)); + p.notify_one(); + bar.arrive_and_wait(); +} + +int main(int, char**) +{ + std::thread thr(signaller); + p.wait(q); + bar.arrive_and_wait(); + thr.join(); +} diff --git a/libstdc++-v3/testsuite/30_threads/condition_variable/members/116586.cc b/libstdc++-v3/testsuite/30_threads/condition_variable/members/116586.cc new file mode 100644 index 0000000..7114007 --- /dev/null +++ b/libstdc++-v3/testsuite/30_threads/condition_variable/members/116586.cc @@ -0,0 +1,60 @@ +// { dg-do run { target c++11 } } + +#include <condition_variable> +#include <chrono> +#include <mutex> +#include <initializer_list> +#include <testsuite_hooks.h> + +namespace chrono = std::chrono; + +// thread.timedmutex.requirements.general: +// If abs_time has already passed, the function attempts to obtain +// ownership without blocking (as if by calling try_lock()). + +template <typename Clock> +void +test_absolute(chrono::nanoseconds offset) +{ + std::mutex mtx; + std::condition_variable cv; + chrono::time_point<Clock> tp(offset); + std::unique_lock<std::mutex> lock(mtx); + // Doesn't cope with spurious wakeup + VERIFY(cv.wait_until(lock, tp) == std::cv_status::timeout); +} + +// The type of clock used for the actual wait depends on whether +// _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK is defined. We might as well just test +// both steady_clock and system_clock. +template <typename Clock> +void +test_relative(chrono::nanoseconds offset) +{ + std::mutex mtx; + std::condition_variable cv; + const auto d = -Clock::now().time_since_epoch() + offset; + std::unique_lock<std::mutex> lock(mtx); + // Doesn't cope with spurious wakeup + VERIFY(cv.wait_for(lock, d) == std::cv_status::timeout); +} + +int main() +{ + // It's not really possible to arrange for the relative calls to have + // tv_nsec == 0 due to time advancing. 
+ for (const chrono::nanoseconds offset : { + // tv_sec == 0, tv_nsec == 0 + chrono::nanoseconds{0}, + // tv_sec == 0, tv_nsec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::milliseconds{-10}), + // tv_sec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::seconds{-10}) + }) { + test_absolute<chrono::system_clock>(offset); + test_relative<chrono::system_clock>(offset); + + test_absolute<chrono::steady_clock>(offset); + test_relative<chrono::steady_clock>(offset); + } +} diff --git a/libstdc++-v3/testsuite/30_threads/future/members/116586.cc b/libstdc++-v3/testsuite/30_threads/future/members/116586.cc new file mode 100644 index 0000000..b7cd12c --- /dev/null +++ b/libstdc++-v3/testsuite/30_threads/future/members/116586.cc @@ -0,0 +1,55 @@ +// { dg-do run { target c++11 } } + +#include <future> +#include <chrono> +#include <initializer_list> +#include <testsuite_hooks.h> + +namespace chrono = std::chrono; + +// thread.timedmutex.requirements.general: +// If abs_time has already passed, the function attempts to obtain +// ownership without blocking (as if by calling try_lock()). + +template <typename Clock> +void +test_absolute(chrono::nanoseconds offset) +{ + std::promise<int> p; + std::future<int> f = p.get_future(); + const chrono::time_point<Clock> tp(offset); + VERIFY(f.wait_until(tp) == std::future_status::timeout); +} + +// The type of clock used for the actual wait depends on whether +// _GLIBCXX_HAVE_LINUX_FUTEX is defined. We might as well just test both +// steady_clock and system_clock. +template <typename Clock> +void +test_relative(chrono::nanoseconds offset) +{ + std::promise<int> p; + std::future<int> f = p.get_future(); + const auto d = -Clock::now().time_since_epoch() + offset; + VERIFY(f.wait_for(d) == std::future_status::timeout); +} + +int main() +{ + // It's not really possible to arrange for the relative calls to have tv_nsec + // == 0 due to time advancing. + for (const chrono::nanoseconds offset : { + // tv_sec == 0, tv_nsec == 0 + chrono::nanoseconds{0}, + // tv_sec == 0, tv_nsec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::milliseconds{-10}), + // tv_sec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::seconds{-10}) + }) { + test_absolute<chrono::system_clock>(offset); + test_relative<chrono::system_clock>(offset); + + test_absolute<chrono::steady_clock>(offset); + test_relative<chrono::steady_clock>(offset); + } +} diff --git a/libstdc++-v3/testsuite/30_threads/recursive_timed_mutex/try_lock_until/116586.cc b/libstdc++-v3/testsuite/30_threads/recursive_timed_mutex/try_lock_until/116586.cc new file mode 100644 index 0000000..941f3af --- /dev/null +++ b/libstdc++-v3/testsuite/30_threads/recursive_timed_mutex/try_lock_until/116586.cc @@ -0,0 +1,72 @@ +// { dg-do run { target c++11 } } + +#include <mutex> +#include <chrono> +#include <future> +#include <initializer_list> +#include <testsuite_hooks.h> + +namespace chrono = std::chrono; + +// thread.timedmutex.requirements.general: +// If abs_time has already passed, the function attempts to obtain +// ownership without blocking (as if by calling try_lock()). + +template <typename Clock> +void +test_absolute(chrono::nanoseconds offset) +{ + std::recursive_timed_mutex mtx; + chrono::time_point<Clock> tp(offset); + VERIFY(mtx.try_lock_until(tp)); + + { + // To test failing to lock a recursive mutex we need to try to lock on a + // different thread. 
+ auto t = std::async(std::launch::async, [&mtx, tp]() { + VERIFY(!mtx.try_lock_until(tp)); + }); + } +} + +// The type of clock used for the actual wait depends on whether +// _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK is defined. We might as well just test +// both steady_clock and system_clock. +template <typename Clock> +void +test_relative(chrono::nanoseconds offset) +{ + std::recursive_timed_mutex mtx; + const auto d = -Clock::now().time_since_epoch() + offset; + VERIFY(mtx.try_lock_for(d)); + + { + // To test failing to lock a recursive mutex we need to try to lock on a + // different thread. + auto t = std::async(std::launch::async, [&mtx, d]() { + VERIFY(!mtx.try_lock_for(d)); + }); + } +} + +int main() +{ + // Try once with an offset that ought to result in tv_sec == 0, tv_nsec < 0 + // and one with an offset that ought to result in tv_sec < 0, tv_nsec == 0 + // for the absolute calls at least. It's not really possible to arrange for + // the relative calls to have tv_nsec == 0 due to time advancing. + for (const chrono::nanoseconds offset : { + // tv_sec == 0, tv_nsec == 0 + chrono::nanoseconds{0}, + // tv_sec == 0, tv_nsec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::milliseconds{-10}), + // tv_sec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::seconds{-10}) + }) { + test_absolute<chrono::system_clock>(offset); + test_relative<chrono::system_clock>(offset); + + test_absolute<chrono::steady_clock>(offset); + test_relative<chrono::steady_clock>(offset); + } +} diff --git a/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_for.cc b/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_for.cc index 39681c7..94acb25 100644 --- a/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_for.cc +++ b/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_for.cc @@ -24,6 +24,7 @@ #include <chrono> #include <thread> #include <atomic> +#include <initializer_list> #include <testsuite_hooks.h> void test01() @@ -90,9 +91,30 @@ test03() s.try_acquire_for(timeout); } +// Prove semaphore doesn't suffer from PR116586 +template <typename Clock> +void +test_relative(std::chrono::nanoseconds offset) +{ + std::binary_semaphore sem(1); + VERIFY(sem.try_acquire_for(offset)); + VERIFY(!sem.try_acquire_for(offset)); +} + int main() { test01(); test02(); test03(); + using namespace std::chrono; + for (const nanoseconds offset : { + nanoseconds{0}, + nanoseconds{-10ms}, + nanoseconds{-10s} + }) { + test_relative<std::chrono::system_clock>(offset); + test_relative<std::chrono::system_clock>(offset - std::chrono::system_clock::now().time_since_epoch()); + test_relative<std::chrono::steady_clock>(offset); + test_relative<std::chrono::steady_clock>(offset - std::chrono::steady_clock::now().time_since_epoch()); + } } diff --git a/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_until.cc b/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_until.cc index de0068d..ed6bd11 100644 --- a/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_until.cc +++ b/libstdc++-v3/testsuite/30_threads/semaphore/try_acquire_until.cc @@ -24,6 +24,7 @@ #include <chrono> #include <thread> #include <atomic> +#include <initializer_list> #include <testsuite_hooks.h> void test01() @@ -87,8 +88,31 @@ void test02() b.wait(1); } +// Prove semaphore doesn't suffer from PR116586 +template <typename Clock> +void +test_absolute(std::chrono::nanoseconds offset) +{ + std::binary_semaphore sem(1); + std::chrono::time_point<Clock> tp(offset); + VERIFY(sem.try_acquire_until(tp)); + 
VERIFY(!sem.try_acquire_until(tp)); +} + int main() { test01(); test02(); + using namespace std::chrono; + for (const nanoseconds offset : { + // tv_sec == 0, tv_nsec == 0 + nanoseconds{0}, + // tv_sec == 0, tv_nsec < 0 + nanoseconds{-10ms}, + // tv_sec < 0 + nanoseconds{-10s} + }) { + test_absolute<std::chrono::system_clock>(offset); + test_absolute<std::chrono::steady_clock>(offset); + } } diff --git a/libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc b/libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc new file mode 100644 index 0000000..cebbb3a --- /dev/null +++ b/libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc @@ -0,0 +1,97 @@ +// { dg-do run { target c++14 } } + +#include <shared_mutex> +#include <chrono> +#include <future> +#include <initializer_list> +#include <testsuite_hooks.h> + +namespace chrono = std::chrono; + +// thread.timedmutex.requirements.general: +// If abs_time has already passed, the function attempts to obtain +// ownership without blocking (as if by calling try_lock()). + +template <typename Clock> +void +test_exclusive_absolute(chrono::nanoseconds offset) +{ + std::shared_timed_mutex stm; + chrono::time_point<Clock> tp(offset); + VERIFY(stm.try_lock_until(tp)); + VERIFY(!stm.try_lock_until(tp)); +} + +template <typename Clock> +void +test_shared_absolute(chrono::nanoseconds offset) +{ + std::shared_timed_mutex stm; + chrono::time_point<Clock> tp(offset); + VERIFY(stm.try_lock_shared_until(tp)); + stm.unlock_shared(); + + VERIFY(stm.try_lock_for(chrono::seconds{10})); + + { + // NPTL will give us EDEADLK if pthread_rwlock_timedrdlock() is called on + // the same thread that already holds the exclusive (write) lock, so let's + // arrange for a different thread to try to acquire the shared lock. + auto t = std::async(std::launch::async, [&stm, tp]() { + VERIFY(!stm.try_lock_shared_until(tp)); + }); + } +} + +// The type of clock used for the actual wait depends on whether +// _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK is defined. We might as well just test +// both steady_clock and system_clock. +template <typename Clock> +void +test_exclusive_relative(chrono::nanoseconds offset) +{ + std::shared_timed_mutex stm; + const auto d = -Clock::now().time_since_epoch() + offset; + VERIFY(stm.try_lock_for(d)); + VERIFY(!stm.try_lock_for(d)); +} + +template <typename Clock> +void +test_shared_relative(chrono::nanoseconds offset) +{ + std::shared_timed_mutex stm; + const auto d = -Clock::now().time_since_epoch() + offset; + VERIFY(stm.try_lock_shared_for(d)); + stm.unlock_shared(); + // Should complete immediately + VERIFY(stm.try_lock_for(chrono::seconds{10})); + VERIFY(!stm.try_lock_shared_for(d)); +} + +int main() +{ + // Try once with an offset that ought to result in tv_sec == 0, tv_nsec < 0 + // and one with an offset that ought to result in tv_sec < 0, tv_nsec == 0 + // for the absolute calls at least. It's not really possible to arrange for + // the relative calls to have tv_nsec == 0 due to time advancing. 
+ using namespace std::chrono_literals; + for (const chrono::nanoseconds offset : { + // tv_sec == 0, tv_nsec == 0 + chrono::nanoseconds{0}, + // tv_sec == 0, tv_nsec < 0 + chrono::nanoseconds{-10ms}, + // tv_sec < 0 + chrono::nanoseconds{-10s} + }) { + test_exclusive_absolute<chrono::system_clock>(offset); + test_shared_absolute<chrono::system_clock>(offset); + test_exclusive_relative<chrono::system_clock>(offset); + test_shared_relative<chrono::system_clock>(offset); + + test_exclusive_absolute<chrono::steady_clock>(offset); + test_shared_absolute<chrono::steady_clock>(offset); + test_exclusive_relative<chrono::steady_clock>(offset); + test_shared_relative<chrono::steady_clock>(offset); + } +} diff --git a/libstdc++-v3/testsuite/30_threads/this_thread/113327.cc b/libstdc++-v3/testsuite/30_threads/this_thread/113327.cc new file mode 100644 index 0000000..2daa2b0 --- /dev/null +++ b/libstdc++-v3/testsuite/30_threads/this_thread/113327.cc @@ -0,0 +1,29 @@ +// { dg-do run { target c++11 } } +// { dg-additional-options "-pthread" { target pthread } } +// { dg-require-gthreads "" } + +// PR libstdc++/113327 +// std::sleep_for(std::chrono::hours::max()) returns immediately + +#include <thread> +#include <chrono> +#include <cstdlib> +#include <csignal> + +int main() +{ + std::thread sleepy([] { + // Rather than overflowing to a negative value, the timeout should be + // truncated to seconds::max() and so sleep for 292 billion years. + std::this_thread::sleep_for(std::chrono::minutes::max()); + // This should not happen: + throw 1; + }); + // Give the new thread a chance to start sleeping: + std::this_thread::yield(); + std::this_thread::sleep_for(std::chrono::seconds(2)); + // If we get here without the other thread throwing an exception + // then it should be sleeping peacefully, so the test passed. + // pthread_kill(sleepy.native_handle(), SIGINT); + std::_Exit(0); +} diff --git a/libstdc++-v3/testsuite/30_threads/this_thread/sleep_for.cc b/libstdc++-v3/testsuite/30_threads/this_thread/sleep_for.cc index 3f55ccc..5b0518d 100644 --- a/libstdc++-v3/testsuite/30_threads/this_thread/sleep_for.cc +++ b/libstdc++-v3/testsuite/30_threads/this_thread/sleep_for.cc @@ -37,7 +37,20 @@ test01() VERIFY( (chr::system_clock::now() - begin) >= ms ); } +void +test_negative() +{ + chr::system_clock::time_point begin = chr::system_clock::now(); + + std::this_thread::sleep_for(-chr::hours(8)); + + // That should have completed immediately, but be generous because we don't + // want spurious failures on busy machines. 
+ VERIFY( (chr::system_clock::now() - begin) < chr::seconds(10) ); +} + int main() { test01(); + test_negative(); } diff --git a/libstdc++-v3/testsuite/30_threads/this_thread/sleep_until.cc b/libstdc++-v3/testsuite/30_threads/this_thread/sleep_until.cc index 1fb82b6..8c70c2e 100644 --- a/libstdc++-v3/testsuite/30_threads/this_thread/sleep_until.cc +++ b/libstdc++-v3/testsuite/30_threads/this_thread/sleep_until.cc @@ -26,18 +26,36 @@ namespace chr = std::chrono; +template <typename Clock> void test01() { - chr::system_clock::time_point begin = chr::system_clock::now(); + typename Clock::time_point begin = Clock::now(); chr::microseconds ms(500); - std::this_thread::sleep_until(chr::system_clock::now() + ms); + std::this_thread::sleep_until(Clock::now() + ms); - VERIFY( (chr::system_clock::now() - begin) >= ms ); + VERIFY( (Clock::now() - begin) >= ms ); +} + +template <typename Clock> +void +test_negative() +{ + typename Clock::time_point begin = Clock::now(); + + typename Clock::time_point tp(-chr::hours(8)); + std::this_thread::sleep_until(tp); + + // That should have completed immediately, but be generous because we don't + // want spurious failures on busy machines. + VERIFY( (Clock::now() - begin) < chr::seconds(10) ); } int main() { - test01(); + test01<chr::steady_clock>(); + test01<chr::system_clock>(); + test_negative<chr::steady_clock>(); + test_negative<chr::system_clock>(); } diff --git a/libstdc++-v3/testsuite/30_threads/timed_mutex/try_lock_until/116586.cc b/libstdc++-v3/testsuite/30_threads/timed_mutex/try_lock_until/116586.cc new file mode 100644 index 0000000..dcba7aa --- /dev/null +++ b/libstdc++-v3/testsuite/30_threads/timed_mutex/try_lock_until/116586.cc @@ -0,0 +1,57 @@ +// { dg-do run { target c++11 } } + +#include <chrono> +#include <mutex> +#include <initializer_list> +#include <testsuite_hooks.h> + +namespace chrono = std::chrono; + +// thread.timedmutex.requirements.general: +// If abs_time has already passed, the function attempts to obtain +// ownership without blocking (as if by calling try_lock()). + +template <typename Clock> +void +test_absolute(chrono::nanoseconds offset) +{ + std::timed_mutex mtx; + chrono::time_point<Clock> tp(offset); + VERIFY(mtx.try_lock_until(tp)); + VERIFY(!mtx.try_lock_until(tp)); +} + +// The type of clock used for the actual wait depends on whether +// _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK is defined. We might as well just test +// both steady_clock and system_clock. +template <typename Clock> +void +test_relative(chrono::nanoseconds offset) +{ + std::timed_mutex mtx; + const auto d = -Clock::now().time_since_epoch() + offset; + VERIFY(mtx.try_lock_for(d)); + VERIFY(!mtx.try_lock_for(d)); +} + +int main() +{ + // Try once with an offset that ought to result in tv_sec == 0, tv_nsec < 0 + // and one with an offset that ought to result in tv_sec < 0, tv_nsec == 0 + // for the absolute calls at least. It's not really possible to arrange for + // the relative calls to have tv_nsec == 0 due to time advancing. 
+ for (const chrono::nanoseconds offset : { + // tv_sec == 0, tv_nsec == 0 + chrono::nanoseconds{0}, + // tv_sec == 0, tv_nsec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::milliseconds{-10}), + // tv_sec < 0 + chrono::duration_cast<chrono::nanoseconds>(chrono::seconds{-10}) + }) { + test_absolute<chrono::system_clock>(offset); + test_relative<chrono::system_clock>(offset); + + test_absolute<chrono::steady_clock>(offset); + test_relative<chrono::steady_clock>(offset); + } +} diff --git a/libstdc++-v3/testsuite/std/format/ranges/string.cc b/libstdc++-v3/testsuite/std/format/ranges/string.cc index 99e5eaf..bef2cc7 100644 --- a/libstdc++-v3/testsuite/std/format/ranges/string.cc +++ b/libstdc++-v3/testsuite/std/format/ranges/string.cc @@ -279,6 +279,93 @@ void test_padding() VERIFY( strip_prefix(resv, 46, '*') ); VERIFY( strip_quotes(resv) ); VERIFY( resv == in ); + + // width is 5, size is 15 + in = "\u2160\u2161\u2162\u2163\u2164"; + in += in; // width is 10, size is 30 + in += in; // width is 20, size is 60 + in += in; // width is 40, size is 120 + in += in; // width is 80, size is 240 + in += in; // width is 160, size is 480 + + lc.assign_range(in); + + resv = res = std::format("{:s}", lc); + VERIFY( resv == in ); + + resv = res = std::format("{:*>10s}", lc); + VERIFY( resv == in ); + + resv = res = std::format("{:*>200s}", lc); + VERIFY( strip_prefix(resv, 40, '*') ); + VERIFY( resv == in ); + + resv = res = std::format("{:?s}", lc); + VERIFY( strip_quotes(resv) ); + VERIFY( resv == in ); + + resv = res = std::format("{:*>10?s}", lc); + VERIFY( strip_quotes(resv) ); + VERIFY( resv == in ); + + resv = res = std::format("{:*>200?s}", lc); + VERIFY( strip_prefix(resv, 38, '*') ); + VERIFY( strip_quotes(resv) ); + VERIFY( resv == in ); +} + +void test_escaping() +{ + std::string res; + std::string_view resv; + + const std::string_view input = + "\t\n\r\\\"" + "\u008a" // Cc, Control, Line Tabulation Set, + "\u00ad" // Cf, Format, Soft Hyphen + "\u1d3d" // Lm, Modifier letter, Modifier Letter Capital Ou + "\u00a0" // Zs, Space Separator, No-Break Space (NBSP) + "\u2029" // Zp, Paragraph Separator, Paragraph Separator + "\U0001f984" // So, Other Symbol, Unicorn Face + ; + const std::string_view output = + R"(\t\n\r\\\")" + R"(\u{8a})" + R"(\u{ad})" + "\u1d3d" + R"(\u{a0})" + R"(\u{2029})" + "\U0001f984"; + + std::forward_list<char> lc(std::from_range, input); + resv = res = std::format("{:s}", lc); + VERIFY( resv == input ); + resv = res = std::format("{:?s}", lc); + VERIFY( strip_quotes(resv) ); + VERIFY( resv == output ); + + // width is 5, size is 15 + std::string in = "\u2160\u2161\u2162\u2163\u2164"; + in += in; // width is 10, size is 30 + in += in; // width is 20, size is 60 + in += in; // width is 40, size is 120 + in += in; // width is 80, size is 240 + in += in; // width is 160, size is 480 + std::string_view inv = in; + + // last character is incomplete + lc.assign_range(inv.substr(0, 479)); + + // non-debug format, chars copied as is + resv = res = std::format("{:s}", lc); + VERIFY( resv == inv.substr(0, 479) ); + + // debug format, incomplete code-point sequence is escaped + resv = res = std::format("{:?s}", lc); + VERIFY( strip_quotes(resv) ); + VERIFY( resv.substr(0, 477) == inv.substr(0, 477) ); + resv.remove_prefix(477); + VERIFY( resv == R"(\x{e2}\x{85})" ); } int main() { @@ -287,4 +374,6 @@ int main() test_outputs<char>(); test_outputs<wchar_t>(); test_nested(); + test_padding(); + test_escaping(); } diff --git a/libstdc++-v3/testsuite/std/time/month_day/io.cc
index 30aa588..c3ae180 100644
--- a/libstdc++-v3/testsuite/std/time/month_day/io.cc
+++ b/libstdc++-v3/testsuite/std/time/month_day/io.cc
@@ -23,6 +23,45 @@ test_ostream()
 }
 
 void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%b%%%B%t%m%n %d%%%e}", month(1)/day(3));
+  VERIFY( s == "Jan%January\t01\n 03% 3" );
+  s = std::format(loc_fr, "{:L%b%%%B%t%m%n %d%%%e}", month(1)/day(3));
+  VERIFY( s == "janv.%janvier\t01\n 03% 3");
+
+  s = std::format("{0:%m/%d} {0}", month(10)/day(13));
+  VERIFY( s == "10/13 Oct/13" );
+  s = std::format("{0:%m/%d} {0}", month(13)/day(34));
+  VERIFY( s == "13/34 13 is not a valid month/34 is not a valid day" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "bBdehm";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto md = month(1)/day(10);
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(md));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
+void
 test_parse()
 {
   using namespace std::chrono;
@@ -102,6 +141,6 @@ test_parse()
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
   test_parse();
 }
diff --git a/libstdc++-v3/testsuite/std/time/month_day_last/io.cc b/libstdc++-v3/testsuite/std/time/month_day_last/io.cc
index d15192c..484a8d8 100644
--- a/libstdc++-v3/testsuite/std/time/month_day_last/io.cc
+++ b/libstdc++-v3/testsuite/std/time/month_day_last/io.cc
@@ -22,8 +22,47 @@ test_ostream()
   VERIFY( ss.str() == "juil./last" );
 }
 
+void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%b%%%B%t%m%n}", month(3)/last);
+  VERIFY( s == "Mar%March\t03\n" );
+  s = std::format(loc_fr, "{:L%b%%%B%t%m%n}", month(3)/last);
+  VERIFY( s == "mars%mars\t03\n");
+
+  s = std::format("{0:%m/last} {0}", month(4)/last);
+  VERIFY( s == "04/last Apr/last" );
+  s = std::format("{0:%m/last} {0}", month(0)/last);
+  VERIFY( s == "00/last 0 is not a valid month/last" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "bBhm";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto mdl = month(1)/last;
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(mdl));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
 }
diff --git a/libstdc++-v3/testsuite/std/time/month_weekday/io.cc b/libstdc++-v3/testsuite/std/time/month_weekday/io.cc
index 1838030..0c2dcaf 100644
--- a/libstdc++-v3/testsuite/std/time/month_weekday/io.cc
+++ b/libstdc++-v3/testsuite/std/time/month_weekday/io.cc
@@ -23,8 +23,47 @@ test_ostream()
   VERIFY( ss.str() == "juil./jeu.[4]" );
 }
 
+void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%b%%%B%t%m%n %a%%%A%t%u%n%w}", month(5)/weekday(1)[2]);
+  VERIFY( s == "May%May\t05\n Mon%Monday\t1\n1" );
+  s = std::format(loc_fr, "{:L%b%%%B%t%m%n %a%%%A%t%u%n%w}", month(5)/weekday(1)[2]);
+  VERIFY( s == "mai%mai\t05\n lun.%lundi\t1\n1");
+
+  s = std::format("{0:%m/%u[]} {0}", month(9)/weekday(0)[2]);
+  VERIFY( s == "09/7[] Sep/Sun[2]" );
+  s = std::format("{0:%m/%u[]} {0}", month(111)/weekday(8)[0]);
+  VERIFY( s == "111/8[] 111 is not a valid month/8 is not a valid weekday[0 is not a valid index]" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "aAbBhmuw";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto mwi = month(1)/weekday(1)[1];
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(mwi));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
 }
diff --git a/libstdc++-v3/testsuite/std/time/month_weekday_last/io.cc b/libstdc++-v3/testsuite/std/time/month_weekday_last/io.cc
index 6ba4d8a..2c29258 100644
--- a/libstdc++-v3/testsuite/std/time/month_weekday_last/io.cc
+++ b/libstdc++-v3/testsuite/std/time/month_weekday_last/io.cc
@@ -23,8 +23,47 @@ test_ostream()
   VERIFY( ss.str() == "juil./jeu.[last]" );
 }
 
+void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%b%%%B%t%m%n %a%%%A%t%u%n%w}", month(6)/weekday(2)[last]);
+  VERIFY( s == "Jun%June\t06\n Tue%Tuesday\t2\n2" );
+  s = std::format(loc_fr, "{:L%b%%%B%t%m%n %a%%%A%t%u%n%w}", month(6)/weekday(2)[last]);
+  VERIFY( s == "juin%juin\t06\n mar.%mardi\t2\n2");
+
+  s = std::format("{0:%m/%w[last]} {0}", month(8)/weekday(7)[last]);
+  VERIFY( s == "08/0[last] Aug/Sun[last]" );
+  s = std::format("{0:%m/%w[last]} {0}", month(70)/weekday(9)[last]);
+  VERIFY( s == "70/9[last] 70 is not a valid month/9 is not a valid weekday[last]" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "aAbBhmuw";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto mwl = month(1)/weekday(1)[last];
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(mwl));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
 }
diff --git a/libstdc++-v3/testsuite/std/time/weekday_indexed/io.cc b/libstdc++-v3/testsuite/std/time/weekday_indexed/io.cc
index ca315de..ae86419 100644
--- a/libstdc++-v3/testsuite/std/time/weekday_indexed/io.cc
+++ b/libstdc++-v3/testsuite/std/time/weekday_indexed/io.cc
@@ -22,8 +22,47 @@ test_ostream()
   VERIFY( ss.str() == "sam.[1]" );
 }
 
+void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%a%%%A%t%u%n%w}", weekday(7)[3]);
+  VERIFY( s == "Sun%Sunday\t7\n0" );
+  s = std::format(loc_fr, "{:L%a%%%A%t%u%n%w}", weekday(7)[3]);
+  VERIFY( s == "dim.%dimanche\t7\n0");
+
+  s = std::format("{0:%w[]} {0}", weekday(4)[4]);
+  VERIFY( s == "4[] Thu[4]" );
+  s = std::format("{0:%w[]} {0}", weekday(10)[7]);
+  VERIFY( s == "10[] 10 is not a valid weekday[7 is not a valid index]" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "aAuw";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto wi = weekday(1)[1];
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(wi));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
 }
diff --git a/libstdc++-v3/testsuite/std/time/weekday_last/io.cc b/libstdc++-v3/testsuite/std/time/weekday_last/io.cc
index 3b64c65..49cf0d5 100644
--- a/libstdc++-v3/testsuite/std/time/weekday_last/io.cc
+++ b/libstdc++-v3/testsuite/std/time/weekday_last/io.cc
@@ -22,8 +22,47 @@ test_ostream()
   VERIFY( ss.str() == "sam.[last]" );
 }
 
+void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%a%%%A%t%u%n%w}", weekday(5)[last]);
+  VERIFY( s == "Fri%Friday\t5\n5" );
+  s = std::format(loc_fr, "{:L%a%%%A%t%u%n%w}", weekday(5)[last]);
+  VERIFY( s == "ven.%vendredi\t5\n5");
+
+  s = std::format("{0:%w[last]} {0}", weekday(6)[last]);
+  VERIFY( s == "6[last] Sat[last]" );
+  s = std::format("{0:%w[last]} {0}", weekday(9)[last]);
+  VERIFY( s == "9[last] 9 is not a valid weekday[last]" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "aAuw";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto wl = weekday(1)[last];
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(wl));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
 }
diff --git a/libstdc++-v3/testsuite/std/time/year_month/io.cc b/libstdc++-v3/testsuite/std/time/year_month/io.cc
index 7bb3442..3392eb3 100644
--- a/libstdc++-v3/testsuite/std/time/year_month/io.cc
+++ b/libstdc++-v3/testsuite/std/time/year_month/io.cc
@@ -23,6 +23,45 @@ test_ostream()
 }
 
 void
+test_format()
+{
+  using namespace std::chrono;
+  std::locale loc_fr(ISO_8859(15,fr_FR));
+
+  auto s = std::format("{:%C%%%y\t%Y %b%%%B%t%m%n}", year(2019)/month(4));
+  VERIFY( s == "20%19\t2019 Apr%April\t04\n" );
+  s = std::format(loc_fr, "{:L%C%%%y\t%Y %b%%%B%t%m%n}", year(2019)/month(4));
+  VERIFY( s == "20%19\t2019 avril%avril\t04\n");
+
+  s = std::format("{0:%Y/%m} {0}", year(2018)/month(2));
+  VERIFY( s == "2018/02 2018/Feb" );
+  s = std::format("{0:%Y/%m} {0}", year(-32768)/month(15));
+  VERIFY( s == "-32768/15 -32768 is not a valid year/15 is not a valid month" );
+
+  std::string_view specs = "aAbBcCdDeFgGhHIjmMpqQrRSTuUVwWxXyYzZ";
+  std::string_view my_specs = "CbBhmyY";
+  for (char c : specs)
+  {
+    char fmt[] = { '{', ':', '%', c, '}' };
+    try
+    {
+      auto ym = year(2013)/month(1);
+      (void) std::vformat(std::string_view(fmt, 5), std::make_format_args(ym));
+      // The call above should throw for any conversion-spec not in my_specs:
+      VERIFY(my_specs.find(c) != my_specs.npos);
+    }
+    catch (const std::format_error& e)
+    {
+      VERIFY(my_specs.find(c) == my_specs.npos);
+      std::string_view s = e.what();
+      // Libstdc++-specific message:
+      VERIFY(s.find("format argument does not contain the information "
+                    "required by the chrono-specs") != s.npos);
+    }
+  }
+}
+
+void
 test_parse()
 {
   using namespace std::chrono;
@@ -73,6 +112,6 @@ test_parse()
 int main()
 {
   test_ostream();
-  // TODO: test_format();
+  test_format();
   test_parse();
 }
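
Editor's note (not part of the patch): the new test_format() functions above all exercise the same behaviour of chrono formatting, namely that a calendar type formats normally with conversion specifiers it can satisfy, while std::vformat throws std::format_error for any specifier whose information the argument does not carry. A minimal standalone sketch of that behaviour, with expected output taken from the tests above:

// Sketch only; requires a C++20 (or later) libstdc++ with <chrono> formatters.
#include <chrono>
#include <format>
#include <iostream>

int main()
{
  using namespace std::chrono;

  // month_day provides month and day information, so %m and %d work:
  std::cout << std::format("{:%m/%d}", month(10)/day(13)) << '\n'; // prints "10/13"

  try
  {
    // %H asks for an hour, which a month_day cannot supply, so this throws.
    auto md = month(10)/day(13);
    (void) std::vformat("{:%H}", std::make_format_args(md));
  }
  catch (const std::format_error& e)
  {
    std::cout << "format_error: " << e.what() << '\n';
  }
}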