Diffstat (limited to 'gcc')
70 files changed, 765 insertions, 1462 deletions
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt index a7fd14a..b7ce67a 100644 --- a/gcc/c-family/c.opt +++ b/gcc/c-family/c.opt @@ -774,7 +774,7 @@ C++ ObjC++ Var(warn_extra_semi) Init(-1) Warning Warn about semicolon after in-class function definition. Wflex-array-member-not-at-end -C C++ Var(warn_flex_array_member_not_at_end) Warning +C ObjC C++ ObjC++ Var(warn_flex_array_member_not_at_end) Warning Warn when a structure containing a C99 flexible array member as the last field is not at the end of another structure. @@ -866,7 +866,7 @@ C ObjC C++ ObjC++ Var(warn_if_not_aligned) Init(1) Warning Warn when the field in a struct is not aligned. Wignored-qualifiers -C C++ Var(warn_ignored_qualifiers) Warning EnabledBy(Wextra) +C ObjC C++ ObjC++ Var(warn_ignored_qualifiers) Warning EnabledBy(Wextra) Warn whenever type qualifiers are ignored. Wignored-attributes @@ -1013,7 +1013,7 @@ C ObjC C++ ObjC++ Var(warn_memset_transposed_args) Warning LangEnabledBy(C ObjC Warn about suspicious calls to memset where the third argument is constant literal zero and the second is not. Wmisleading-indentation -C C++ Common Var(warn_misleading_indentation) Warning LangEnabledBy(C C++,Wall) +C ObjC C++ ObjC++ Common Var(warn_misleading_indentation) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall) Warn when the indentation of the code does not reflect the block structure. Wmismatched-dealloc @@ -1187,7 +1187,7 @@ C ObjC Var(warn_old_style_definition) Init(-1) Warning Warn if an old-style parameter definition is used. Wopenacc-parallelism -C C++ Var(warn_openacc_parallelism) Warning +C ObjC C++ ObjC++ Var(warn_openacc_parallelism) Warning Warn about potentially suboptimal choices related to OpenACC parallelism. Wopenmp @@ -1195,7 +1195,7 @@ C ObjC C++ ObjC++ Warning Var(warn_openmp) Init(1) Warn about suspicious OpenMP code. Wopenmp-simd -C C++ Var(warn_openmp_simd) Warning LangEnabledBy(C C++,Wall) +C ObjC C++ ObjC++ Var(warn_openmp_simd) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall) Warn if a simd directive is overridden by the vectorizer cost model. Woverlength-strings @@ -1243,11 +1243,11 @@ C++ ObjC++ Var(warn_pessimizing_move) Warning LangEnabledBy(C++ ObjC++, Wall) Warn about calling std::move on a local object in a return statement preventing copy elision. Wplacement-new -C++ Warning Alias(Wplacement-new=, 1, 0) +C++ ObjC++ Warning Alias(Wplacement-new=, 1, 0) Warn for placement new expressions with undefined behavior. Wplacement-new= -C++ Joined RejectNegative UInteger Var(warn_placement_new) Init(-1) Warning IntegerRange(0, 2) +C++ ObjC++ Joined RejectNegative UInteger Var(warn_placement_new) Init(-1) Warning IntegerRange(0, 2) Warn for placement new expressions with undefined behavior. Wpmf-conversions @@ -1417,7 +1417,7 @@ C ObjC C++ ObjC++ LangEnabledBy(C ObjC C++ ObjC++,Wall, 3, 0) IntegerRange(0, 3) ; Wstrict-flex-arrays -C C++ Var(warn_strict_flex_arrays) Warning +C ObjC C++ ObjC++ Var(warn_strict_flex_arrays) Warning Warn about improper usages of flexible array members according to the level of -fstrict-flex-arrays. @@ -1495,7 +1495,7 @@ C ObjC C++ ObjC++ Var(warn_switch_outside_range) Warning Init(1) Warn about switch values that are outside of the switch's type range. Wsync-nand -C C++ Var(warn_sync_nand) Init(1) Warning +C ObjC C++ ObjC++ Var(warn_sync_nand) Init(1) Warning Warn when __sync_fetch_and_nand and __sync_nand_and_fetch built-in functions are used. 
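The c.opt hunks above extend several existing diagnostics (-Wflex-array-member-not-at-end, -Wignored-qualifiers, -Wmisleading-indentation, -Wopenmp-simd, -Wstrict-flex-arrays, -Wsync-nand, ...) from C/C++ to Objective-C and Objective-C++. A rough sketch of code that should now be diagnosed when compiled as Objective-C follows; the file name and invocation are illustrative assumptions, not taken from the patch:

/* flex.m (hypothetical) -- e.g. gcc -x objective-c -c flex.m -Wflex-array-member-not-at-end */
struct tail {
  int len;
  int data[];        /* C99 flexible array member as the last field */
};

struct outer {
  struct tail t;     /* struct containing a flexible array member, but not at the end... */
  int trailer;       /* ...of the enclosing structure, so the newly ObjC-enabled warning should fire */
};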
Wsynth @@ -1900,35 +1900,35 @@ EnumValue Enum(on_off) String(on) Value(1) fcontract-assumption-mode= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-assumption-mode=[on|off] Enable or disable treating axiom level contracts as assumptions (default on). fcontract-build-level= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-build-level=[off|default|audit] Specify max contract level to generate runtime checks for. fcontract-strict-declarations= -C++ Var(flag_contract_strict_declarations) Enum(on_off) Joined Init(0) RejectNegative +C++ ObjC++ Var(flag_contract_strict_declarations) Enum(on_off) Joined Init(0) RejectNegative -fcontract-strict-declarations=[on|off] Enable or disable warnings on generalized redeclaration of functions with contracts (default off). fcontract-mode= -C++ Var(flag_contract_mode) Enum(on_off) Joined Init(1) RejectNegative +C++ ObjC++ Var(flag_contract_mode) Enum(on_off) Joined Init(1) RejectNegative -fcontract-mode=[on|off] Enable or disable all contract facilities (default on). fcontract-continuation-mode= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-continuation-mode=[on|off] Enable or disable contract continuation mode (default off). fcontract-role= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-role=<name>:<semantics> Specify the semantics for all levels in a role (default, review), or a custom contract role with given semantics (ex: opt:assume,assume,assume). fcontract-semantic= -C++ Joined RejectNegative +C++ ObjC++ Joined RejectNegative -fcontract-semantic=<level>:<semantic> Specify the concrete semantics for level. fcoroutines -C++ LTO Var(flag_coroutines) +C++ ObjC++ LTO Var(flag_coroutines) Enable C++ coroutines (experimental). fdebug-cpp @@ -2130,23 +2130,23 @@ C ObjC Var(warn_compare_distinct_pointer_types) Warning Init(1) Warn if pointers of distinct types are compared without a cast. flang-info-include-translate -C++ Var(note_include_translate_yes) +C++ ObjC++ Var(note_include_translate_yes) Note #include directives translated to import declarations. flang-info-include-translate-not -C++ Var(note_include_translate_no) +C++ ObjC++ Var(note_include_translate_no) Note #include directives not translated to import declarations, and not known to be textual. flang-info-include-translate= -C++ Joined RejectNegative MissingArgError(missing header name) +C++ ObjC++ Joined RejectNegative MissingArgError(missing header name) Note a #include translation of a specific header. flang-info-module-cmi -C++ Var(note_module_cmi_yes) +C++ ObjC++ Var(note_module_cmi_yes) Note Compiled Module Interface pathnames. flang-info-module-cmi= -C++ Joined RejectNegative MissingArgError(missing module name) +C++ ObjC++ Joined RejectNegative MissingArgError(missing module name) Note Compiled Module Interface pathname of a specific module or header-unit. fmax-include-depth= @@ -2357,10 +2357,10 @@ C++ ObjC++ Var(flag_sized_deallocation) Init(-1) Enable C++14 sized deallocation support. 
fstrict-flex-arrays -C C++ Common Alias(fstrict-flex-arrays=,3,0) +C ObjC C++ ObjC++ Common Alias(fstrict-flex-arrays=,3,0) fstrict-flex-arrays= -C C++ Common Joined RejectNegative UInteger Var(flag_strict_flex_arrays) Init(0) IntegerRange(0,3) +C ObjC C++ ObjC++ Common Joined RejectNegative UInteger Var(flag_strict_flex_arrays) Init(0) IntegerRange(0,3) -fstrict-flex-arrays=<level> Control when to treat the trailing array of a structure as a flexible array member for the purposes of accessing the elements of such an array. The default is treating all trailing arrays of structures as flexible array members. fsquangle diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h index 3f29f17..4efa2c0 100644 --- a/gcc/common/config/i386/cpuinfo.h +++ b/gcc/common/config/i386/cpuinfo.h @@ -1024,8 +1024,6 @@ get_available_features (struct __processor_model *cpu_model, set_feature (FEATURE_AMX_AVX512); if (eax & bit_AMX_TF32) set_feature (FEATURE_AMX_TF32); - if (eax & bit_AMX_TRANSPOSE) - set_feature (FEATURE_AMX_TRANSPOSE); if (eax & bit_AMX_FP8) set_feature (FEATURE_AMX_FP8); if (eax & bit_AMX_MOVRS) diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc index 9e807e4..d3509e1 100644 --- a/gcc/common/config/i386/i386-common.cc +++ b/gcc/common/config/i386/i386-common.cc @@ -134,8 +134,6 @@ along with GCC; see the file COPYING3. If not see (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_AVX512) #define OPTION_MASK_ISA2_AMX_TF32_SET \ (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_TF32) -#define OPTION_MASK_ISA2_AMX_TRANSPOSE_SET \ - (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_TRANSPOSE) #define OPTION_MASK_ISA2_AMX_FP8_SET \ (OPTION_MASK_ISA2_AMX_TILE_SET | OPTION_MASK_ISA2_AMX_FP8) #define OPTION_MASK_ISA2_MOVRS_SET OPTION_MASK_ISA2_MOVRS @@ -303,8 +301,8 @@ along with GCC; see the file COPYING3. If not see (OPTION_MASK_ISA2_AMX_TILE | OPTION_MASK_ISA2_AMX_INT8_UNSET \ | OPTION_MASK_ISA2_AMX_BF16_UNSET | OPTION_MASK_ISA2_AMX_FP16_UNSET \ | OPTION_MASK_ISA2_AMX_COMPLEX_UNSET | OPTION_MASK_ISA2_AMX_AVX512_UNSET \ - | OPTION_MASK_ISA2_AMX_TF32_UNSET | OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET \ - | OPTION_MASK_ISA2_AMX_FP8_UNSET | OPTION_MASK_ISA2_AMX_MOVRS_UNSET) + | OPTION_MASK_ISA2_AMX_TF32_UNSET | OPTION_MASK_ISA2_AMX_FP8_UNSET \ + | OPTION_MASK_ISA2_AMX_MOVRS_UNSET) #define OPTION_MASK_ISA2_AMX_INT8_UNSET OPTION_MASK_ISA2_AMX_INT8 #define OPTION_MASK_ISA2_AMX_BF16_UNSET OPTION_MASK_ISA2_AMX_BF16 #define OPTION_MASK_ISA2_UINTR_UNSET OPTION_MASK_ISA2_UINTR @@ -330,7 +328,6 @@ along with GCC; see the file COPYING3. 
If not see #define OPTION_MASK_ISA2_AVX10_2_UNSET OPTION_MASK_ISA2_AVX10_2 #define OPTION_MASK_ISA2_AMX_AVX512_UNSET OPTION_MASK_ISA2_AMX_AVX512 #define OPTION_MASK_ISA2_AMX_TF32_UNSET OPTION_MASK_ISA2_AMX_TF32 -#define OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET OPTION_MASK_ISA2_AMX_TRANSPOSE #define OPTION_MASK_ISA2_AMX_FP8_UNSET OPTION_MASK_ISA2_AMX_FP8 #define OPTION_MASK_ISA2_MOVRS_UNSET OPTION_MASK_ISA2_MOVRS #define OPTION_MASK_ISA2_AMX_MOVRS_UNSET OPTION_MASK_ISA2_AMX_MOVRS @@ -1396,20 +1393,6 @@ ix86_handle_option (struct gcc_options *opts, } return true; - case OPT_mamx_transpose: - if (value) - { - opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AMX_TRANSPOSE_SET; - opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AMX_TRANSPOSE_SET; - } - else - { - opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET; - opts->x_ix86_isa_flags2_explicit |= - OPTION_MASK_ISA2_AMX_TRANSPOSE_UNSET; - } - return true; - case OPT_mamx_fp8: if (value) { diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h index c73a87d..0e75626 100644 --- a/gcc/common/config/i386/i386-cpuinfo.h +++ b/gcc/common/config/i386/i386-cpuinfo.h @@ -269,8 +269,7 @@ enum processor_features FEATURE_AVX10_2 = 116, FEATURE_AMX_AVX512, FEATURE_AMX_TF32, - FEATURE_AMX_TRANSPOSE, - FEATURE_AMX_FP8, + FEATURE_AMX_FP8 = 120, FEATURE_MOVRS, FEATURE_AMX_MOVRS, CPU_FEATURE_MAX diff --git a/gcc/common/config/i386/i386-isas.h b/gcc/common/config/i386/i386-isas.h index 379bb34..fcd3ab2 100644 --- a/gcc/common/config/i386/i386-isas.h +++ b/gcc/common/config/i386/i386-isas.h @@ -188,8 +188,6 @@ ISA_NAMES_TABLE_START ISA_NAMES_TABLE_ENTRY("amx-avx512", FEATURE_AMX_AVX512, P_NONE, "-mamx-avx512") ISA_NAMES_TABLE_ENTRY("amx-tf32", FEATURE_AMX_TF32, P_NONE, "-mamx-tf32") - ISA_NAMES_TABLE_ENTRY("amx-transpose", FEATURE_AMX_TRANSPOSE, - P_NONE, "-mamx-transpose") ISA_NAMES_TABLE_ENTRY("amx-fp8", FEATURE_AMX_FP8, P_NONE, "-mamx-fp8") ISA_NAMES_TABLE_ENTRY("movrs", FEATURE_MOVRS, P_NONE, "-mmovrs") ISA_NAMES_TABLE_ENTRY("amx-movrs", FEATURE_AMX_MOVRS, P_NONE, "-mamx-movrs") diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc index efa2a45..adfd220 100644 --- a/gcc/common/config/riscv/riscv-common.cc +++ b/gcc/common/config/riscv/riscv-common.cc @@ -1404,6 +1404,47 @@ fail: return NULL; } +/* Get the profile that best matches the current architecture string, + where best is defined as the most expansive profile. */ + +const char * +riscv_subset_list::get_profile_name () const +{ + const char *best_profile = NULL; + int max_ext_count = -1; + + for (int i = 0; riscv_profiles_table[i].profile_name != nullptr; ++i) + { + riscv_subset_list *subset_list = riscv_subset_list::parse ( + riscv_profiles_table[i].profile_string, NULL); + if (!subset_list) + continue; + if (subset_list->xlen () == this->xlen ()) + { + int ext_count = 0; + bool all_found = true; + for (riscv_subset_t *p = subset_list->m_head; p != NULL; + p = p->next, ++ext_count) + { + if (!this->lookup (p->name.c_str (), + p->major_version, + p->minor_version)) + { + all_found = false; + break; + } + } + if (all_found && ext_count > max_ext_count) + { + max_ext_count = ext_count; + best_profile = riscv_profiles_table[i].profile_name; + } + } + delete subset_list; + } + return best_profile; +} + /* Clone whole subset list. 
*/ riscv_subset_list * diff --git a/gcc/config.gcc b/gcc/config.gcc index a73bf95..2f478e2 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -464,8 +464,8 @@ i[34567]86-*-* | x86_64-*-*) avx10_2mediaintrin.h avx10_2convertintrin.h avx10_2bf16intrin.h avx10_2satcvtintrin.h avx10_2minmaxintrin.h avx10_2copyintrin.h - amxavx512intrin.h amxtf32intrin.h amxtransposeintrin.h - amxfp8intrin.h movrsintrin.h amxmovrsintrin.h" + amxavx512intrin.h amxtf32intrin.h amxfp8intrin.h + movrsintrin.h amxmovrsintrin.h" ;; ia64-*-*) extra_headers=ia64intrin.h diff --git a/gcc/config/i386/amxmovrsintrin.h b/gcc/config/i386/amxmovrsintrin.h index 97969f8..019adcf 100644 --- a/gcc/config/i386/amxmovrsintrin.h +++ b/gcc/config/i386/amxmovrsintrin.h @@ -59,53 +59,6 @@ __asm__ volatile \ #pragma GCC pop_options #endif /* __DISABLE_AMX_MOVRS__ */ -#if !defined(__AMX_MOVRS__) || !defined (__AMX_TRANSPOSE__) -#pragma GCC push_options -#pragma GCC target("amx-movrs,amx-transpose") -#define __DISABLE_AMX_MOVRS_TRANSPOSE__ -#endif /* __AMX_MOVRS_TRANSPOSE__ */ - -#define _tile_2rpntlvwz0rs_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz0rs\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz0rs\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz0rst1_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz0rst1\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz0rst1\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz1rs_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz1rs\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz1rs\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz1rst1_internal(tdst, base, stride) \ - __asm__ volatile \ - ("{t2rpntlvwz1rst1\t(%0,%1,1), %%tmm"#tdst \ - "|t2rpntlvwz1rst1\t%%tmm"#tdst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz0rs(tdst, base, stride) \ - _tile_2rpntlvwz0rs_internal(tdst, base, stride) - -#define _tile_2rpntlvwz0rst1(tdst, base, stride) \ - _tile_2rpntlvwz0rst1_internal(tdst, base, stride) - -#define _tile_2rpntlvwz1rs(tdst, base, stride) \ - _tile_2rpntlvwz1rs_internal(tdst, base, stride) - -#define _tile_2rpntlvwz1rst1(tdst, base, stride) \ - _tile_2rpntlvwz1rst1_internal(tdst, base, stride) - -#ifdef __DISABLE_AMX_MOVRS_TRANSPOSE__ -#undef __DISABLE_AMX_MOVRS_TRANSPOSE__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_MOVRS_TRANSPOSE__ */ - #endif /* __x86_64__ */ #endif /* _AMX_MOVRSINTRIN_H_INCLUDED */ diff --git a/gcc/config/i386/amxtransposeintrin.h b/gcc/config/i386/amxtransposeintrin.h index f06603e..6409db3 100644 --- a/gcc/config/i386/amxtransposeintrin.h +++ b/gcc/config/i386/amxtransposeintrin.h @@ -21,157 +21,4 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ -#if !defined _IMMINTRIN_H_INCLUDED -#error "Never use <amxtransposeintrin.h> directly; include <immintrin.h> instead." 
-#endif - -#ifndef _AMXTRANSPOSEINTRIN_H_INCLUDED -#define _AMXTRANSPOSEINTRIN_H_INCLUDED - -#if !defined(__AMX_TRANSPOSE__) -#pragma GCC push_options -#pragma GCC target("amx-transpose") -#define __DISABLE_AMX_TRANSPOSE__ -#endif /* __AMX_TRANSPOSE__ */ - -#if defined(__x86_64__) -#define _tile_transposed_internal(dst,src) \ - __asm__ volatile\ - ("{ttransposed\t%%tmm"#src", %%tmm"#dst"|ttransposed\t%%tmm"#dst", %%tmm"#src"}" ::) - -#define _tile_2rpntlvwz0_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz0\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz0\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*) (base)), "r" ((long) (stride))) - -#define _tile_2rpntlvwz0t1_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz0t1\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz0t1\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*)(base)), "r" ((long)(stride))) - -#define _tile_2rpntlvwz1_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz1\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz1\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*)(base)), "r" ((long)(stride))) - -#define _tile_2rpntlvwz1t1_internal(dst,base,stride) \ - __asm__ volatile\ - ("{t2rpntlvwz1t1\t(%0,%1,1), %%tmm"#dst"|t2rpntlvwz1t1\t%%tmm"#dst", [%0+%1*1]}" \ - :: "r" ((const void*)(base)), "r" ((long)(stride))) - -#define _tile_transposed(dst,src) \ - _tile_transposed_internal (dst, src) - -#define _tile_2rpntlvwz0(dst,base,stride) \ - _tile_2rpntlvwz0_internal (dst, base, stride) - -#define _tile_2rpntlvwz0t1(dst,base,stride) \ - _tile_2rpntlvwz0t1_internal (dst, base, stride) - -#define _tile_2rpntlvwz1(dst,base,stride) \ - _tile_2rpntlvwz1_internal (dst, base, stride) - -#define _tile_2rpntlvwz1t1(dst,base,stride) \ - _tile_2rpntlvwz1t1_internal (dst, base, stride) - -#if !defined(__AMX_BF16__) -#pragma GCC push_options -#pragma GCC target("amx-bf16") -#define __DISABLE_AMX_BF16__ -#endif /* __AMX_BF16__ */ - -#define _tile_tdpbf16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttdpbf16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttdpbf16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tdpbf16ps(src1_dst,src2,src3) \ - _tile_tdpbf16ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_BF16__ -#undef __DISABLE_AMX_BF16__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_BF16__ */ - -#if !defined(__AMX_FP16__) -#pragma GCC push_options -#pragma GCC target("amx-fp16") -#define __DISABLE_AMX_FP16__ -#endif /* __AMX_FP16__ */ - -#define _tile_tdpfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttdpfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttdpfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tdpfp16ps(src1_dst,src2,src3) \ - _tile_tdpfp16ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_FP16__ -#undef __DISABLE_AMX_FP16__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_FP16__ */ - -#if !defined(__AMX_COMPLEX__) -#pragma GCC push_options -#pragma GCC target("amx-complex") -#define __DISABLE_AMX_COMPLEX__ -#endif /* __AMX_COMPLEX__ */ - -#define _tile_conjtcmmimfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{tconjtcmmimfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|tconjtcmmimfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_conjtfp16_internal(dst,src) \ - __asm__ volatile\ - ("{tconjtfp16\t%%tmm"#src", %%tmm"#dst"|tconjtfp16\t%%tmm"#dst", %%tmm"#src"}" ::) - -#define _tile_tcmmimfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttcmmimfp16ps\t%%tmm"#src3", %%tmm"#src2", 
%%tmm"#src1_dst"|ttcmmimfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tcmmrlfp16ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttcmmrlfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttcmmrlfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_conjtcmmimfp16ps(src1_dst,src2,src3) \ - _tile_conjtcmmimfp16ps_internal (src1_dst, src2, src3) - -#define _tile_conjtfp16(dst,src) \ - _tile_conjtfp16_internal (dst, src) - -#define _tile_tcmmimfp16ps(src1_dst,src2,src3) \ - _tile_tcmmimfp16ps_internal (src1_dst, src2, src3) - -#define _tile_tcmmrlfp16ps(src1_dst,src2,src3) \ - _tile_tcmmrlfp16ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_COMPLEX__ -#undef __DISABLE_AMX_COMPLEX__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_COMPLEX__ */ - -#if !defined(__AMX_TF32__) -#pragma GCC push_options -#pragma GCC target("amx-tf32") -#define __DISABLE_AMX_TF32__ -#endif /* __AMX_TF32__ */ - -#define _tile_tmmultf32ps_internal(src1_dst,src2,src3) \ - __asm__ volatile\ - ("{ttmmultf32ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|ttmmultf32ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::) - -#define _tile_tmmultf32ps(src1_dst,src2,src3) \ - _tile_tmmultf32ps_internal (src1_dst, src2, src3) - -#ifdef __DISABLE_AMX_TF32__ -#undef __DISABLE_AMX_TF32__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_TF32__ */ - -#endif /* __x86_64__ */ - -#ifdef __DISABLE_AMX_TRANSPOSE__ -#undef __DISABLE_AMX_TRANSPOSE__ -#pragma GCC pop_options -#endif /* __DISABLE_AMX_TRANSPOSE__ */ - -#endif /* _AMXTRANSPOSEINTRIN_H_INCLUDED */ +#error "AMX-TRANSPOSE support has been removed since GCC 16." diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h index 25e2835..04149c1 100644 --- a/gcc/config/i386/cpuid.h +++ b/gcc/config/i386/cpuid.h @@ -170,7 +170,6 @@ /* AMX sub leaf (%eax == 0x1e, %ecx == 1) */ /* %eax */ #define bit_AMX_FP8 (1 << 4) -#define bit_AMX_TRANSPOSE (1 << 5) #define bit_AMX_TF32 (1 << 6) #define bit_AMX_AVX512 (1 << 7) #define bit_AMX_MOVRS (1 << 8) diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc index 457aa05..0037465 100644 --- a/gcc/config/i386/i386-c.cc +++ b/gcc/config/i386/i386-c.cc @@ -743,8 +743,6 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag, def_or_undef (parse_in, "__AMX_AVX512__"); if (isa_flag2 & OPTION_MASK_ISA2_AMX_TF32) def_or_undef (parse_in, "__AMX_TF32__"); - if (isa_flag2 & OPTION_MASK_ISA2_AMX_TRANSPOSE) - def_or_undef (parse_in, "__AMX_TRANSPOSE__"); if (isa_flag2 & OPTION_MASK_ISA2_AMX_FP8) def_or_undef (parse_in, "__AMX_FP8__"); if (isa_flag2 & OPTION_MASK_ISA2_MOVRS) diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def index 6fa601d..a1d994c 100644 --- a/gcc/config/i386/i386-isa.def +++ b/gcc/config/i386/i386-isa.def @@ -122,7 +122,6 @@ DEF_PTA(AVX10_1) DEF_PTA(AVX10_2) DEF_PTA(AMX_AVX512) DEF_PTA(AMX_TF32) -DEF_PTA(AMX_TRANSPOSE) DEF_PTA(AMX_FP8) DEF_PTA(MOVRS) DEF_PTA(AMX_MOVRS) diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc index cad4019..35cba3f 100644 --- a/gcc/config/i386/i386-options.cc +++ b/gcc/config/i386/i386-options.cc @@ -264,7 +264,6 @@ static struct ix86_target_opts isa2_opts[] = { "-mavx10.2", OPTION_MASK_ISA2_AVX10_2 }, { "-mamx-avx512", OPTION_MASK_ISA2_AMX_AVX512 }, { "-mamx-tf32", OPTION_MASK_ISA2_AMX_TF32 }, - { "-mamx-transpose", OPTION_MASK_ISA2_AMX_TRANSPOSE }, { "-mamx-fp8", OPTION_MASK_ISA2_AMX_FP8 }, { "-mmovrs", OPTION_MASK_ISA2_MOVRS }, { "-mamx-movrs", OPTION_MASK_ISA2_AMX_MOVRS } @@ 
-1123,7 +1122,6 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[], IX86_ATTR_ISA ("avx10.2", OPT_mavx10_2), IX86_ATTR_ISA ("amx-avx512", OPT_mamx_avx512), IX86_ATTR_ISA ("amx-tf32", OPT_mamx_tf32), - IX86_ATTR_ISA ("amx-transpose", OPT_mamx_transpose), IX86_ATTR_ISA ("amx-fp8", OPT_mamx_fp8), IX86_ATTR_ISA ("movrs", OPT_mmovrs), IX86_ATTR_ISA ("amx-movrs", OPT_mamx_movrs), diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index f4c89f0..fbd8d9a 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -2485,8 +2485,8 @@ constexpr wide_int_bitmask PTA_PANTHERLAKE = constexpr wide_int_bitmask PTA_DIAMONDRAPIDS = PTA_GRANITERAPIDS_D | PTA_AVXIFMA | PTA_AVXNECONVERT | PTA_AVXVNNIINT16 | PTA_AVXVNNIINT8 | PTA_CMPCCXADD | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_AVX10_2 - | PTA_APX_F | PTA_AMX_AVX512 | PTA_AMX_FP8 | PTA_AMX_TF32 | PTA_AMX_TRANSPOSE - | PTA_MOVRS | PTA_AMX_MOVRS | PTA_USER_MSR; + | PTA_APX_F | PTA_AMX_AVX512 | PTA_AMX_FP8 | PTA_AMX_TF32 | PTA_MOVRS + | PTA_AMX_MOVRS | PTA_USER_MSR; constexpr wide_int_bitmask PTA_BDVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_POPCNT | PTA_LZCNT diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt index 1192176..8449450 100644 --- a/gcc/config/i386/i386.opt +++ b/gcc/config/i386/i386.opt @@ -1362,10 +1362,6 @@ mamx-tf32 Target Mask(ISA2_AMX_TF32) Var(ix86_isa_flags2) Save Support AMX-TF32 built-in functions and code generation. -mamx-transpose -Target Mask(ISA2_AMX_TRANSPOSE) Var(ix86_isa_flags2) Save -Support AMX-TRANSPOSE built-in functions and code generation. - mamx-fp8 Target Mask(ISA2_AMX_FP8) Var(ix86_isa_flags2) Save Support AMX-FP8 built-in functions and code generation. diff --git a/gcc/config/i386/i386.opt.urls b/gcc/config/i386/i386.opt.urls index cce524c..a9bbac0 100644 --- a/gcc/config/i386/i386.opt.urls +++ b/gcc/config/i386/i386.opt.urls @@ -605,9 +605,6 @@ UrlSuffix(gcc/x86-Options.html#index-mamx-avx512) mamx-tf32 UrlSuffix(gcc/x86-Options.html#index-mamx-tf32) -mamx-transpose -UrlSuffix(gcc/x86-Options.html#index-mamx-transpose) - mamx-fp8 UrlSuffix(gcc/x86-Options.html#index-mamx-fp8) diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h index b195fe5..f5a11ff 100644 --- a/gcc/config/i386/immintrin.h +++ b/gcc/config/i386/immintrin.h @@ -136,8 +136,6 @@ #include <amxtf32intrin.h> -#include <amxtransposeintrin.h> - #include <amxfp8intrin.h> #include <prfchwintrin.h> diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc index 4fc0528..d497326 100644 --- a/gcc/config/riscv/riscv-c.cc +++ b/gcc/config/riscv/riscv-c.cc @@ -165,6 +165,15 @@ riscv_cpu_cpp_builtins (cpp_reader *pfile) if (!subset_list) return; + /* Define profile macro if a profile was used. */ + const char *profile_name = subset_list->get_profile_name (); + if (profile_name) + { + char *profile_macro = (char *)alloca (strlen (profile_name) + 10); + sprintf (profile_macro, "__riscv_%s", profile_name); + builtin_define (profile_macro); + } + size_t max_ext_len = 0; /* Figure out the max length of extension name for reserving buffer. 
*/ diff --git a/gcc/config/riscv/riscv-subset.h b/gcc/config/riscv/riscv-subset.h index 4cd860f..1887ed7 100644 --- a/gcc/config/riscv/riscv-subset.h +++ b/gcc/config/riscv/riscv-subset.h @@ -105,6 +105,8 @@ public: unsigned xlen () const {return m_xlen;}; + const char *get_profile_name () const; + riscv_subset_list *clone () const; static riscv_subset_list *parse (const char *, location_t *); diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi index a12855d..94b76b7 100644 --- a/gcc/doc/extend.texi +++ b/gcc/doc/extend.texi @@ -6750,11 +6750,6 @@ Enable/disable the generation of the AMX-AVX512 instructions. @itemx no-amx-tf32 Enable/disable the generation of the AMX-TF32 instructions. -@cindex @code{target("amx-transpose")} function attribute, x86 -@item amx-transpose -@itemx no-amx-transpose -Enable/disable the generation of the AMX-TRANSPOSE instructions. - @cindex @code{target("amx-fp8")} function attribute, x86 @item amx-fp8 @itemx no-amx-fp8 diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index 8559b73..6bd5128 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -1506,8 +1506,7 @@ See RS/6000 and PowerPC Options. -mamx-tile -mamx-int8 -mamx-bf16 -muintr -mhreset -mavxvnni -mamx-fp8 -mavx512fp16 -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mapxf --musermsr -mavx10.1 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mmovrs --mamx-movrs +-musermsr -mavx10.1 -mavx10.2 -mamx-avx512 -mamx-tf32 -mmovrs -mamx-movrs -mcldemote -mms-bitfields -mno-align-stringops -minline-all-stringops -minline-stringops-dynamically -mstringop-strategy=@var{alg} -mkl -mwidekl @@ -36154,9 +36153,6 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}. @opindex mamx-tf32 @itemx -mamx-tf32 @need 200 -@opindex mamx-transpose -@itemx -mamx-transpose -@need 200 @itemx -mamx-fp8 @opindex mamx-fp8 @need 200 @@ -36175,9 +36171,9 @@ WAITPKG, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B, AVX512BF16, ENQCMD, AVX512VPOPCNTDQ, AVX512VNNI, SERIALIZE, UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16, AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AMX-FP16, PREFETCHI, RAOINT, AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, -SM4, APX_F, USER_MSR, AVX10.1, AVX10.2, AMX-AVX512, AMX-TF32, AMX-TRANSPOSE, -AMX-FP8, MOVRS, AMX-MOVRS or CLDEMOTE extended instruction sets. Each has a -corresponding @option{-mno-} option to disable use of these instructions. +SM4, APX_F, USER_MSR, AVX10.1, AVX10.2, AMX-AVX512, AMX-TF32, AMX-FP8, MOVRS, +AMX-MOVRS or CLDEMOTE extended instruction sets. Each has a corresponding +@option{-mno-} option to disable use of these instructions. These extensions are also available as built-in functions: see @ref{x86 Built-in Functions}, for details of the functions enabled and diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi index c001e8e..29742e2 100644 --- a/gcc/doc/sourcebuild.texi +++ b/gcc/doc/sourcebuild.texi @@ -2698,9 +2698,6 @@ Target supports the execution of @code{amx-movrs} instructions. @item amx_tf32 Target supports the execution of @code{amx-tf32} instructions. -@item amx_transpose -Target supports the execution of @code{amx-transpose} instructions. - @item amx_fp8 Target supports the execution of @code{amx-fp8} instructions. 
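The riscv-c.cc hunk above makes riscv_cpu_cpp_builtins define a __riscv_<profile> macro, where get_profile_name () returns the most expansive profile from riscv_profiles_table whose extensions are all covered by the current architecture string. A minimal sketch of how user code might test for the macro; the profile name rva23u64 and the invocation are illustrative assumptions, not taken from the patch:

/* profile-check.c (hypothetical) -- e.g.
   riscv64-unknown-elf-gcc -march=rva23u64 -mabi=lp64d -E -dM profile-check.c | grep __riscv_rva */
#if defined (__riscv_rva23u64)   /* assumed name, built via the "__riscv_%s" pattern shown above */
int have_rva23_profile (void) { return 1; }
#else
int have_rva23_profile (void) { return 0; }
#endif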
diff --git a/gcc/testsuite/g++.dg/other/i386-2.C b/gcc/testsuite/g++.dg/other/i386-2.C index 88252ad..d4c73f5 100644 --- a/gcc/testsuite/g++.dg/other/i386-2.C +++ b/gcc/testsuite/g++.dg/other/i386-2.C @@ -1,5 +1,5 @@ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */ -/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-skip-if "requires hosted libstdc++ for cstdlib malloc" { ! hostedlib } } */ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h, diff --git a/gcc/testsuite/g++.dg/other/i386-3.C b/gcc/testsuite/g++.dg/other/i386-3.C index a234e4f..e925607 100644 --- a/gcc/testsuite/g++.dg/other/i386-3.C +++ b/gcc/testsuite/g++.dg/other/i386-3.C @@ -1,5 +1,5 @@ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */ -/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-skip-if "requires hosted libstdc++ for cstdlib malloc" { ! 
hostedlib } } */ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h, diff --git a/gcc/testsuite/g++.dg/tree-ssa/cselim-1.C b/gcc/testsuite/g++.dg/tree-ssa/cselim-1.C new file mode 100644 index 0000000..a621945 --- /dev/null +++ b/gcc/testsuite/g++.dg/tree-ssa/cselim-1.C @@ -0,0 +1,37 @@ +/* { dg-do compile { target c++11 } } */ +/* { dg-options "-O2 -fdump-tree-phiopt1-details -fdump-tree-optimized" } */ +/* PR tree-optimization/122178 */ +/* cselim/cselim-limited should be able to handle clobbers. */ + +#include <new> + +struct s1 +{ + bool t; +}; + +void f(s1 *a, bool b) +{ + if (b) + { + a = new(a)s1{1}; + } + else + { + a = new(a)s1{0}; + } +} + +/* + The above should be optimized in phiopt1 to: + *a = {CLOBBER(bob)}; + a->t = b; + */ + + +/* { dg-final { scan-tree-dump-times "factoring out stores" 1 "phiopt1" } } */ +/* { dg-final { scan-tree-dump-times "factoring out clobber" 1 "phiopt1" } } */ +/* { dg-final { scan-tree-dump-times " converted to straightline code" 1 "phiopt1" } } */ +/* { dg-final { scan-tree-dump-not "if " "phiopt1" } } */ +/* { dg-final { scan-tree-dump-not "if " "optimized" } } */ + diff --git a/gcc/testsuite/gcc.dg/vect/pr120687-1.c b/gcc/testsuite/gcc.dg/vect/pr120687-1.c index ce9cf63..ac684c0 100644 --- a/gcc/testsuite/gcc.dg/vect/pr120687-1.c +++ b/gcc/testsuite/gcc.dg/vect/pr120687-1.c @@ -11,6 +11,6 @@ frd (unsigned *p, unsigned *lastone) return sum; } -/* { dg-final { scan-tree-dump "reduction: detected reduction chain" "vect" } } */ +/* { dg-final { scan-tree-dump "Starting SLP discovery of reduction chain" "vect" } } */ /* { dg-final { scan-tree-dump-not "SLP discovery of reduction chain failed" "vect" } } */ /* { dg-final { scan-tree-dump "optimized: loop vectorized" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr120687-2.c b/gcc/testsuite/gcc.dg/vect/pr120687-2.c index dfc6dc7..25f0355 100644 --- a/gcc/testsuite/gcc.dg/vect/pr120687-2.c +++ b/gcc/testsuite/gcc.dg/vect/pr120687-2.c @@ -12,6 +12,6 @@ frd (float *p, float *lastone) return sum; } -/* { dg-final { scan-tree-dump "reduction: detected reduction chain" "vect" } } */ +/* { dg-final { scan-tree-dump "Starting SLP discovery of reduction chain" "vect" } } */ /* { dg-final { scan-tree-dump-not "SLP discovery of reduction chain failed" "vect" } } */ /* { dg-final { scan-tree-dump "optimized: loop vectorized" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr120687-3.c b/gcc/testsuite/gcc.dg/vect/pr120687-3.c index f20a66a..31a6c94 100644 --- a/gcc/testsuite/gcc.dg/vect/pr120687-3.c +++ b/gcc/testsuite/gcc.dg/vect/pr120687-3.c @@ -11,6 +11,6 @@ frd (float *p, float *lastone) return sum; } -/* { dg-final { scan-tree-dump "reduction: detected reduction chain" "vect" } } */ +/* { dg-final { scan-tree-dump "Starting SLP discovery of reduction chain" "vect" } } */ /* { dg-final { scan-tree-dump-not "SLP discovery of reduction chain failed" "vect" } } */ /* { dg-final { scan-tree-dump "optimized: loop vectorized" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr121949_1.c b/gcc/testsuite/gcc.dg/vect/pr121949_1.c new file mode 100644 index 0000000..9e8d41e --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr121949_1.c @@ -0,0 +1,45 @@ +#ifndef TYPE +#define TYPE short +#define MAX 16 +#define IV_TYPE char +#endif + +#include "tree-vect.h" + +__attribute__((noipa)) +void f(TYPE* acc) +{ + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +__attribute__((noipa)) +void g(TYPE* acc) +{ +#pragma GCC novector + for (IV_TYPE row = 0; row < MAX; ++row) + 
acc[row] = acc[row] << row; +} + +int main () +{ + + check_vect (); + + TYPE acc1[MAX] = {}; + TYPE acc2[MAX] = {}; +#pragma GCC novector + for (int i = 0; i < MAX; i++) + acc1[i] = acc2[i] = i; + + f (acc1); + f (acc2); + +#pragma GCC novector + for (int i = 0; i < MAX; i++) + if (acc1[i] != acc2[i]) + __builtin_abort (); +} + +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_var_shift && vect_int } } } } */ +/* { dg-final { scan-tree-dump "vect_recog_over_widening_pattern: detected" "vect" { target { vect_var_shift && vect_int } } } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr121949_2.c b/gcc/testsuite/gcc.dg/vect/pr121949_2.c new file mode 100644 index 0000000..f448eb6 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr121949_2.c @@ -0,0 +1,45 @@ +#ifndef TYPE +#define TYPE int +#define MAX 32 +#define IV_TYPE short +#endif + +#include "tree-vect.h" + +__attribute__((noipa)) +void f(TYPE* acc) +{ + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +__attribute__((noipa)) +void g(TYPE* acc) +{ +#pragma GCC novector + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +int main () +{ + + check_vect (); + + TYPE acc1[MAX] = {}; + TYPE acc2[MAX] = {}; +#pragma GCC novector + for (int i = 0; i < MAX; i++) + acc1[i] = acc2[i] = i; + + f (acc1); + f (acc2); + +#pragma GCC novector + for (int i = 0; i < MAX; i++) + if (acc1[i] != acc2[i]) + __builtin_abort (); +} + +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_var_shift && vect_int } } } } */ +/* { dg-final { scan-tree-dump-not "vect_recog_over_widening_pattern: detected" "vect" { target { vect_var_shift && vect_int } } } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr121949_3.c b/gcc/testsuite/gcc.dg/vect/pr121949_3.c new file mode 100644 index 0000000..b7e6a3d --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr121949_3.c @@ -0,0 +1,45 @@ +#ifndef TYPE +#define TYPE long long +#define MAX 64 +#define IV_TYPE int +#endif + +#include "tree-vect.h" + +__attribute__((noipa)) +void f(TYPE* acc) +{ + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +__attribute__((noipa)) +void g(TYPE* acc) +{ +#pragma GCC novector + for (IV_TYPE row = 0; row < MAX; ++row) + acc[row] = acc[row] << row; +} + +int main () +{ + + check_vect (); + + TYPE acc1[MAX] = {}; + TYPE acc2[MAX] = {}; +#pragma GCC novector + for (int i = 0; i < MAX; i++) + acc1[i] = acc2[i] = i; + + f (acc1); + f (acc2); + +#pragma GCC novector + for (int i = 0; i < MAX; i++) + if (acc1[i] != acc2[i]) + __builtin_abort (); +} + +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { vect_var_shift && vect_int } } } } */ +/* { dg-final { scan-tree-dump "vect_recog_vector_vector_shift_pattern: detected" "vect" { target { vect_var_shift && vect_int } } } } */ diff --git a/gcc/testsuite/gcc.target/i386/amx-check.h b/gcc/testsuite/gcc.target/i386/amx-check.h index 0addb5b..c43a955 100644 --- a/gcc/testsuite/gcc.target/i386/amx-check.h +++ b/gcc/testsuite/gcc.target/i386/amx-check.h @@ -260,9 +260,6 @@ main () #ifdef AMX_TF32 && __builtin_cpu_supports ("amx-tf32") #endif -#ifdef AMX_TRANSPOSE - && __builtin_cpu_supports ("amx-transpose") -#endif #ifdef AMX_FP8 && __builtin_cpu_supports ("amx-fp8") #endif diff --git a/gcc/testsuite/gcc.target/i386/amxmovrs-2rpntlvwrs-2.c b/gcc/testsuite/gcc.target/i386/amxmovrs-2rpntlvwrs-2.c deleted file mode 100644 index 0093ef7..0000000 --- a/gcc/testsuite/gcc.target/i386/amxmovrs-2rpntlvwrs-2.c +++ /dev/null @@ -1,58 +0,0 @@ -/* { 
dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_movrs } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-movrs -mamx-transpose -mavx512fp16 -mavx512bf16" } */ -#define AMX_MOVRS -#define AMX_TRANSPOSE -#define DO_TEST test_amx_movrs_t2rpntlvwrs -void test_amx_movrs_t2rpntlvwrs (); -#include "amx-helper.h" - -#define init_pair_tile_reg_and_src_z_t(tmm_num, src, buffer, ztype, wtype) \ -{ \ - init_pair_tile_src (tmm_num, &src, buffer, ztype); \ - _tile_2rpntlvwz##ztype##rs##wtype (tmm_num, buffer, _STRIDE); \ -} - -void test_amx_movrs_t2rpntlvwrs () -{ - __tilecfg_u cfg; - __tilepair src; - __tile ref_0, ref_1; - uint8_t buffer[2048]; - int i; - - init_tile_config (&cfg); - - for (i = 0; i < 2048; i++) - buffer[i] = i % 256; - - /* Check t2rpntlvwz0rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz0t1rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1t1rs. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c b/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c index d99a97f..339550b 100644 --- a/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c +++ b/gcc/testsuite/gcc.target/i386/amxmovrs-asmatt-1.c @@ -1,11 +1,7 @@ /* { dg-do compile { target { ! 
ia32 } } } */ -/* { dg-options "-O2 -mamx-movrs -mamx-transpose" } */ +/* { dg-options "-O2 -mamx-movrs" } */ /* { dg-final { scan-assembler "tileloaddrs\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ /* { dg-final { scan-assembler "tileloaddrst1\[ \\t]+\[^\n\]*\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rs\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rst1\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rs\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rst1\[ \\t]+\[^\n\]*\(%\[a-z0-9\]*\,%\[a-z0-9\]*\,\[124\]\)+\[^\n\]*%tmm\[0-9\]" } } */ #include <immintrin.h> extern const void* base; @@ -20,8 +16,4 @@ void TEST() { _tile_loaddrs (TMM1, base, stride); _tile_loaddrst1 (TMM1, base, stride); - _tile_2rpntlvwz0rs (TMM0, base, stride); - _tile_2rpntlvwz0rst1 (TMM1, base, stride); - _tile_2rpntlvwz1rs (TMM2, base, stride); - _tile_2rpntlvwz1rst1 (TMM3, base, stride); } diff --git a/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c b/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c index 6a33986..6a522b5 100644 --- a/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c +++ b/gcc/testsuite/gcc.target/i386/amxmovrs-asmintel-1.c @@ -1,12 +1,8 @@ /* { dg-do compile { target { ! ia32 } } } */ /* { dg-require-effective-target masm_intel } */ -/* { dg-options "-O2 -mamx-movrs -mamx-transpose -masm=intel" } */ +/* { dg-options "-O2 -mamx-movrs -masm=intel" } */ /* { dg-final { scan-assembler-times "tileloaddrs\[ \\t]%tmm\[0-9\]" 1 } } */ /* { dg-final { scan-assembler-times "tileloaddrst1\[ \\t]%tmm\[0-9\]" 1 } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rs\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0rst1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rs\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1rst1\[ \\t]%tmm\[0-9\]" } } */ #include <immintrin.h> extern const void* base; @@ -21,8 +17,4 @@ void TEST() { _tile_loaddrs (TMM1, base, stride); _tile_loaddrst1 (TMM1, base, stride); - _tile_2rpntlvwz0rs (TMM0, base, stride); - _tile_2rpntlvwz0rst1 (TMM1, base, stride); - _tile_2rpntlvwz1rs (TMM2, base, stride); - _tile_2rpntlvwz1rst1 (TMM3, base, stride); } diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-2rpntlvw-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-2rpntlvw-2.c deleted file mode 100644 index 2d01827..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-2rpntlvw-2.c +++ /dev/null @@ -1,54 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-options "-O2 -mamx-transpose" } */ -#define AMX_TRANSPOSE -#define DO_TEST test_amx_transpose_t2rpntlvw -void test_amx_transpose_t2rpntlvw (); -#include "amx-helper.h" -#define init_pair_tile_reg_and_src_z_t(tmm_num, src, buffer, ztype, wtype) \ -{ \ - init_pair_tile_src (tmm_num, &src, buffer, ztype); \ - _tile_2rpntlvwz##ztype##wtype (tmm_num, buffer, _STRIDE); \ -} - -void test_amx_transpose_t2rpntlvw () -{ - __tilecfg_u cfg; - __tilepair src; - __tile ref_0, ref_1; - uint8_t buffer[2048]; - int i; - - init_tile_config (&cfg); - - for (i = 0; i < 2048; i++) - buffer[i] = i % 256; - - /* Check t2rpntlvwz0. 
*/ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1,); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz0t1. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 0, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); - - /* Check t2rpntlvwz1t1. */ - init_pair_tile_reg_and_src_z_t (0, src, buffer, 1, t1); - _tile_stored (0, ref_0.buf, _STRIDE); - _tile_stored (1, ref_1.buf, _STRIDE); - if (!check_pair_tile_register (&ref_0, &ref_1, &src)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-asmatt-1.c b/gcc/testsuite/gcc.target/i386/amxtranspose-asmatt-1.c deleted file mode 100644 index a970f5d..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-asmatt-1.c +++ /dev/null @@ -1,39 +0,0 @@ -/* { dg-do compile { target { ! ia32 } } } */ -/* { dg-options "-O2 -mamx-transpose -mamx-bf16 -mamx-complex -mamx-fp16 -mamx-tf32" } */ -/* { dg-final { scan-assembler "ttdpbf16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttdpfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttransposed\[ \\t]+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0t1\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1t1\[ \\t]+\[^\n\]*\\(%\[a-z0-9]*\,%\[a-z0-9\]*\,\[124\]\\)+\[^\n\]*%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "tconjtcmmimfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "tconjtfp16\[ \\t]+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttcmmimfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttcmmrlfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -/* { dg-final { scan-assembler "ttmmultf32ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1" } } */ -#include <immintrin.h> - -extern const void* base; -extern const int stride; - -#define TMM0 0 -#define TMM1 1 -#define TMM2 2 -#define TMM3 3 - -void TEST() -{ - _tile_tdpbf16ps (TMM1, TMM2, TMM3); - _tile_tdpfp16ps (TMM1, TMM2, TMM3); - _tile_transposed (TMM1, TMM2); - _tile_2rpntlvwz0 (TMM0, base, stride); - _tile_2rpntlvwz0t1 (TMM1, base, stride); - _tile_2rpntlvwz1 (TMM2, base, stride); - _tile_2rpntlvwz1t1 (TMM3, base, stride); - _tile_conjtcmmimfp16ps (TMM1, TMM2, TMM3); - _tile_conjtfp16 (TMM1, TMM2); - _tile_tcmmimfp16ps (TMM1, TMM2, TMM3); - _tile_tcmmrlfp16ps (TMM1, TMM2, TMM3); - _tile_tmmultf32ps (TMM1, TMM2, TMM3); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-asmintel-1.c b/gcc/testsuite/gcc.target/i386/amxtranspose-asmintel-1.c deleted file mode 100644 index 2cf73ae..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-asmintel-1.c +++ /dev/null @@ -1,35 +0,0 @@ -/* { dg-do 
compile { target { ! ia32 } } } */ -/* { dg-require-effective-target masm_intel } */ -/* { dg-options "-O2 -mamx-transpose -mamx-bf16 -mamx-complex -mamx-fp16 -mamx-tf32 -masm=intel" } */ -/* { dg-final { scan-assembler "ttdpbf16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttdpfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttransposed\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz0t1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "t2rpntlvwz1t1\[ \\t]%tmm\[0-9\]" } } */ -/* { dg-final { scan-assembler "tconjtcmmimfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "tconjtfp16\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2" } } */ -/* { dg-final { scan-assembler "ttcmmimfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttcmmrlfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -/* { dg-final { scan-assembler "ttmmultf32ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3" } } */ -#include <immintrin.h> - -extern const void* base; -extern const int stride; - -void TEST() -{ - _tile_tdpbf16ps (1, 2, 3); - _tile_tdpfp16ps (1, 2, 3); - _tile_transposed (1, 2); - _tile_2rpntlvwz0 (5, base, stride); - _tile_2rpntlvwz0t1 (4, base, stride); - _tile_2rpntlvwz1 (3, base, stride); - _tile_2rpntlvwz1t1 (2, base, stride); - _tile_conjtcmmimfp16ps (1, 2, 3); - _tile_conjtfp16 (1, 2); - _tile_tcmmimfp16ps (1, 2, 3); - _tile_tcmmrlfp16ps (1, 2, 3); - _tile_tmmultf32ps (1, 2, 3); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtcmmimfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-conjtcmmimfp16ps-2.c deleted file mode 100644 index 159867d..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtcmmimfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_conjtcmmimfp16ps -void test_amx_transpose_conjtcmmimfp16ps (); -#include "amx-helper.h" - -void calc_matrix_conjtcmmimfp16ps (__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t + 1])) - - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t])); -} - -void test_amx_transpose_conjtcmmimfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_conjtcmmimfp16ps (&dst, &src1, &src2); - - _tile_conjtcmmimfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtfp16-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-conjtfp16-2.c deleted file mode 100644 index 710d76a..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-conjtfp16-2.c +++ /dev/null @@ -1,48 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_conjtfp16 -void test_amx_transpose_conjtfp16 (); -#include "amx-helper.h" - -void calc_matrix_conjtfp16 (__tile *dst, __tile *src) -{ - uint16_t *src_buf = (uint16_t *) src->buf; - float *dst_buf = (float *) dst->buf; - - int M = dst->rows; - int N = dst->colsb / 4; - int i, j, t; - - for (i = 0; i < M; i++) - for (j = 0; j < N; j++) - for (t = 0; t < 2; t+=2) - { - dst_buf[i * 2 * N + 2 * j + t] = src_buf[j * 2 * M + 2 * i + t]; - dst_buf[i * 2 * N + 2 * j + t + 1] = -src_buf[j * 2 * M + 2 * i + t + 1]; - } -} - -void test_amx_transpose_conjtfp16 () -{ - __tilecfg_u cfg; - __tile src, dst, ref; - uint8_t tmp_dst_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (2, src, tmp_dst_buf); - - /* Check tconjtfp16. 
*/ - calc_matrix_conjtfp16 (&dst, &src); - _tile_conjtfp16 (1, 2); - _tile_stored (1, ref.buf, _STRIDE); - - if (!check_tile_register (&ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmimfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmimfp16ps-2.c deleted file mode 100644 index e2a0f10..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmimfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_tcmmimfp16ps -void test_amx_transpose_tcmmimfp16ps (); -#include "amx-helper.h" - -void calc_matrix_tcmmimfp16ps (__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t + 1])) + - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t])); -} - -void test_amx_transpose_tcmmimfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tcmmimfp16ps (&dst, &src1, &src2); - - _tile_tcmmimfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmrlfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmrlfp16ps-2.c deleted file mode 100644 index b09186c..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tcmmrlfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_complex } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-complex -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_COMPLEX -#define DO_TEST test_amx_transpose_tcmmrlfp16ps -void test_amx_transpose_tcmmrlfp16ps (); -#include "amx-helper.h" - -void calc_matrix_tcmmrlfp16ps (__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t])) - - (make_fp16_f32(src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32(src2_buf[k * 2 * N + 2 * n + t + 1])); -} - -void test_amx_transpose_tcmmrlfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tcmmrlfp16ps (&dst, &src1, &src2); - - _tile_tcmmrlfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpbf16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tdpbf16ps-2.c deleted file mode 100644 index 6a3226b..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpbf16ps-2.c +++ /dev/null @@ -1,53 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_bf16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-bf16 -mavx512bf16" } */ -#define AMX_TRANSPOSE -#define AMX_BF16 -#define DO_TEST test_amx_transpose_tdpbf16ps -void test_amx_transpose_tdpbf16ps (); -#include "amx-helper.h" - -void calc_matrix_tdpbf16ps(__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_bf16_f32 (src1_buf[k * 2 * M + 2 * m + t]) * - make_bf16_f32 (src2_buf[k * 2 * N + 2 * n + t])) + - (make_bf16_f32 (src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_bf16_f32 (src2_buf[k * 2 * N + 2 * n + t + 1])); -} - -void test_amx_transpose_tdpbf16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024]; - - init_bf16_max_tile_buffer (tmp_dst_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tdpbf16ps (&dst, &src1, &src2); - - _tile_tdpbf16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_float_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tdpfp16ps-2.c deleted file mode 100644 index 83c3715..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tdpfp16ps-2.c +++ /dev/null @@ -1,55 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_fp16 } */ -/* { dg-require-effective-target avx512fp16 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-fp16 -mavx512fp16" } */ -#define AMX_TRANSPOSE -#define AMX_FP16 -#define DO_TEST test_amx_transpose_tdpfp16ps -void test_amx_transpose_tdpfp16ps (); -#include "amx-helper.h" - -void calc_matrix_tdpfp16ps(__tile *dst, __tile *src1, __tile *src2) -{ - uint16_t *src1_buf = (uint16_t *) src1->buf; - uint16_t *src2_buf = (uint16_t *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, k, n, t; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - for (t = 0; t < 2; t+=2) - dst_buf[m * N + n] += - (make_fp16_f32 (src1_buf[k * 2 * M + 2 * m + t]) * - make_fp16_f32 (src2_buf[k * 2 * N + 2 * n + t])) + - (make_fp16_f32 (src1_buf[k * 2 * M + 2 * m + t + 1]) * - make_fp16_f32 (src2_buf[k * 2 * N + 2 * n + t + 1])); -} - -void test_amx_transpose_tdpfp16ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024]; - - init_fp16_max_tile_buffer (tmp_dst_buf); - init_fp16_max_tile_zero_buffer(tmp_dst_zero_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tdpfp16ps (&dst, &src1, &src2); - - _tile_tdpfp16ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_float_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-tmmultf32ps-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-tmmultf32ps-2.c deleted file mode 100644 index 44166c1..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-tmmultf32ps-2.c +++ /dev/null @@ -1,51 +0,0 @@ -/* { dg-do run { target { ! 
ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-require-effective-target amx_tf32 } */ -/* { dg-options "-O2 -mamx-transpose -mamx-tf32" } */ -#define AMX_TRANSPOSE -#define AMX_TF32 -#define DO_TEST test_amx_transpose_tmmultf32ps -void test_amx_transpose_tmmultf32ps(); -#include "amx-helper.h" - -void calc_matrix_tmmultf32ps(__tile *dst, __tile *src1, __tile *src2) -{ - float *src1_buf = (float *) src1->buf; - float *src2_buf = (float *) src2->buf; - float *dst_buf = (float *) dst->buf; - - int K = src1->rows; - int M = src1->colsb / 4; - int N = src2->colsb / 4; - int m, n, k; - - for (m = 0; m < M; m++) - for (k = 0; k < K; k++) - for (n = 0; n < N; n++) - dst_buf[m * N + n] += - zero_lower_mantissa_bits_fp32 (silence_snan_fp32 (src1_buf[k * M + m])) * - zero_lower_mantissa_bits_fp32 (silence_snan_fp32 (src2_buf[k * N + n])); - -} - -void test_amx_transpose_tmmultf32ps () -{ - __tilecfg_u cfg; - __tile dst, dst_ref, src1, src2; - uint8_t tmp_dst_buf[1024]; - - init_fp32_max_tile_buffer (tmp_dst_buf); - - init_tile_config (&cfg); - init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf); - init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf); - - calc_matrix_tmmultf32ps (&dst, &src1, &src2); - - _tile_tmmultf32ps (1, 2, 3); - _tile_stored (1, dst_ref.buf, _STRIDE); - - if (!check_tile_register (&dst_ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/amxtranspose-transposed-2.c b/gcc/testsuite/gcc.target/i386/amxtranspose-transposed-2.c deleted file mode 100644 index 73c709c..0000000 --- a/gcc/testsuite/gcc.target/i386/amxtranspose-transposed-2.c +++ /dev/null @@ -1,39 +0,0 @@ -/* { dg-do run { target { ! ia32 } } } */ -/* { dg-require-effective-target amx_transpose } */ -/* { dg-options "-O2 -mamx-transpose" } */ -#define AMX_TRANSPOSE -#define DO_TEST test_amx_transpose_transposed -void test_amx_transpose_transposed (); -#include "amx-helper.h" - -void calc_matrix_ttransposed (__tile *dst, __tile *src) -{ - uint32_t *src_buf = (uint32_t *) src->buf; - uint32_t *dst_buf = (uint32_t *) dst->buf; - - int M = src->rows; - int N = src->colsb / 4; - int i, j; - - for (i = 0; i < M; i++) - for (j = 0; j < N; j++) - dst_buf[j * M + i] = (uint32_t) src_buf[i * N + j]; -} - -void test_amx_transpose_transposed () -{ - __tilecfg_u cfg; - __tile src, dst, ref; - - init_tile_config (&cfg); - init_tile_reg_and_src (1, dst); - init_tile_reg_and_src (2, src); - - /* Check ttransposed. 
*/ - calc_matrix_ttransposed (&dst, &src); - _tile_transposed (1, 2); - _tile_stored (1, ref.buf, _STRIDE); - - if (!check_tile_register (&ref, &dst)) - abort (); -} diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc index e462ead..3d9af7a 100644 --- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc +++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc @@ -90,7 +90,6 @@ extern void test_user_msr (void) __attribute__((__target__("usermsr"))); extern void test_avx10_2 (void) __attribute__((__target__("avx10.2"))); extern void test_amx_avx512 (void) __attribute__((__target__("amx-avx512"))); extern void test_amx_tf32 (void) __attribute__((__target__("amx-tf32"))); -extern void test_amx_transpose (void) __attribute__((__target__("amx-transpose"))); extern void test_amx_fp8 (void) __attribute__((__target__("amx-fp8"))); extern void test_movrs (void) __attribute__((__target__("movrs"))); extern void test_amx_movrs (void) __attribute__((__target__("amx-movrs"))); @@ -185,7 +184,6 @@ extern void test_no_user_msr (void) __attribute__((__target__("no-usermsr"))); extern void test_no_avx10_2 (void) __attribute__((__target__("no-avx10.2"))); extern void test_no_amx_avx512 (void) __attribute__((__target__("no-amx-avx512"))); extern void test_no_amx_tf32 (void) __attribute__((__target__("no-amx-tf32"))); -extern void test_no_amx_transpose (void) __attribute__((__target__("no-amx-transpose"))); extern void test_no_amx_fp8 (void) __attribute__((__target__("no-amx-fp8"))); extern void test_no_movrs (void) __attribute__((__target__("no-movrs"))); extern void test_no_amx_movrs (void) __attribute__((__target__("no-amx-movrs"))); diff --git a/gcc/testsuite/gcc.target/i386/sse-12.c b/gcc/testsuite/gcc.target/i386/sse-12.c index cabccb0..fc406b0 100644 --- a/gcc/testsuite/gcc.target/i386/sse-12.c +++ b/gcc/testsuite/gcc.target/i386/sse-12.c @@ -3,7 +3,7 @@ popcntintrin.h gfniintrin.h and mm_malloc.h are usable with -O -std=c89 -pedantic-errors. 
*/ /* { dg-do compile } */ -/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ #include <x86intrin.h> diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c index 189e19e..7541956 100644 --- a/gcc/testsuite/gcc.target/i386/sse-13.c +++ b/gcc/testsuite/gcc.target/i386/sse-13.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16 -mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-add-options bind_pic_locally } */ #include <mm_malloc.h> diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c index f3b7c112..1e86c75 100644 --- a/gcc/testsuite/gcc.target/i386/sse-14.c +++ b/gcc/testsuite/gcc.target/i386/sse-14.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha 
-mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-transpose -mamx-fp8 -mmovrs -mamx-movrs" } */ +/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -msha -mxsavec -mxsaves -mclflushopt -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavxifma -mavxvnniint8 -mavxneconvert -mamx-fp16 -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mavx10.2 -mamx-avx512 -mamx-tf32 -mamx-fp8 -mmovrs -mamx-movrs" } */ /* { dg-add-options bind_pic_locally } */ #include <mm_malloc.h> diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c index 0cb0368..418fe23 100644 --- a/gcc/testsuite/gcc.target/i386/sse-22.c +++ b/gcc/testsuite/gcc.target/i386/sse-22.c @@ -103,7 +103,7 @@ #ifndef DIFFERENT_PRAGMAS -#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-transpose,amx-fp8,movrs,amx-movrs") +#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-fp8,movrs,amx-movrs") #endif /* Following intrinsics require immediate arguments. 
They @@ -220,7 +220,7 @@ test_4 (_mm_cmpestrz, int, __m128i, int, __m128i, int, 1) /* immintrin.h (AVX/AVX2/RDRND/FSGSBASE/F16C/RTM/AVX512F/SHA) */ #ifdef DIFFERENT_PRAGMAS -#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-transpose,amx-fp8,movrs,amx-movrs") +#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,sha,gfni,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,amx-fp16,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-fp8,movrs,amx-movrs") #endif #include <immintrin.h> test_1 (_cvtss_sh, unsigned short, float, 1) diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c index 95db1f7..f9b0613 100644 --- a/gcc/testsuite/gcc.target/i386/sse-23.c +++ b/gcc/testsuite/gcc.target/i386/sse-23.c @@ -895,6 +895,6 @@ #define __builtin_ia32_minmaxsh_mask_round(A, B, C, D, E, F) __builtin_ia32_minmaxsh_mask_round (A, B, 100, D, E, 4) #define __builtin_ia32_minmaxss_mask_round(A, B, C, D, E, F) __builtin_ia32_minmaxss_mask_round (A, B, 100, D, E, 4) -#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-transpose,amx-fp8,movrs,amx-movrs") +#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2,amx-avx512,amx-tf32,amx-fp8,movrs,amx-movrs") #include <x86intrin.h> diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-1.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-1.c new file mode 100644 index 0000000..5fc17ab --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-1.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvi20u64 -mabi=lp64" } */ + +int main () { + +#ifndef __riscv_rvi20u64 +#error "__riscv_rvi20u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-2.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-2.c new file mode 100644 index 0000000..86f2771 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-2.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvi20u32 -mabi=ilp32" } */ + +int main () { + +#ifndef __riscv_rvi20u32 +#error "__riscv_rvi20u32" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-3.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-3.c new file mode 100644 index 0000000..7787549 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-3.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva20u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva20u64 +#error "__riscv_rva20u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-4.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-4.c new file mode 100644 index 0000000..abb20b7 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-4.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva22u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva22u64 +#error "__riscv_rva22u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-5.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-5.c new file mode 100644 index 0000000..0840cdc --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-5.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva23u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva23u64 +#error "__riscv_rva23u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-6.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-6.c new file mode 100644 index 0000000..7159780 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-6.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rva23s64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rva23s64 +#error "__riscv_rva23s64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-7.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-7.c new file mode 100644 index 0000000..1366159 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-7.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvb23u64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rvb23u64 +#error "__riscv_rvb23u64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/predef-profiles-8.c b/gcc/testsuite/gcc.target/riscv/predef-profiles-8.c new file mode 100644 index 0000000..c0c5003 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/predef-profiles-8.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rvb23s64 -mabi=lp64d" } */ + +int main () { + +#ifndef __riscv_rvb23s64 +#error "__riscv_rvb23s64" +#endif + + return 0; +}
\ No newline at end of file diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp index 1acfb37..4ee8d12 100644 --- a/gcc/testsuite/lib/target-supports.exp +++ b/gcc/testsuite/lib/target-supports.exp @@ -11238,17 +11238,6 @@ proc check_effective_target_amx_tf32 { } { } "-mamx-tf32" ] } -# Return 1 if amx-transpose instructions can be compiled. -proc check_effective_target_amx_transpose { } { - return [check_no_compiler_messages amx_transpose object { - void - foo () - { - __asm__ volatile ("ttransposed\t%%tmm1, %%tmm2" ::); - } - } "-mamx-transpose" ] -} - # Return 1 if amx-fp8 instructions can be compiled. proc check_effective_target_amx_fp8 { } { return [check_no_compiler_messages amx_fp8 object { diff --git a/gcc/tree-ssa-dom.cc b/gcc/tree-ssa-dom.cc index b1ac35e..087d842 100644 --- a/gcc/tree-ssa-dom.cc +++ b/gcc/tree-ssa-dom.cc @@ -2040,11 +2040,6 @@ cprop_operand (gimple *stmt, use_operand_p op_p, range_query *query) if (val && val != op) { - /* Do not replace hard register operands in asm statements. */ - if (gimple_code (stmt) == GIMPLE_ASM - && !may_propagate_copy_into_asm (op)) - return; - /* Certain operands are not allowed to be copy propagated due to their interaction with exception handling and some GCC extensions. */ diff --git a/gcc/tree-ssa-phiopt.cc b/gcc/tree-ssa-phiopt.cc index 3d6673c..031184d 100644 --- a/gcc/tree-ssa-phiopt.cc +++ b/gcc/tree-ssa-phiopt.cc @@ -3648,16 +3648,20 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb, if (then_assign == NULL || !gimple_assign_single_p (then_assign) - || gimple_clobber_p (then_assign) - || gimple_has_volatile_ops (then_assign) || else_assign == NULL || !gimple_assign_single_p (else_assign) - || gimple_clobber_p (else_assign) - || gimple_has_volatile_ops (else_assign) || stmt_references_abnormal_ssa_name (then_assign) || stmt_references_abnormal_ssa_name (else_assign)) return false; + /* Allow both being clobbers but no other volatile operations. */ + if (gimple_clobber_p (then_assign) + && gimple_clobber_p (else_assign)) + ; + else if (gimple_has_volatile_ops (then_assign) + || gimple_has_volatile_ops (else_assign)) + return false; + lhs = gimple_assign_lhs (then_assign); if (!operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0)) return false; @@ -3674,7 +3678,14 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb, if (!is_gimple_reg_type (TREE_TYPE (lhs))) { - if (!operand_equal_p (then_rhs, else_rhs)) + /* Handle clobbers separately as operand_equal_p does not check + the kind of the clobbers being the same. */ + if (TREE_CLOBBER_P (then_rhs) && TREE_CLOBBER_P (else_rhs)) + { + if (CLOBBER_KIND (then_rhs) != CLOBBER_KIND (else_rhs)) + return false; + } + else if (!operand_equal_p (then_rhs, else_rhs)) return false; /* Currently only handle commoning of `= {}`.
*/ if (TREE_CODE (then_rhs) != CONSTRUCTOR) @@ -3683,7 +3694,10 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb, if (dump_file && (dump_flags & TDF_DETAILS)) { - fprintf(dump_file, "factoring out stores:\n\tthen:\n"); + if (TREE_CLOBBER_P (then_rhs)) + fprintf(dump_file, "factoring out clobber:\n\tthen:\n"); + else + fprintf(dump_file, "factoring out stores:\n\tthen:\n"); print_gimple_stmt (dump_file, then_assign, 0, TDF_VOPS|TDF_MEMSYMS); fprintf(dump_file, "\telse:\n"); @@ -4555,8 +4569,8 @@ pass_phiopt::execute (function *) hoist_adjacent_loads (bb, bb1, bb2, bb3); /* Try to see if there are only store in each side of the if - and try to remove that. */ - if (EDGE_COUNT (bb3->preds) == 2) + and try to remove that; don't do this for -Og. */ + if (EDGE_COUNT (bb3->preds) == 2 && !optimize_debug) while (cond_if_else_store_replacement_limited (bb1, bb2, bb3)) ; } @@ -4572,7 +4586,8 @@ pass_phiopt::execute (function *) /* Factor out operations from the phi if possible. */ if (single_pred_p (bb1) - && EDGE_COUNT (merge->preds) == 2) + && EDGE_COUNT (merge->preds) == 2 + && !optimize_debug) { for (gsi = gsi_start (phis); !gsi_end_p (gsi); ) { diff --git a/gcc/tree-ssa-propagate.cc b/gcc/tree-ssa-propagate.cc index 872f881..f02b10d 100644 --- a/gcc/tree-ssa-propagate.cc +++ b/gcc/tree-ssa-propagate.cc @@ -578,10 +578,6 @@ substitute_and_fold_engine::replace_uses_in (gimple *stmt) if (val == tuse || val == NULL_TREE) continue; - if (gimple_code (stmt) == GIMPLE_ASM - && !may_propagate_copy_into_asm (tuse)) - continue; - if (!may_propagate_copy (tuse, val)) continue; @@ -1142,15 +1138,6 @@ may_propagate_copy_into_stmt (gimple *dest, tree orig) return true; } -/* Similarly, but we know that we're propagating into an ASM_EXPR. */ - -bool -may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED) -{ - return true; -} - - /* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME). Use this version when not const/copy propagating values. For example, diff --git a/gcc/tree-ssa-propagate.h b/gcc/tree-ssa-propagate.h index 200fc73..7819c0c 100644 --- a/gcc/tree-ssa-propagate.h +++ b/gcc/tree-ssa-propagate.h @@ -67,7 +67,6 @@ extern void move_ssa_defining_stmt_for_defs (gimple *, gimple *); extern bool stmt_makes_single_store (gimple *); extern bool may_propagate_copy (tree, tree, bool = false); extern bool may_propagate_copy_into_stmt (gimple *, tree); -extern bool may_propagate_copy_into_asm (tree); extern void propagate_value (use_operand_p, tree); extern void replace_exp (use_operand_p, tree); extern void propagate_tree_value (tree *, tree); diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc index 73398e5..97c1bf0 100644 --- a/gcc/tree-vect-loop.cc +++ b/gcc/tree-vect-loop.cc @@ -161,7 +161,7 @@ along with GCC; see the file COPYING3. If not see static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *, unsigned *); static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info, - gphi **, bool *, bool); + gphi **); /* Function vect_is_simple_iv_evolution. @@ -341,8 +341,7 @@ vect_phi_first_order_recurrence_p (loop_vec_info loop_vinfo, class loop *loop, slp analyses or not. 
*/ static void -vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, - bool slp) +vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop) { basic_block bb = loop->header; auto_vec<stmt_vec_info, 64> worklist; @@ -425,19 +424,15 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); gphi *double_reduc; - bool reduc_chain; stmt_vec_info reduc_stmt_info - = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc, - &reduc_chain, slp); + = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc); if (reduc_stmt_info && double_reduc) { - bool inner_chain; stmt_vec_info inner_phi_info = loop_vinfo->lookup_stmt (double_reduc); /* ??? Pass down flag we're the inner loop of a double reduc. */ stmt_vec_info inner_reduc_info - = vect_is_simple_reduction (loop_vinfo, inner_phi_info, - NULL, &inner_chain, slp); + = vect_is_simple_reduction (loop_vinfo, inner_phi_info, NULL); if (inner_reduc_info) { STMT_VINFO_REDUC_DEF (stmt_vinfo) = reduc_stmt_info; @@ -478,12 +473,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def; - /* Store the reduction cycles for possible vectorization in - loop-aware SLP if it was not detected as reduction - chain. */ - if (! reduc_chain) - LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push - (reduc_stmt_info); + LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt_info); } } else if (vect_phi_first_order_recurrence_p (loop_vinfo, loop, phi)) @@ -518,11 +508,11 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop, a[i] = i; */ static void -vect_analyze_scalar_cycles (loop_vec_info loop_vinfo, bool slp) +vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) { class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - vect_analyze_scalar_cycles_1 (loop_vinfo, loop, slp); + vect_analyze_scalar_cycles_1 (loop_vinfo, loop); /* When vectorizing an outer-loop, the inner-loop is executed sequentially. Reductions in such inner-loop therefore have different properties than @@ -534,87 +524,7 @@ vect_analyze_scalar_cycles (loop_vec_info loop_vinfo, bool slp) current checks are too strict. */ if (loop->inner) - vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner, slp); -} - -/* Transfer group and reduction information from STMT_INFO to its - pattern stmt. */ - -static void -vect_fixup_reduc_chain (stmt_vec_info stmt_info) -{ - stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info); - stmt_vec_info stmtp; - gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp) - && REDUC_GROUP_FIRST_ELEMENT (stmt_info)); - REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info); - do - { - stmtp = STMT_VINFO_RELATED_STMT (stmt_info); - gcc_checking_assert (STMT_VINFO_DEF_TYPE (stmtp) - == STMT_VINFO_DEF_TYPE (stmt_info)); - REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp; - stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info); - if (stmt_info) - REDUC_GROUP_NEXT_ELEMENT (stmtp) - = STMT_VINFO_RELATED_STMT (stmt_info); - } - while (stmt_info); -} - -/* Fixup scalar cycles that now have their stmts detected as patterns. 
*/ - -static void -vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo) -{ - stmt_vec_info first; - unsigned i; - - FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first) - { - stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first); - while (next) - { - if ((STMT_VINFO_IN_PATTERN_P (next) - != STMT_VINFO_IN_PATTERN_P (first)) - || STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (next)) == -1) - break; - next = REDUC_GROUP_NEXT_ELEMENT (next); - } - /* If all reduction chain members are well-formed patterns adjust - the group to group the pattern stmts instead. */ - if (! next - && STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (first)) != -1) - { - if (STMT_VINFO_IN_PATTERN_P (first)) - { - vect_fixup_reduc_chain (first); - LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i] - = STMT_VINFO_RELATED_STMT (first); - } - } - /* If not all stmt in the chain are patterns or if we failed - to update STMT_VINFO_REDUC_IDX dissolve the chain and handle - it as regular reduction instead. */ - else - { - stmt_vec_info vinfo = first; - stmt_vec_info last = NULL; - while (vinfo) - { - next = REDUC_GROUP_NEXT_ELEMENT (vinfo); - REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL; - REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL; - last = vinfo; - vinfo = next; - } - STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize (first)) - = vect_internal_def; - loop_vinfo->reductions.safe_push (vect_stmt_to_vectorize (last)); - LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).unordered_remove (i); - --i; - } - } + vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); } /* Function vect_get_loop_niters. @@ -2264,12 +2174,10 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, /* Classify all cross-iteration scalar data-flow cycles. Cross-iteration cycles caused by virtual phis are analyzed separately. */ - vect_analyze_scalar_cycles (loop_vinfo, !force_single_lane); + vect_analyze_scalar_cycles (loop_vinfo); vect_pattern_recog (loop_vinfo); - vect_fixup_scalar_cycles_with_patterns (loop_vinfo); - /* Analyze the access patterns of the data-refs in the loop (consecutive, complex, etc.). FORNOW: Only handle consecutive access pattern. */ @@ -2678,10 +2586,6 @@ again: if (applying_suggested_uf) return ok; - /* If there are reduction chains re-trying will fail anyway. */ - if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) - return ok; - /* Likewise if the grouped loads or stores in the SLP cannot be handled via interleaving or lane instructions. */ slp_instance instance; @@ -3756,7 +3660,7 @@ check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi, static stmt_vec_info vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, - gphi **double_reduc, bool *reduc_chain_p, bool slp) + gphi **double_reduc) { gphi *phi = as_a <gphi *> (phi_info->stmt); gimple *phi_use_stmt = NULL; @@ -3768,7 +3672,6 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, bool inner_loop_of_double_reduc = double_reduc == NULL; if (double_reduc) *double_reduc = NULL; - *reduc_chain_p = false; STMT_VINFO_REDUC_TYPE (phi_info) = TREE_CODE_REDUCTION; tree phi_name = PHI_RESULT (phi); @@ -3918,12 +3821,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, if (code == COND_EXPR && !nested_in_vect_loop) STMT_VINFO_REDUC_TYPE (phi_info) = COND_REDUCTION; - /* Fill in STMT_VINFO_REDUC_IDX and gather stmts for an SLP - reduction chain for which the additional restriction is that - all operations in the chain are the same. 
*/ - auto_vec<stmt_vec_info, 8> reduc_chain; + /* Fill in STMT_VINFO_REDUC_IDX. */ unsigned i; - bool is_slp_reduc = !nested_in_vect_loop && code != COND_EXPR; for (i = path.length () - 1; i >= 1; --i) { gimple *stmt = USE_STMT (path[i].second); @@ -3940,39 +3839,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, STMT_VINFO_REDUC_IDX (stmt_info) = path[i].second->use - gimple_call_arg_ptr (call, 0); } - bool leading_conversion = (CONVERT_EXPR_CODE_P (op.code) - && (i == 1 || i == path.length () - 1)); - if ((op.code != code && !leading_conversion) - /* We can only handle the final value in epilogue - generation for reduction chains. */ - || (i != 1 && !has_single_use (gimple_get_lhs (stmt)))) - is_slp_reduc = false; - /* For reduction chains we support a trailing/leading - conversions. We do not store those in the actual chain. */ - if (leading_conversion) - continue; - reduc_chain.safe_push (stmt_info); } - if (slp && is_slp_reduc && reduc_chain.length () > 1) - { - for (unsigned i = 0; i < reduc_chain.length () - 1; ++i) - { - REDUC_GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0]; - REDUC_GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1]; - } - REDUC_GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0]; - REDUC_GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL; - - /* Save the chain for further analysis in SLP detection. */ - LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]); - REDUC_GROUP_SIZE (reduc_chain[0]) = reduc_chain.length (); - - *reduc_chain_p = true; - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "reduction: detected reduction chain\n"); - } - else if (dump_enabled_p ()) + if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "reduction: detected reduction\n"); @@ -5390,7 +5258,6 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, tree new_temp = NULL_TREE, new_name, new_scalar_dest; gimple *epilog_stmt = NULL; gimple *exit_phi; - tree bitsize; tree def; tree orig_name, scalar_result; imm_use_iterator imm_iter; @@ -5405,8 +5272,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, # b1 = phi <b2, b0> a2 = operation (a1) b2 = operation (b1) */ - const bool slp_reduc - = SLP_INSTANCE_KIND (slp_node_instance) != slp_inst_kind_reduc_chain; + const bool slp_reduc = !reduc_info->is_reduc_chain; tree induction_index = NULL_TREE; unsigned int group_size = SLP_TREE_LANES (slp_node); @@ -5608,7 +5474,6 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, scalar_results.truncate (0); scalar_results.reserve_exact (group_size); new_scalar_dest = vect_create_destination_var (scalar_dest, NULL); - bitsize = TYPE_SIZE (scalar_type); /* True if we should implement SLP_REDUC using native reduction operations instead of scalar operations. 
*/ @@ -6030,6 +5895,7 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, if (reduce_with_shift && (!slp_reduc || group_size == 1)) { + tree bitsize = TYPE_SIZE (TREE_TYPE (vectype1)); int element_bitsize = tree_to_uhwi (bitsize); /* Enforced by vectorizable_reduction, which disallows SLP reductions for variable-length vectors and also requires direct target support @@ -6098,9 +5964,10 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, dump_printf_loc (MSG_NOTE, vect_location, "Reduce using scalar code.\n"); + tree compute_type = TREE_TYPE (vectype1); + tree bitsize = TYPE_SIZE (compute_type); int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1)); int element_bitsize = tree_to_uhwi (bitsize); - tree compute_type = TREE_TYPE (vectype); gimple_seq stmts = NULL; FOR_EACH_VEC_ELT (reduc_inputs, i, vec_temp) { @@ -6956,8 +6823,6 @@ vectorizable_reduction (loop_vec_info loop_vinfo, bool single_defuse_cycle = false; tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE; tree cond_reduc_val = NULL_TREE; - const bool reduc_chain - = SLP_INSTANCE_KIND (slp_node_instance) == slp_inst_kind_reduc_chain; /* Make sure it was already recognized as a reduction computation. */ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def @@ -7019,6 +6884,7 @@ vectorizable_reduction (loop_vec_info loop_vinfo, double_reduc = true; } + const bool reduc_chain = reduc_info->is_reduc_chain; slp_node_instance->reduc_phis = slp_node; /* ??? We're leaving slp_node to point to the PHIs, we only need it to get at the number of vector stmts which wasn't @@ -7030,33 +6896,28 @@ vectorizable_reduction (loop_vec_info loop_vinfo, /* Verify following REDUC_IDX from the latch def leads us back to the PHI and compute the reduction chain length. Discover the real - reduction operation stmt on the way (stmt_info and slp_for_stmt_info). */ - tree reduc_def - = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi, loop_latch_edge (loop)); + reduction operation stmt on the way (slp_for_stmt_info). */ unsigned reduc_chain_length = 0; - bool only_slp_reduc_chain = true; stmt_info = NULL; slp_tree slp_for_stmt_info = NULL; slp_tree vdef_slp = slp_node_instance->root; - /* For double-reductions we start SLP analysis at the inner loop LC PHI - which is the def of the outer loop live stmt. */ - if (double_reduc) - vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[0]; - while (reduc_def != PHI_RESULT (reduc_def_phi)) + while (vdef_slp != slp_node) { - stmt_vec_info def = loop_vinfo->lookup_def (reduc_def); - stmt_vec_info vdef = vect_stmt_to_vectorize (def); - int reduc_idx = STMT_VINFO_REDUC_IDX (vdef); - if (STMT_VINFO_REDUC_IDX (vdef) == -1 - || SLP_TREE_REDUC_IDX (vdef_slp) == -1) + int reduc_idx = SLP_TREE_REDUC_IDX (vdef_slp); + if (reduc_idx == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction chain broken by patterns.\n"); return false; } - if (!REDUC_GROUP_FIRST_ELEMENT (vdef)) - only_slp_reduc_chain = false; + stmt_vec_info vdef = SLP_TREE_REPRESENTATIVE (vdef_slp); + if (is_a <gphi *> (vdef->stmt)) + { + vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[reduc_idx]; + /* Do not count PHIs towards the chain length. */ + continue; + } gimple_match_op op; if (!gimple_extract_op (vdef->stmt, &op)) { @@ -7080,11 +6941,8 @@ vectorizable_reduction (loop_vec_info loop_vinfo, else { /* First non-conversion stmt. 
*/ - if (!stmt_info) - { - stmt_info = vdef; - slp_for_stmt_info = vdef_slp; - } + if (!slp_for_stmt_info) + slp_for_stmt_info = vdef_slp; if (lane_reducing_op_p (op.code)) { @@ -7116,29 +6974,15 @@ vectorizable_reduction (loop_vec_info loop_vinfo, } else if (!vectype_in) vectype_in = SLP_TREE_VECTYPE (slp_node); - if (!REDUC_GROUP_FIRST_ELEMENT (vdef)) - { - gcc_assert (reduc_idx == SLP_TREE_REDUC_IDX (vdef_slp)); - vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[reduc_idx]; - } + vdef_slp = SLP_TREE_CHILDREN (vdef_slp)[reduc_idx]; } - - reduc_def = op.ops[reduc_idx]; reduc_chain_length++; } + stmt_info = SLP_TREE_REPRESENTATIVE (slp_for_stmt_info); + /* PHIs should not participate in patterns. */ gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info)); - /* STMT_VINFO_REDUC_DEF doesn't point to the first but the last - element. */ - if (REDUC_GROUP_FIRST_ELEMENT (stmt_info)) - { - gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (stmt_info)); - stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info); - } - if (REDUC_GROUP_FIRST_ELEMENT (stmt_info)) - gcc_assert (REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info); - /* 1. Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop, unless it's a reduction chain. */ @@ -7453,8 +7297,7 @@ vectorizable_reduction (loop_vec_info loop_vinfo, { /* When vectorizing a reduction chain w/o SLP the reduction PHI is not directy used in stmt. */ - if (!only_slp_reduc_chain - && reduc_chain_length != 1) + if (reduc_chain_length != 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -7789,22 +7632,18 @@ vectorizable_reduction (loop_vec_info loop_vinfo, /* All but single defuse-cycle optimized and fold-left reductions go through their own vectorizable_* routines. */ + stmt_vec_info tem + = SLP_TREE_REPRESENTATIVE (SLP_INSTANCE_TREE (slp_node_instance)); if (!single_defuse_cycle && reduction_type != FOLD_LEFT_REDUCTION) + STMT_VINFO_DEF_TYPE (tem) = vect_internal_def; + else { - stmt_vec_info tem - = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info)); - if (REDUC_GROUP_FIRST_ELEMENT (tem)) - { - gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (tem)); - tem = REDUC_GROUP_FIRST_ELEMENT (tem); - } - STMT_VINFO_DEF_TYPE (vect_orig_stmt (tem)) = vect_internal_def; - STMT_VINFO_DEF_TYPE (tem) = vect_internal_def; + STMT_VINFO_DEF_TYPE (tem) = vect_reduction_def; + if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) + vect_reduction_update_partial_vector_usage (loop_vinfo, reduc_info, + slp_node, op.code, op.type, + vectype_in); } - else if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) - vect_reduction_update_partial_vector_usage (loop_vinfo, reduc_info, - slp_node, op.code, op.type, - vectype_in); return true; } @@ -8238,8 +8077,6 @@ vect_transform_cycle_phi (loop_vec_info loop_vinfo, int i; bool nested_cycle = false; int vec_num; - const bool reduc_chain - = SLP_INSTANCE_KIND (slp_node_instance) == slp_inst_kind_reduc_chain; if (nested_in_vect_loop_p (loop, stmt_info)) { @@ -8308,7 +8145,7 @@ vect_transform_cycle_phi (loop_vec_info loop_vinfo, vec<stmt_vec_info> &stmts = SLP_TREE_SCALAR_STMTS (slp_node); unsigned int num_phis = stmts.length (); - if (reduc_chain) + if (reduc_info->is_reduc_chain) num_phis = 1; initial_values.reserve (num_phis); for (unsigned int i = 0; i < num_phis; ++i) diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc index 74a9a19..becee62 100644 --- a/gcc/tree-vect-patterns.cc +++ b/gcc/tree-vect-patterns.cc @@ -1022,13 +1022,11 @@ vect_reassociating_reduction_p (vec_info *vinfo, 
if (loop && nested_in_vect_loop_p (loop, stmt_info)) return false; - if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) - { - if (needs_fold_left_reduction_p (TREE_TYPE (gimple_assign_lhs (assign)), - code)) - return false; - } - else if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) == NULL) + if (!vect_is_reduction (stmt_info)) + return false; + + if (needs_fold_left_reduction_p (TREE_TYPE (gimple_assign_lhs (assign)), + code)) return false; *op0_out = gimple_assign_rhs1 (assign); @@ -4087,10 +4085,13 @@ vect_recog_vector_vector_shift_pattern (vec_info *vinfo, != TYPE_PRECISION (TREE_TYPE (oprnd0))) return NULL; - stmt_vec_info def_vinfo = vect_get_internal_def (vinfo, oprnd1); - if (!def_vinfo) + stmt_vec_info def_vinfo = vinfo->lookup_def (oprnd1); + if (!def_vinfo || STMT_VINFO_DEF_TYPE (def_vinfo) == vect_external_def) return NULL; + def_vinfo = vect_stmt_to_vectorize (def_vinfo); + gcc_assert (def_vinfo); + *type_out = get_vectype_for_scalar_type (vinfo, TREE_TYPE (oprnd0)); if (*type_out == NULL_TREE) return NULL; diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc index f553e8f..fe3bcff 100644 --- a/gcc/tree-vect-slp.cc +++ b/gcc/tree-vect-slp.cc @@ -4187,41 +4187,24 @@ vect_build_slp_instance (vec_info *vinfo, Return FALSE if SLP build fails. */ static bool -vect_analyze_slp_reduc_chain (vec_info *vinfo, +vect_analyze_slp_reduc_chain (loop_vec_info vinfo, scalar_stmts_to_slp_tree_map_t *bst_map, - stmt_vec_info stmt_info, + vec<stmt_vec_info> &scalar_stmts, + stmt_vec_info reduc_phi_info, unsigned max_tree_size, unsigned *limit) { - vec<stmt_vec_info> scalar_stmts; - - /* Collect the reduction stmts and store them in scalar_stmts. */ - scalar_stmts.create (REDUC_GROUP_SIZE (stmt_info)); - stmt_vec_info next_info = stmt_info; - while (next_info) - { - scalar_stmts.quick_push (vect_stmt_to_vectorize (next_info)); - next_info = REDUC_GROUP_NEXT_ELEMENT (next_info); - } - /* Mark the first element of the reduction chain as reduction to properly - transform the node. In the reduction analysis phase only the last - element of the chain is marked as reduction. */ - STMT_VINFO_DEF_TYPE (stmt_info) - = STMT_VINFO_DEF_TYPE (scalar_stmts.last ()); - STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info)) - = STMT_VINFO_REDUC_DEF (vect_orig_stmt (scalar_stmts.last ())); + /* If there's no budget left bail out early. */ + if (*limit == 0) + return false; /* Build the tree for the SLP instance. */ vec<stmt_vec_info> root_stmt_infos = vNULL; vec<tree> remain = vNULL; - /* If there's no budget left bail out early. */ - if (*limit == 0) - return false; - if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, - "Starting SLP discovery for\n"); + "Starting SLP discovery of reduction chain for\n"); for (unsigned i = 0; i < scalar_stmts.length (); ++i) dump_printf_loc (MSG_NOTE, vect_location, " %G", scalar_stmts[i]->stmt); @@ -4233,136 +4216,234 @@ vect_analyze_slp_reduc_chain (vec_info *vinfo, poly_uint64 max_nunits = 1; unsigned tree_size = 0; + /* ??? We need this only for SLP discovery. */ + for (unsigned i = 0; i < scalar_stmts.length (); ++i) + REDUC_GROUP_FIRST_ELEMENT (scalar_stmts[i]) = scalar_stmts[0]; + slp_tree node = vect_build_slp_tree (vinfo, scalar_stmts, group_size, &max_nunits, matches, limit, &tree_size, bst_map); + + for (unsigned i = 0; i < scalar_stmts.length (); ++i) + REDUC_GROUP_FIRST_ELEMENT (scalar_stmts[i]) = NULL; + if (node != NULL) { - /* Calculate the unrolling factor based on the smallest type. 
*/ - poly_uint64 unrolling_factor - = calculate_unrolling_factor (max_nunits, group_size); + /* Create a new SLP instance. */ + slp_instance new_instance = XNEW (class _slp_instance); + SLP_INSTANCE_TREE (new_instance) = node; + SLP_INSTANCE_LOADS (new_instance) = vNULL; + SLP_INSTANCE_ROOT_STMTS (new_instance) = root_stmt_infos; + SLP_INSTANCE_REMAIN_DEFS (new_instance) = remain; + SLP_INSTANCE_KIND (new_instance) = slp_inst_kind_reduc_chain; + new_instance->reduc_phis = NULL; + new_instance->cost_vec = vNULL; + new_instance->subgraph_entries = vNULL; - if (maybe_ne (unrolling_factor, 1U) - && is_a <bb_vec_info> (vinfo)) + vect_reduc_info reduc_info = info_for_reduction (vinfo, node); + reduc_info->is_reduc_chain = true; + + if (dump_enabled_p ()) + dump_printf_loc (MSG_NOTE, vect_location, + "SLP size %u vs. limit %u.\n", + tree_size, max_tree_size); + + /* Fixup SLP reduction chains. If this is a reduction chain with + a conversion in front amend the SLP tree with a node for that. */ + gimple *scalar_def = STMT_VINFO_REDUC_DEF (reduc_phi_info)->stmt; + if (is_gimple_assign (scalar_def) + && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (scalar_def))) + { + stmt_vec_info conv_info = vect_stmt_to_vectorize + (STMT_VINFO_REDUC_DEF (reduc_phi_info)); + scalar_stmts = vNULL; + scalar_stmts.create (group_size); + for (unsigned i = 0; i < group_size; ++i) + scalar_stmts.quick_push (conv_info); + slp_tree conv = vect_create_new_slp_node (scalar_stmts, 1); + SLP_TREE_VECTYPE (conv) + = get_vectype_for_scalar_type (vinfo, + TREE_TYPE + (gimple_assign_lhs (scalar_def)), + group_size); + SLP_TREE_REDUC_IDX (conv) = 0; + conv->cycle_info.id = node->cycle_info.id; + SLP_TREE_CHILDREN (conv).quick_push (node); + SLP_INSTANCE_TREE (new_instance) = conv; + } + /* Fill the backedge child of the PHI SLP node. The + general matching code cannot find it because the + scalar code does not reflect how we vectorize the + reduction. */ + use_operand_p use_p; + imm_use_iterator imm_iter; + class loop *loop = LOOP_VINFO_LOOP (vinfo); + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, + gimple_get_lhs (scalar_def)) + /* There are exactly two non-debug uses, the reduction + PHI and the loop-closed PHI node. */ + if (!is_gimple_debug (USE_STMT (use_p)) + && gimple_bb (USE_STMT (use_p)) == loop->header) + { + auto_vec<stmt_vec_info, 64> phis (group_size); + stmt_vec_info phi_info = vinfo->lookup_stmt (USE_STMT (use_p)); + for (unsigned i = 0; i < group_size; ++i) + phis.quick_push (phi_info); + slp_tree *phi_node = bst_map->get (phis); + unsigned dest_idx = loop_latch_edge (loop)->dest_idx; + SLP_TREE_CHILDREN (*phi_node)[dest_idx] + = SLP_INSTANCE_TREE (new_instance); + SLP_INSTANCE_TREE (new_instance)->refcnt++; + } + + vinfo->slp_instances.safe_push (new_instance); + + /* ??? We've replaced the old SLP_INSTANCE_GROUP_SIZE with + the number of scalar stmts in the root in a few places. + Verify that assumption holds. */ + gcc_assert (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (new_instance)) + .length () == group_size); + + if (dump_enabled_p ()) { - unsigned HOST_WIDE_INT const_max_nunits; - if (!max_nunits.is_constant (&const_max_nunits) - || const_max_nunits > group_size) + dump_printf_loc (MSG_NOTE, vect_location, + "Final SLP tree for instance %p:\n", + (void *) new_instance); + vect_print_slp_graph (MSG_NOTE, vect_location, + SLP_INSTANCE_TREE (new_instance)); + } + + return true; + } + /* Failed to SLP. 
*/ + if (dump_enabled_p ()) + dump_printf_loc (MSG_NOTE, vect_location, + "SLP discovery of reduction chain failed\n"); + return false; +} + +/* Analyze an SLP instance starting from SCALAR_STMTS which are a group + of KIND. Return true if successful. */ + +static bool +vect_analyze_slp_reduction (loop_vec_info vinfo, + stmt_vec_info scalar_stmt, + unsigned max_tree_size, unsigned *limit, + scalar_stmts_to_slp_tree_map_t *bst_map, + bool force_single_lane) +{ + slp_instance_kind kind = slp_inst_kind_reduc_group; + + /* If there's no budget left bail out early. */ + if (*limit == 0) + return false; + + vec<stmt_vec_info> scalar_stmts = vNULL; + /* Try to gather a reduction chain. */ + if (! force_single_lane + && STMT_VINFO_DEF_TYPE (scalar_stmt) == vect_reduction_def) + { + bool fail = false; + /* ??? We could leave operation code checking to SLP discovery. */ + code_helper code + = STMT_VINFO_REDUC_CODE (STMT_VINFO_REDUC_DEF + (vect_orig_stmt (scalar_stmt))); + bool first = true; + stmt_vec_info next_stmt = scalar_stmt; + do + { + stmt_vec_info stmt = next_stmt; + gimple_match_op op; + if (!gimple_extract_op (STMT_VINFO_STMT (stmt), &op)) + gcc_unreachable (); + tree reduc_def = gimple_arg (STMT_VINFO_STMT (stmt), + STMT_VINFO_REDUC_IDX (stmt)); + next_stmt = vect_stmt_to_vectorize (vinfo->lookup_def (reduc_def)); + gcc_assert (is_a <gphi *> (STMT_VINFO_STMT (next_stmt)) + || STMT_VINFO_REDUC_IDX (next_stmt) != -1); + if (!gimple_extract_op (STMT_VINFO_STMT (vect_orig_stmt (stmt)), &op)) + gcc_unreachable (); + if (CONVERT_EXPR_CODE_P (op.code) + && (first + || is_a <gphi *> (STMT_VINFO_STMT (next_stmt)))) + ; + else if (code != op.code) { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: store group " - "size not a multiple of the vector size " - "in basic block SLP\n"); - vect_free_slp_tree (node); - return false; + fail = true; + break; } - /* Fatal mismatch. */ - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "SLP discovery succeeded but node needs " - "splitting\n"); - memset (matches, true, group_size); - matches[group_size / const_max_nunits * const_max_nunits] = false; - vect_free_slp_tree (node); + else + scalar_stmts.safe_push (stmt); + first = false; } - else + while (!is_a <gphi *> (STMT_VINFO_STMT (next_stmt))); + if (!fail && scalar_stmts.length () > 1) { - /* Create a new SLP instance. */ - slp_instance new_instance = XNEW (class _slp_instance); - SLP_INSTANCE_TREE (new_instance) = node; - SLP_INSTANCE_LOADS (new_instance) = vNULL; - SLP_INSTANCE_ROOT_STMTS (new_instance) = root_stmt_infos; - SLP_INSTANCE_REMAIN_DEFS (new_instance) = remain; - SLP_INSTANCE_KIND (new_instance) = slp_inst_kind_reduc_chain; - new_instance->reduc_phis = NULL; - new_instance->cost_vec = vNULL; - new_instance->subgraph_entries = vNULL; + scalar_stmts.reverse (); + if (vect_analyze_slp_reduc_chain (vinfo, bst_map, scalar_stmts, + next_stmt, max_tree_size, limit)) + return true; + scalar_stmts.release (); + } + } - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "SLP size %u vs. limit %u.\n", - tree_size, max_tree_size); + scalar_stmts.create (1); + scalar_stmts.quick_push (scalar_stmt); - /* Fixup SLP reduction chains. If this is a reduction chain with - a conversion in front amend the SLP tree with a node for that. 
   */
-      gimple *scalar_def
-        = vect_orig_stmt (scalar_stmts[group_size - 1])->stmt;
-      if (STMT_VINFO_DEF_TYPE (scalar_stmts[0]) != vect_reduction_def)
-        {
-          /* Get at the conversion stmt - we know it's the single use
-             of the last stmt of the reduction chain. */
-          use_operand_p use_p;
-          bool r = single_imm_use (gimple_assign_lhs (scalar_def),
-                                   &use_p, &scalar_def);
-          gcc_assert (r);
-          stmt_vec_info next_info = vinfo->lookup_stmt (scalar_def);
-          next_info = vect_stmt_to_vectorize (next_info);
-          scalar_stmts = vNULL;
-          scalar_stmts.create (group_size);
-          for (unsigned i = 0; i < group_size; ++i)
-            scalar_stmts.quick_push (next_info);
-          slp_tree conv = vect_create_new_slp_node (scalar_stmts, 1);
-          SLP_TREE_VECTYPE (conv)
-            = get_vectype_for_scalar_type (vinfo,
-                                           TREE_TYPE
-                                           (gimple_assign_lhs (scalar_def)),
-                                           group_size);
-          SLP_TREE_REDUC_IDX (conv) = 0;
-          conv->cycle_info.id = node->cycle_info.id;
-          SLP_TREE_CHILDREN (conv).quick_push (node);
-          SLP_INSTANCE_TREE (new_instance) = conv;
-          /* We also have to fake this conversion stmt as SLP reduction
-             group so we don't have to mess with too much code
-             elsewhere. */
-          REDUC_GROUP_FIRST_ELEMENT (next_info) = next_info;
-          REDUC_GROUP_NEXT_ELEMENT (next_info) = NULL;
-        }
-      /* Fill the backedge child of the PHI SLP node. The
-         general matching code cannot find it because the
-         scalar code does not reflect how we vectorize the
-         reduction. */
-      use_operand_p use_p;
-      imm_use_iterator imm_iter;
-      class loop *loop = LOOP_VINFO_LOOP (as_a <loop_vec_info> (vinfo));
-      FOR_EACH_IMM_USE_FAST (use_p, imm_iter,
-                             gimple_get_lhs (scalar_def))
-        /* There are exactly two non-debug uses, the reduction
-           PHI and the loop-closed PHI node. */
-        if (!is_gimple_debug (USE_STMT (use_p))
-            && gimple_bb (USE_STMT (use_p)) == loop->header)
-          {
-            auto_vec<stmt_vec_info, 64> phis (group_size);
-            stmt_vec_info phi_info
-              = vinfo->lookup_stmt (USE_STMT (use_p));
-            for (unsigned i = 0; i < group_size; ++i)
-              phis.quick_push (phi_info);
-            slp_tree *phi_node = bst_map->get (phis);
-            unsigned dest_idx = loop_latch_edge (loop)->dest_idx;
-            SLP_TREE_CHILDREN (*phi_node)[dest_idx]
-              = SLP_INSTANCE_TREE (new_instance);
-            SLP_INSTANCE_TREE (new_instance)->refcnt++;
-          }
+  if (dump_enabled_p ())
+    {
+      dump_printf_loc (MSG_NOTE, vect_location,
+                       "Starting SLP discovery for\n");
+      for (unsigned i = 0; i < scalar_stmts.length (); ++i)
+        dump_printf_loc (MSG_NOTE, vect_location,
+                         " %G", scalar_stmts[i]->stmt);
+    }
-      vinfo->slp_instances.safe_push (new_instance);
+  /* Build the tree for the SLP instance. */
+  unsigned int group_size = scalar_stmts.length ();
+  bool *matches = XALLOCAVEC (bool, group_size);
+  poly_uint64 max_nunits = 1;
+  unsigned tree_size = 0;
-      /* ??? We've replaced the old SLP_INSTANCE_GROUP_SIZE with
-         the number of scalar stmts in the root in a few places.
-         Verify that assumption holds. */
-      gcc_assert (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (new_instance))
-                  .length () == group_size);
+  slp_tree node = vect_build_slp_tree (vinfo, scalar_stmts, group_size,
+                                       &max_nunits, matches, limit,
+                                       &tree_size, bst_map);
+  if (node != NULL)
+    {
+      /* Create a new SLP instance. */
+      slp_instance new_instance = XNEW (class _slp_instance);
+      SLP_INSTANCE_TREE (new_instance) = node;
+      SLP_INSTANCE_LOADS (new_instance) = vNULL;
+      SLP_INSTANCE_ROOT_STMTS (new_instance) = vNULL;
+      SLP_INSTANCE_REMAIN_DEFS (new_instance) = vNULL;
+      SLP_INSTANCE_KIND (new_instance) = kind;
+      new_instance->reduc_phis = NULL;
+      new_instance->cost_vec = vNULL;
+      new_instance->subgraph_entries = vNULL;
-      if (dump_enabled_p ())
-        {
-          dump_printf_loc (MSG_NOTE, vect_location,
-                           "Final SLP tree for instance %p:\n",
-                           (void *) new_instance);
-          vect_print_slp_graph (MSG_NOTE, vect_location,
-                                SLP_INSTANCE_TREE (new_instance));
-        }
+      if (dump_enabled_p ())
+        dump_printf_loc (MSG_NOTE, vect_location,
+                         "SLP size %u vs. limit %u.\n",
+                         tree_size, max_tree_size);
-      return true;
+      vinfo->slp_instances.safe_push (new_instance);
+
+      /* ??? We've replaced the old SLP_INSTANCE_GROUP_SIZE with
+         the number of scalar stmts in the root in a few places.
+         Verify that assumption holds. */
+      gcc_assert (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (new_instance))
+                  .length () == group_size);
+
+      if (dump_enabled_p ())
+        {
+          dump_printf_loc (MSG_NOTE, vect_location,
+                           "Final SLP tree for instance %p:\n",
+                           (void *) new_instance);
+          vect_print_slp_graph (MSG_NOTE, vect_location,
+                                SLP_INSTANCE_TREE (new_instance));
        }
+
+      return true;
    }

  /* Failed to SLP. */
@@ -5256,40 +5337,6 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size,
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
-      /* Find SLP sequences starting from reduction chains. */
-      FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element)
-        if (! STMT_VINFO_RELEVANT_P (first_element)
-            && ! STMT_VINFO_LIVE_P (first_element))
-          ;
-        else if (force_single_lane
-                 || ! vect_analyze_slp_reduc_chain (vinfo, bst_map,
-                                                    first_element,
-                                                    max_tree_size, &limit))
-          {
-            if (dump_enabled_p ())
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "SLP discovery of reduction chain failed\n");
-            /* Dissolve reduction chain group. */
-            stmt_vec_info vinfo = first_element;
-            stmt_vec_info last = NULL;
-            while (vinfo)
-              {
-                stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (vinfo);
-                REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL;
-                REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL;
-                last = vinfo;
-                vinfo = next;
-              }
-            STMT_VINFO_DEF_TYPE (first_element) = vect_internal_def;
-            /* ??? When there's a conversion around the reduction
-               chain 'last' isn't the entry of the reduction. */
-            if (STMT_VINFO_DEF_TYPE (last) != vect_reduction_def)
-              return opt_result::failure_at (vect_location,
-                                             "SLP build failed.\n");
-            /* It can be still vectorized as part of an SLP reduction. */
-            loop_vinfo->reductions.safe_push (last);
-          }
-
      /* Find SLP sequences starting from groups of reductions. */
      if (loop_vinfo->reductions.length () > 0)
        {
@@ -5315,23 +5362,13 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size,
            if (!force_single_lane
                && !lane_reducing_stmt_p (STMT_VINFO_STMT (next_info)))
              scalar_stmts.quick_push (next_info);
-            else
-              {
-                /* Do SLP discovery for single-lane reductions. */
-                vec<stmt_vec_info> stmts;
-                vec<stmt_vec_info> roots = vNULL;
-                vec<tree> remain = vNULL;
-                stmts.create (1);
-                stmts.quick_push (next_info);
-                if (! vect_build_slp_instance (vinfo,
-                                               slp_inst_kind_reduc_group,
-                                               stmts, roots, remain,
-                                               max_tree_size, &limit,
-                                               bst_map,
-                                               force_single_lane))
-                  return opt_result::failure_at (vect_location,
-                                                 "SLP build failed.\n");
-              }
+            /* Do SLP discovery for single-lane reductions. */
+            else if (! vect_analyze_slp_reduction (loop_vinfo, next_info,
+                                                   max_tree_size, &limit,
+                                                   bst_map,
+                                                   force_single_lane))
+              return opt_result::failure_at (vect_location,
+                                             "SLP build failed.\n");
          }
        }
      /* Save for re-processing on failure. */
@@ -5349,20 +5386,13 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size,
          scalar_stmts.release ();
          /* Do SLP discovery for single-lane reductions. */
          for (auto stmt_info : saved_stmts)
-            {
-              vec<stmt_vec_info> stmts;
-              vec<stmt_vec_info> roots = vNULL;
-              vec<tree> remain = vNULL;
-              stmts.create (1);
-              stmts.quick_push (vect_stmt_to_vectorize (stmt_info));
-              if (! vect_build_slp_instance (vinfo,
-                                             slp_inst_kind_reduc_group,
-                                             stmts, roots, remain,
-                                             max_tree_size, &limit,
-                                             bst_map, force_single_lane))
-                return opt_result::failure_at (vect_location,
-                                               "SLP build failed.\n");
-            }
+            if (! vect_analyze_slp_reduction (loop_vinfo,
+                                              vect_stmt_to_vectorize
+                                              (stmt_info),
+                                              max_tree_size, &limit,
+                                              bst_map, force_single_lane))
+              return opt_result::failure_at (vect_location,
+                                             "SLP build failed.\n");
        }
      saved_stmts.release ();
    }
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index dcb2522..83acbb3 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -2062,16 +2062,13 @@ vector_vector_composition_type (tree vtype, poly_uint64 nelts, tree *ptype,
   VECTYPE is the vector type that the vectorized statements will use.
   If ELSVALS is nonzero the supported else values will be stored in the
-   vector ELSVALS points to.
-
-   For loads PERM_OK indicates whether we can code generate a
-   SLP_TREE_LOAD_PERMUTATION on the node. */
+   vector ELSVALS points to. */

 static bool
 get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
                     tree vectype, slp_tree slp_node, bool masked_p,
                     vec_load_store_type vls_type,
-                    bool perm_ok, vect_load_store_data *ls)
+                    vect_load_store_data *ls)
 {
  vect_memory_access_type *memory_access_type = &ls->memory_access_type;
  poly_int64 *poffset = &ls->poffset;
@@ -2081,6 +2078,8 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
  internal_fn *lanes_ifn = &ls->lanes_ifn;
  vec<int> *elsvals = &ls->elsvals;
  tree *ls_type = &ls->ls_type;
+  bool *slp_perm = &ls->slp_perm;
+  unsigned *n_perms = &ls->n_perms;
  loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
@@ -2093,6 +2092,15 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
  *misalignment = DR_MISALIGNMENT_UNKNOWN;
  *poffset = 0;
  *ls_type = NULL_TREE;
+  *slp_perm = false;
+  *n_perms = -1U;
+
+  bool perm_ok = true;
+  poly_int64 vf = loop_vinfo ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1;
+
+  if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
+    perm_ok = vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL,
+                                            vf, true, n_perms);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
@@ -2534,7 +2542,7 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
      poly_uint64 read_amount
        = vf * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
-        read_amount *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
+        read_amount *= group_size;

      auto target_alignment
        = DR_TARGET_ALIGNMENT (STMT_VINFO_DR_INFO (stmt_info));
@@ -2627,6 +2635,60 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
  if (!loop_vinfo && *memory_access_type == VMAT_ELEMENTWISE)
    return false;

+  /* Some loads need to explicitly permute the loaded data if there
+     is a load permutation. Among those are:
+      - VMAT_ELEMENTWISE.
+      - VMAT_STRIDED_SLP.
+      - VMAT_GATHER_SCATTER:
+        - Strided gather (fallback for VMAT_STRIDED_SLP if #lanes == 1).
+        - Grouped strided gather (ditto but for #lanes > 1).
+
+     For VMAT_ELEMENTWISE we can fold the load permutation into the
+     individual indices we access directly, eliding the permutation.
+     Strided gather only allows load permutations for the
+     single-element case. */
+
+  if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
+      && !(*memory_access_type == VMAT_ELEMENTWISE
+           || (mat_gather_scatter_p (*memory_access_type)
+               && SLP_TREE_LANES (slp_node) == 1
+               && single_element_p)))
+    {
+      if (!loop_vinfo)
+        {
+          /* In BB vectorization we may not actually use a loaded vector
+             accessing elements in excess of DR_GROUP_SIZE. */
+          stmt_vec_info group_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+          group_info = DR_GROUP_FIRST_ELEMENT (group_info);
+          unsigned HOST_WIDE_INT nunits;
+          unsigned j, k, maxk = 0;
+          FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (slp_node), j, k)
+            if (k > maxk)
+              maxk = k;
+          tree vectype = SLP_TREE_VECTYPE (slp_node);
+          if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
+              || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1)))
+            {
+              if (dump_enabled_p ())
+                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                                 "BB vectorization with gaps at the end of "
+                                 "a load is not supported\n");
+              return false;
+            }
+        }
+
+      if (!perm_ok)
+        {
+          if (dump_enabled_p ())
+            dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+                             vect_location,
+                             "unsupported load permutation\n");
+          return false;
+        }
+
+      *slp_perm = true;
+    }
+
  return true;
 }
@@ -8009,7 +8071,7 @@ vectorizable_store (vec_info *vinfo,
  vect_load_store_data &ls = slp_node->get_data (_ls_data);
  if (cost_vec
      && !get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask_node,
-                              vls_type, false, &_ls_data))
+                              vls_type, &_ls_data))
    return false;
  /* Temporary aliases to analysis data, should not be modified through
     these. */
@@ -9454,7 +9516,6 @@ vectorizable_load (vec_info *vinfo,
  bool compute_in_loop = false;
  class loop *at_loop;
  int vec_num;
-  bool slp_perm = false;
  bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
  poly_uint64 vf;
  tree aggr_type;
@@ -9592,17 +9653,11 @@ vectorizable_load (vec_info *vinfo,
  else
    group_size = 1;

-  bool perm_ok = true;
-  unsigned n_perms = -1U;
-  if (cost_vec && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
-    perm_ok = vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, vf,
-                                            true, &n_perms);
-
  vect_load_store_data _ls_data{};
  vect_load_store_data &ls = slp_node->get_data (_ls_data);
  if (cost_vec
      && !get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask_node,
-                              VLS_LOAD, perm_ok, &ls))
+                              VLS_LOAD, &ls))
    return false;
  /* Temporary aliases to analysis data, should not be modified through
     these. */
@@ -9623,56 +9678,6 @@ vectorizable_load (vec_info *vinfo,
  bool type_mode_padding_p
    = TYPE_PRECISION (scalar_type) < GET_MODE_PRECISION (GET_MODE_INNER (mode));

-  /* ??? The following checks should really be part of
-     get_load_store_type. */
-  if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
-      && !(memory_access_type == VMAT_ELEMENTWISE
-           || (mat_gather_scatter_p (memory_access_type)
-               && SLP_TREE_LANES (slp_node) == 1
-               && (!grouped_load
-                   || !DR_GROUP_NEXT_ELEMENT (first_stmt_info)))))
-    {
-      slp_perm = true;
-
-      if (!loop_vinfo && cost_vec)
-        {
-          /* In BB vectorization we may not actually use a loaded vector
-             accessing elements in excess of DR_GROUP_SIZE. */
-          stmt_vec_info group_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
-          group_info = DR_GROUP_FIRST_ELEMENT (group_info);
-          unsigned HOST_WIDE_INT nunits;
-          unsigned j, k, maxk = 0;
-          FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (slp_node), j, k)
-            if (k > maxk)
-              maxk = k;
-          tree vectype = SLP_TREE_VECTYPE (slp_node);
-          if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
-              || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1)))
-            {
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                 "BB vectorization with gaps at the end of "
-                                 "a load is not supported\n");
-              return false;
-            }
-        }
-
-      if (cost_vec)
-        {
-          if (!perm_ok)
-            {
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_MISSED_OPTIMIZATION,
-                                 vect_location,
-                                 "unsupported load permutation\n");
-              return false;
-            }
-          ls.n_perms = n_perms;
-        }
-      else
-        n_perms = ls.n_perms;
-    }
-
  if (slp_node->ldst_lanes && memory_access_type != VMAT_LOAD_STORE_LANES)
    {
@@ -10027,7 +10032,7 @@ vectorizable_load (vec_info *vinfo,
         not only the number of vector stmts the permutation result
         fits in. */
      int ncopies;
-      if (slp_perm)
+      if (ls.slp_perm)
        {
          gcc_assert (memory_access_type != VMAT_ELEMENTWISE);
          /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
@@ -10135,18 +10140,18 @@ vectorizable_load (vec_info *vinfo,
          if (!costing_p)
            {
-              if (slp_perm)
+              if (ls.slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
              else
                slp_node->push_vec_def (new_stmt);
            }
        }

-      if (slp_perm)
+      if (ls.slp_perm)
        {
          if (costing_p)
            {
-              gcc_assert (n_perms != -1U);
-              inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
+              gcc_assert (ls.n_perms != -1U);
+              inside_cost += record_stmt_cost (cost_vec, ls.n_perms, vec_perm,
                                               slp_node, 0, vect_body);
            }
          else
@@ -10154,7 +10159,7 @@ vectorizable_load (vec_info *vinfo,
              unsigned n_perms2;
              vect_transform_slp_perm_load (vinfo, slp_node, dr_chain,
                                            gsi, vf, false, &n_perms2);
-              gcc_assert (n_perms == n_perms2);
+              gcc_assert (ls.n_perms == n_perms2);
            }
        }
@@ -10219,7 +10224,7 @@ vectorizable_load (vec_info *vinfo,
         instead the access is contiguous but it might be permuted.
         No gap adjustment is needed though. */
        ;
-      else if (slp_perm
+      else if (ls.slp_perm
               && (group_size != scalar_lanes
                   || !multiple_p (nunits, group_size)))
        {
@@ -10568,7 +10573,7 @@ vectorizable_load (vec_info *vinfo,
  if (mat_gather_scatter_p (memory_access_type))
    {
-      gcc_assert ((!grouped_load && !slp_perm) || ls.ls_type);
+      gcc_assert ((!grouped_load && !ls.slp_perm) || ls.ls_type);
      /* If we pun the original vectype the loads as well as costing, length,
         etc. is performed with the new type. After loading we VIEW_CONVERT
@@ -10930,14 +10935,14 @@ vectorizable_load (vec_info *vinfo,
          /* Store vector loads in the corresponding SLP_NODE. */
          if (!costing_p)
            {
-              if (slp_perm)
+              if (ls.slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
              else
                slp_node->push_vec_def (new_stmt);
            }
        }

-      if (slp_perm)
+      if (ls.slp_perm)
        {
          if (costing_p)
            {
@@ -11034,7 +11039,7 @@ vectorizable_load (vec_info *vinfo,
                                 stmt_info, bump);
    }

-  if (grouped_load || slp_perm)
+  if (grouped_load || ls.slp_perm)
    dr_chain.create (vec_num);

  gimple *new_stmt = NULL;
@@ -11531,11 +11536,11 @@ vectorizable_load (vec_info *vinfo,
          /* Collect vector loads and later create their permutation in
             vect_transform_slp_perm_load. */
-          if (!costing_p && (grouped_load || slp_perm))
+          if (!costing_p && (grouped_load || ls.slp_perm))
            dr_chain.quick_push (new_temp);

          /* Store vector loads in the corresponding SLP_NODE. */
-          if (!costing_p && !slp_perm)
+          if (!costing_p && !ls.slp_perm)
            slp_node->push_vec_def (new_stmt);

          /* With SLP permutation we load the gaps as well, without
@@ -11544,7 +11549,7 @@ vectorizable_load (vec_info *vinfo,
            group_elt += nunits;
          if (!costing_p
              && maybe_ne (group_gap_adj, 0U)
-              && !slp_perm
+              && !ls.slp_perm
              && known_eq (group_elt, group_size - group_gap_adj))
            {
              poly_wide_int bump_val
@@ -11561,7 +11566,7 @@ vectorizable_load (vec_info *vinfo,
         elements loaded for a permuted SLP load. */
      if (!costing_p
          && maybe_ne (group_gap_adj, 0U)
-          && slp_perm)
+          && ls.slp_perm)
        {
          poly_wide_int bump_val
            = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj);
@@ -11572,7 +11577,7 @@ vectorizable_load (vec_info *vinfo,
                             stmt_info, bump);
        }

-      if (slp_perm)
+      if (ls.slp_perm)
        {
          /* For SLP we know we've seen all possible uses of dr_chain so
             direct vect_transform_slp_perm_load to DCE the unused parts.
@@ -11580,9 +11585,9 @@ vectorizable_load (vec_info *vinfo,
             in PR101120 and friends. */
          if (costing_p)
            {
-              gcc_assert (n_perms != -1U);
-              if (n_perms != 0)
-                inside_cost = record_stmt_cost (cost_vec, n_perms, vec_perm,
+              gcc_assert (ls.n_perms != -1U);
+              if (ls.n_perms != 0)
+                inside_cost = record_stmt_cost (cost_vec, ls.n_perms, vec_perm,
                                                slp_node, 0, vect_body);
            }
          else
@@ -11591,7 +11596,7 @@ vectorizable_load (vec_info *vinfo,
              bool ok = vect_transform_slp_perm_load (vinfo, slp_node, dr_chain,
                                                      gsi, vf, false, &n_perms2,
                                                      nullptr, true);
-              gcc_assert (ok && n_perms == n_perms2);
+              gcc_assert (ok && ls.n_perms == n_perms2);
            }
          dr_chain.release ();
        }
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 52bc0d6..a6c313e 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -290,6 +290,8 @@ struct vect_load_store_data : vect_data {
  tree strided_offset_vectype; // VMAT_GATHER_SCATTER_IFN, originally strided
  tree ls_type; // VMAT_GATHER_SCATTER_IFN
  auto_vec<int> elsvals;
+  /* True if the load requires a load permutation. */
+  bool slp_perm; // SLP_TREE_LOAD_PERMUTATION
  unsigned n_perms; // SLP_TREE_LOAD_PERMUTATION
 };

@@ -844,6 +846,9 @@ public:
     following land-reducing operation would be assigned to. */
  unsigned int reduc_result_pos;

+  /* Whether this represents a reduction chain. */
+  bool is_reduc_chain;
+
  /* Whether we force a single cycle PHI during reduction vectorization. */
  bool force_single_cycle;

@@ -1066,10 +1071,6 @@ public:
  /* Reduction cycles detected in the loop. Used in loop-aware SLP. */
  auto_vec<stmt_vec_info> reductions;

-  /* All reduction chains in the loop, represented by the first
-     stmt in the chain. */
-  auto_vec<stmt_vec_info> reduction_chains;
-
  /* Defs that could not be analyzed such as OMP SIMD calls without a
     LHS. */
  auto_vec<stmt_vec_info> alternate_defs;

@@ -1290,7 +1291,6 @@ public:
 #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
 #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
 #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
-#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
 #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
 #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
 #define LOOP_VINFO_EARLY_BREAKS(L) (L)->early_breaks
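The tree-vect-stmts.cc changes above move the load-permutation checks into
get_load_store_type and cache their result in vect_load_store_data:
ls.slp_perm records whether the load still needs an explicit permutation and
ls.n_perms how many permute statements that costs, so vectorizable_load only
consumes the recorded values.  As a rough, self-contained sketch of what
applying an SLP load permutation means -- plain C++ for illustration only,
not GCC's internal API; apply_load_permutation and the sample values are
invented:

// Minimal sketch: perm[i] names the element of the contiguously loaded
// group that feeds lane i of the SLP node.
#include <cstdio>
#include <vector>

static std::vector<int>
apply_load_permutation (const std::vector<int> &group,
                        const std::vector<unsigned> &perm)
{
  std::vector<int> lanes (perm.size ());
  for (unsigned i = 0; i < perm.size (); ++i)
    lanes[i] = group[perm[i]];          // reorder group elements into lanes
  return lanes;
}

int
main ()
{
  std::vector<int> group = { 10, 11, 12, 13 };  // one contiguous group load
  std::vector<unsigned> perm = { 2, 0, 3, 1 };  // lane order the SLP tree wants
  for (int v : apply_load_permutation (group, perm))
    std::printf ("%d ", v);                     // prints: 12 10 13 11
  std::printf ("\n");
  return 0;
}

In the vectorizer the same reordering is emitted as VEC_PERM_EXPRs by
vect_transform_slp_perm_load; the n_perms it returns is what the costing
paths above pass to record_stmt_cost as the number of vec_perm operations.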