From 9afa5081212e1fc3cb2c4efc9b4f421eecf68810 Mon Sep 17 00:00:00 2001
From: "Hu, Lin1" <lin1.hu@intel.com>
Date: Mon, 19 Aug 2024 10:09:18 +0800
Subject: AVX10.2 ymm rounding: Support vreducep{s,d,h} and vrndscalep{s,d,h}
 intrins

gcc/ChangeLog:

	* config/i386/avx10_2roundingintrin.h: New intrins.
	* config/i386/i386-builtin.def (BDESC): Add new builtins.
	* config/i386/sse.md:
	(reducep<mode><mask_name><round_saeonly_name>): Add condition check.
	(<avx512>_rndscale<mode><mask_name><round_saeonly_name>): Ditto.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add new builtin test.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Add new macro test.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-3.c: Add test.
---
 gcc/config/i386/avx10_2roundingintrin.h | 367 ++++++++++++++++++++++++++++++++
 gcc/config/i386/i386-builtin.def        |   6 +
 gcc/config/i386/sse.md                  |   4 +-
 3 files changed, 375 insertions(+), 2 deletions(-)

(limited to 'gcc/config')

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index ac09144..d6b8e26 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -3646,6 +3646,233 @@ _mm256_maskz_range_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
 						  (__mmask8) __U, __R);
 }
 
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_reduce_round_pd (__m256d __A, const int __C, const int __R)
+{
+  return (__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) __A,
+							  __C,
+							  (__v4df)
+							  _mm256_setzero_pd (),
+							  (__mmask8) -1,
+							  __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_reduce_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+			     const int __C, const int __R)
+{
+  return (__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) __A,
+							  __C,
+							  (__v4df) __W,
+							  (__mmask8) __U,
+							  __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_reduce_round_pd (__mmask8 __U, __m256d __A, const int __C,
+			      const int __R)
+{
+  return (__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) __A,
+							  __C,
+							  (__v4df)
+							  _mm256_setzero_pd (),
+							  (__mmask8) __U,
+							  __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_reduce_round_ph (__m256h __A, const int __C, const int __R)
+{
+  return (__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) __A,
+							  __C,
+							  (__v16hf)
+							  _mm256_setzero_ph (),
+							  (__mmask16) -1,
+							  __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_reduce_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+			     const int __C, const int __R)
+{
+  return (__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) __A,
+							  __C,
+							  (__v16hf) __W,
+							  (__mmask16) __U,
+							  __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_reduce_round_ph (__mmask16 __U, __m256h __A, const int __C,
+			      const int __R)
+{
+  return (__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) __A,
+							  __C,
+							  (__v16hf)
+							  _mm256_setzero_ph (),
+							  (__mmask16) __U,
+							  __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_reduce_round_ps (__m256 __A, const int __C, const int __R)
+{
+  return (__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) __A,
+							 __C,
+							 (__v8sf)
+							 _mm256_setzero_ps (),
+							 (__mmask8) -1,
+							 __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_reduce_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
+			     const int __C, const int __R)
+{
+  return (__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) __A,
+							 __C,
+							 (__v8sf) __W,
+							 (__mmask8) __U,
+							 __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_reduce_round_ps (__mmask8 __U, __m256 __A, const int __C,
+			      const int __R)
+{
+  return (__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) __A,
+							 __C,
+							 (__v8sf)
+							 _mm256_setzero_ps (),
+							 (__mmask8) __U,
+							 __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_roundscale_round_pd (__m256d __A, const int __C, const int __R)
+{
+  return
+    (__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) __A,
+						       __C,
+						       (__v4df)
+						       _mm256_undefined_pd (),
+						       (__mmask8) -1,
+						       __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_roundscale_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+				 const int __C, const int __R)
+{
+  return (__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) __A,
+							    __C,
+							    (__v4df) __W,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_roundscale_round_pd (__mmask8 __U, __m256d __A, const int __C,
+				  const int __R)
+{
+  return
+    (__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) __A,
+						       __C,
+						       (__v4df)
+						       _mm256_setzero_pd (),
+						       (__mmask8) __U,
+						       __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_roundscale_round_ph (__m256h __A, const int __C, const int __R)
+{
+  return
+    (__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) __A,
+						       __C,
+						       (__v16hf)
+						       _mm256_undefined_ph (),
+						       (__mmask16) -1,
+						       __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_roundscale_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+				 const int __C, const int __R)
+{
+  return (__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) __A,
+							    __C,
+							    (__v16hf) __W,
+							    (__mmask16) __U,
+							    __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_roundscale_round_ph (__mmask16 __U, __m256h __A, const int __C,
+				  const int __R)
+{
+  return
+    (__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) __A,
+						       __C,
+						       (__v16hf)
+						       _mm256_setzero_ph (),
+						       (__mmask16) __U,
+						       __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_roundscale_round_ps (__m256 __A, const int __C, const int __R)
+{
+  return
+    (__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) __A,
+						      __C,
+						      (__v8sf)
+						      _mm256_undefined_ps (),
+						      (__mmask8) -1,
+						      __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_roundscale_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
+				 const int __C, const int __R)
+{
+  return (__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) __A,
+							   __C,
+							   (__v8sf) __W,
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_roundscale_round_ps (__mmask8 __U, __m256 __A, const int __C,
+				  const int __R)
+{
+  return (__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) __A,
+							   __C,
+							   (__v8sf)
+							   _mm256_setzero_ps (),
+							   (__mmask8) __U,
+							   __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -5523,6 +5750,146 @@ _mm256_maskz_range_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
 						 (_mm256_setzero_ps ()), \
 						 (__mmask8) (U), \
 						 (R)))
+
+#define _mm256_reduce_round_pd(A, C, R) \
+  ((__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) (A), \
+						    (C), \
+						    (__v4df) \
+						    (_mm256_setzero_pd ()), \
+						    (__mmask8) (-1), \
+						    (R)))
+
+#define _mm256_mask_reduce_round_pd(W, U, A, C, R) \
+  ((__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) (A), \
+						    (C), \
+						    (__v4df) (W), \
+						    (__mmask8) (U), \
+						    (R)))
+
+#define _mm256_maskz_reduce_round_pd(U, A, C, R) \
+  ((__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) (A), \
+						    (C), \
+						    (__v4df) \
+						    (_mm256_setzero_pd ()), \
+						    (__mmask8) (U), \
+						    (R)))
+
+#define _mm256_reduce_round_ph(A, C, R) \
+  ((__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) (A), \
+						    (C), \
+						    (__v16hf) \
+						    (_mm256_setzero_ph ()), \
+						    (__mmask16) (-1), \
+						    (R)))
+
+#define _mm256_mask_reduce_round_ph(W, U, A, C, R) \
+  ((__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) (A), \
+						    (C), \
+						    (__v16hf) (W), \
+						    (__mmask16) (U), \
+						    (R)))
+
+#define _mm256_maskz_reduce_round_ph(U, A, C, R) \
+  ((__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) (A), \
+						    (C), \
+						    (__v16hf) \
+						    (_mm256_setzero_ph ()), \
+						    (__mmask16) (U), \
+						    (R)))
+
+#define _mm256_reduce_round_ps(A, C, R) \
+  ((__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) (A), \
+						   (C), \
+						   (__v8sf) \
+						   (_mm256_setzero_ps ()), \
+						   (__mmask8) (-1), \
+						   (R)))
+
+#define _mm256_mask_reduce_round_ps(W, U, A, C, R) \
+  ((__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) (A), \
+						   (C), \
+						   (__v8sf) (W), \
+						   (__mmask8) (U), \
+						   (R)))
+
+#define _mm256_maskz_reduce_round_ps(U, A, C, R) \
+  ((__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) (A), \
+						   (C), \
+						   (__v8sf) \
+						   (_mm256_setzero_ps ()), \
+						   (__mmask8) (U), \
+						   (R)))
+
+#define _mm256_roundscale_round_pd(A, C, R) \
+  ((__m256d) \
+   __builtin_ia32_rndscalepd256_mask_round ((__v4df) (A), \
+					    (C), \
+					    (__v4df) \
+					    (_mm256_undefined_pd ()), \
+					    (__mmask8) (-1), \
+					    (R)))
+
+#define _mm256_mask_roundscale_round_pd(W, U, A, C, R) \
+  ((__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) (A), \
+						      (C), \
+						      (__v4df) (W), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_maskz_roundscale_round_pd(U, A, C, R) \
+  ((__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) (A), \
+						      (C), \
+						      (__v4df) \
+						      (_mm256_setzero_pd ()), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_roundscale_round_ph(A, C, R) \
+  ((__m256h) \
+   __builtin_ia32_rndscaleph256_mask_round ((__v16hf) (A), \
+					    (C), \
+					    (__v16hf) \
+					    (_mm256_undefined_ph ()), \
+					    (__mmask16) (-1), \
+					    (R)))
+
+#define _mm256_mask_roundscale_round_ph(W, U, A, C, R) \
+  ((__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) (A), \
+						      (C), \
+						      (__v16hf) (W), \
+						      (__mmask16) (U), \
+						      (R)))
+
+#define _mm256_maskz_roundscale_round_ph(U, A, C, R) \
+  ((__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) (A), \
+						      (C), \
+						      (__v16hf) \
+						      (_mm256_setzero_ph ()), \
+						      (__mmask16) (U), \
+						      (R)))
+
+#define _mm256_roundscale_round_ps(A, C, R) \
+  ((__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) (A), \
+						     (C), \
+						     (__v8sf) \
+						     (_mm256_undefined_ps ()), \
+						     (__mmask8) (-1), \
+						     (R)))
+
+#define _mm256_mask_roundscale_round_ps(W, U, A, C, R) \
+  ((__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) (A), \
+						     (C), \
+						     (__v8sf) (W), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_maskz_roundscale_round_ps(U, A, C, R) \
+  ((__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) (A), \
+						     (C), \
+						     (__v8sf) \
+						     (_mm256_setzero_ps ()), \
+						     (__mmask8) (U), \
+						     (R)))
 #endif
 
 #define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 232ec53..2b9acfa 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3462,6 +3462,12 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_mulv16hf3_mask_round, "__builti
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_mulv8sf3_mask_round, "__builtin_ia32_mulps256_mask_round", IX86_BUILTIN_VMULPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_rangepv4df_mask_round, "__builtin_ia32_rangepd256_mask_round", IX86_BUILTIN_VRANGEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT_V4DF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_rangepv8sf_mask_round, "__builtin_ia32_rangeps256_mask_round", IX86_BUILTIN_VRANGEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv4df_mask_round, "__builtin_ia32_reducepd256_mask_round", IX86_BUILTIN_VREDUCEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv16hf_mask_round, "__builtin_ia32_reduceph256_mask_round", IX86_BUILTIN_VREDUCEPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv8sf_mask_round, "__builtin_ia32_reduceps256_mask_round", IX86_BUILTIN_VREDUCEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev4df_mask_round, "__builtin_ia32_rndscalepd256_mask_round", IX86_BUILTIN_VRNDSCALEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev16hf_mask_round, "__builtin_ia32_rndscaleph256_mask_round", IX86_BUILTIN_VRNDSCALEPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev8sf_mask_round, "__builtin_ia32_rndscaleps256_mask_round", IX86_BUILTIN_VRNDSCALEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 61ec8ef..e850910 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -3772,7 +3772,7 @@
 	  [(match_operand:VFH_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
 	   (match_operand:SI 2 "const_0_to_255_operand")]
 	  UNSPEC_REDUCE))]
-  "TARGET_AVX512DQ || (VALID_AVX512FP16_REG_MODE (<MODE>mode))"
+  "(TARGET_AVX512DQ || (VALID_AVX512FP16_REG_MODE (<MODE>mode))) && <round_saeonly_mode_condition>"
   "vreduce<ssemodesuffix>\t{%2, %1<round_saeonly_mask_op3>, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
   [(set_attr "type" "sse")
    (set_attr "prefix" "evex")
@@ -13934,7 +13934,7 @@
 	  [(match_operand:VFH_AVX512VL 1 "nonimmediate_operand" "<round_saeonly_constraint>")
 	   (match_operand:SI 2 "const_0_to_255_operand")]
 	  UNSPEC_ROUND))]
-  "TARGET_AVX512F"
+  "TARGET_AVX512F && <round_saeonly_mode_condition>"
   "vrndscale<ssemodesuffix>\t{%2, %1<round_saeonly_mask_op3>, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
  [(set_attr "length_immediate" "1")
   (set_attr "prefix" "evex")
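
For reference, a minimal usage sketch of the new intrinsics follows.  It is
not part of the patch, and it assumes a toolchain with AVX10.2 support (for
example GCC 15 with -mavx10.2-256) plus AVX10.2-capable hardware or a
simulator to run it.  The imm8 operand __C packs the fixed-point length M
into bits 7:4 and the rounding mode into bits 1:0; the __R operand of these
instructions is SAE-only, so it must be _MM_FROUND_CUR_DIRECTION or
_MM_FROUND_NO_EXC.  vrndscalepd rounds each element to a multiple of 2^-M,
while vreducepd returns the remainder, i.e. src - vrndscale (src).

#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  /* Arguments are given high element first, so lane 0 holds 3.7.  */
  __m256d x = _mm256_set_pd (2.5, 0.6, -1.25, 3.7);

  /* imm8 0x10: M = 1 in bits 7:4, round-to-nearest-even in bits 1:0,
     i.e. round each element to the nearest multiple of 0.5, with
     exceptions suppressed via SAE.  */
  __m256d rs = _mm256_roundscale_round_pd (x, 0x10, _MM_FROUND_NO_EXC);

  /* The part rounded away above: expect e.g. 3.7 -> 0.2, since
     3.7 = 3.5 + 0.2 (up to the usual binary rounding of 3.7).  */
  __m256d rd = _mm256_reduce_round_pd (x, 0x10, _MM_FROUND_NO_EXC);

  double xv[4], sv[4], dv[4];
  _mm256_storeu_pd (xv, x);
  _mm256_storeu_pd (sv, rs);
  _mm256_storeu_pd (dv, rd);
  for (int i = 0; i < 4; i++)
    printf ("%6.2f -> rndscale %6.2f, reduce %6.2f\n", xv[i], sv[i], dv[i]);
  return 0;
}

The _mm256_mask_* and _mm256_maskz_* forms behave like the other
AVX-512-style masked intrinsics added earlier in this header: elements whose
mask bit is clear are taken from the merge operand W or zeroed, respectively.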