From b2754227139512adecb6fda067632b587ff4a017 Mon Sep 17 00:00:00 2001
From: "Hu, Lin1"
Date: Mon, 19 Aug 2024 10:09:01 +0800
Subject: AVX10.2 ymm rounding: Support vcvttps2{,u}{dq,qq} and
 vcvtu{dq,qq}2p{s,d,h} intrins

gcc/ChangeLog:

	* config/i386/avx10_2roundingintrin.h: New intrins.
	* config/i386/i386-builtin.def (BDESC): Add new builtins.
	* config/i386/sse.md (unspec_fix_truncv8sfv8si2<mask_name>):
	Extend rounding control.
	(unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name>): Ditto.
	(floatuns<sseintvecmodelower><mode>2<mask_name><round_name>):
	Add condition check.
	(fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name><round_saeonly_name>):
	Remove round_saeonly_name.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add new builtin test.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Add new macro test.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-2.c: Add test.
---
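Notes (editor's addition, not part of the commit): a minimal usage
sketch of the new intrinsics, assuming the usual _MM_FROUND_* controls
from <immintrin.h> and an AVX10.2/256 target; the function names below
are invented for illustration only.

  #include <immintrin.h>

  /* vcvtt* are truncating conversions, so only suppress-all-exceptions
     (SAE) is meaningful in the rounding operand.  */
  __m256i
  trunc_ps_to_epi32 (__m256 x)
  {
    return _mm256_cvtt_roundps_epi32 (x, _MM_FROUND_NO_EXC);
  }

  /* The unsigned-integer-to-float conversions accept a full embedded
     rounding control.  */
  __m256
  cvt_epu32_to_ps_up (__m256i x)
  {
    return _mm256_cvt_roundepu32_ps (x, _MM_FROUND_TO_POS_INF
					| _MM_FROUND_NO_EXC);
  }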
 gcc/config/i386/avx10_2roundingintrin.h | 492 ++++++++++++++++++++++++++++++++
 gcc/config/i386/i386-builtin.def        |   9 +
 gcc/config/i386/sse.md                  |  27 +-
 3 files changed, 515 insertions(+), 13 deletions(-)

(limited to 'gcc/config')

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index 45a04e5..384facb 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -1451,6 +1451,312 @@ _mm256_maskz_cvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
						     (__mmask16) __U,
						     __R);
 }
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epi32 (__m256 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
+						      (__v8si)
+						      _mm256_undefined_si256 (),
+						      (__mmask8) -1,
+						      __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A,
+				const int __R)
+{
+  return (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
+							   (__v8si) __W,
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
+						      (__v8si)
+						      _mm256_setzero_si256 (),
+						      (__mmask8) __U,
+						      __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epi64 (__m128 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
+						      (__v4di)
+						      _mm256_setzero_si256 (),
+						      (__mmask8) -1,
+						      __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A,
+				const int __R)
+{
+  return (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
+							   (__v4di) __W,
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epi64 (__mmask8 __U, __m128 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
+						      (__v4di)
+						      _mm256_setzero_si256 (),
+						      (__mmask8) __U,
+						      __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epu32 (__m256 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
+						       (__v8si)
+						       _mm256_undefined_si256 (),
+						       (__mmask8) -1,
+						       __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A,
+				const int __R)
+{
+  return (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
+							    (__v8si) __W,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
+						       (__v8si)
+						       _mm256_setzero_si256 (),
+						       (__mmask8) __U,
+						       __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epu64 (__m128 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
+						       (__v4di)
+						       _mm256_setzero_si256 (),
+						       (__mmask8) -1,
+						       __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A,
+				const int __R)
+{
+  return (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
+							    (__v4di) __W,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epu64 (__mmask8 __U, __m128 __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
+						       (__v4di)
+						       _mm256_setzero_si256 (),
+						       (__mmask8) __U,
+						       __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu32_ph (__m256i __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
+							    (__v8hf)
+							    _mm_setzero_ph (),
+							    (__mmask8) -1,
+							    __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu32_ph (__m128h __W, __mmask8 __U, __m256i __A,
+			       const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
+							    (__v8hf) __W,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu32_ph (__mmask8 __U, __m256i __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
+							    (__v8hf)
+							    _mm_setzero_ph (),
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu32_ps (__m256i __A, const int __R)
+{
+  return
+    (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
+						     (__v8sf)
+						     _mm256_undefined_ps (),
+						     (__mmask8) -1,
+						     __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu32_ps (__m256 __W, __mmask8 __U, __m256i __A,
+			       const int __R)
+{
+  return (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
+							  (__v8sf) __W,
+							  (__mmask8) __U,
+							  __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu32_ps (__mmask8 __U, __m256i __A, const int __R)
+{
+  return (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
+							  (__v8sf)
+							  _mm256_setzero_ps (),
+							  (__mmask8) __U,
+							  __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu64_pd (__m256i __A, const int __R)
+{
+  return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
+							   (__v4df)
+							   _mm256_setzero_pd (),
+							   (__mmask8) -1,
+							   __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu64_pd (__m256d __W, __mmask8 __U, __m256i __A,
+			       const int __R)
+{
+  return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
+							   (__v4df) __W,
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu64_pd (__mmask8 __U, __m256i __A, const int __R)
+{
+  return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
+							   (__v4df)
+							   _mm256_setzero_pd (),
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu64_ph (__m256i __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
+							    (__v8hf)
+							    _mm_setzero_ph (),
+							    (__mmask8) -1,
+							    __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu64_ph (__m128h __W, __mmask8 __U, __m256i __A,
+			       const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
+							    (__v8hf) __W,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu64_ph (__mmask8 __U, __m256i __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
+							    (__v8hf)
+							    _mm_setzero_ph (),
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu64_ps (__m256i __A, const int __R)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
+							  (__v4sf)
+							  _mm_setzero_ps (),
+							  (__mmask8) -1,
+							  __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu64_ps (__m128 __W, __mmask8 __U, __m256i __A,
+			       const int __R)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
+							  (__v4sf) __W,
+							  (__mmask8) __U,
+							  __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu64_ps (__mmask8 __U, __m256i __A, const int __R)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
+							  (__v4sf)
+							  _mm_setzero_ps (),
+							  (__mmask8) __U,
+							  __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -2310,6 +2616,192 @@ _mm256_maskz_cvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
					    (_mm256_setzero_si256 ()), \
					    (__mmask16) (U), \
					    (R)))
+
+#define _mm256_cvtt_roundps_epi32(A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
+					   (__v8si) \
+					   (_mm256_undefined_si256 ()), \
+					   (__mmask8) (-1), \
+					   (R)))
+
+#define _mm256_mask_cvtt_roundps_epi32(W, U, A, R) \
+  ((__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
+						     (__v8si) (W), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_maskz_cvtt_roundps_epi32(U, A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
+					   (__v8si) \
+					   (_mm256_setzero_si256 ()), \
+					   (__mmask8) (U), \
+					   (R)))
+
+#define _mm256_cvtt_roundps_epi64(A, R) \
+  ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
+						     (__v4di) \
+						     (_mm256_setzero_si256 ()), \
+						     (__mmask8) (-1), \
+						     (R)))
+
+#define _mm256_mask_cvtt_roundps_epi64(W, U, A, R) \
+  ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
+						     (__v4di) (W), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_maskz_cvtt_roundps_epi64(U, A, R) \
+  ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
+						     (__v4di) \
+						     (_mm256_setzero_si256 ()), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_cvtt_roundps_epu32(A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
+					    (__v8si) \
+					    (_mm256_undefined_si256 ()), \
+					    (__mmask8) (-1), \
+					    (R)))
+
+#define _mm256_mask_cvtt_roundps_epu32(W, U, A, R) \
+  ((__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
+						      (__v8si) (W), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_maskz_cvtt_roundps_epu32(U, A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
+					    (__v8si) \
+					    (_mm256_setzero_si256 ()), \
+					    (__mmask8) (U), \
+					    (R)))
+
+#define _mm256_cvtt_roundps_epu64(A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
+					    (__v4di) \
+					    (_mm256_setzero_si256 ()), \
+					    (__mmask8) (-1), \
+					    (R)))
+
+#define _mm256_mask_cvtt_roundps_epu64(W, U, A, R) \
+  ((__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
+						      (__v4di) (W), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_maskz_cvtt_roundps_epu64(U, A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
+					    (__v4di) \
+					    (_mm256_setzero_si256 ()), \
+					    (__mmask8) (U), \
+					    (R)))
+
+#define _mm256_cvt_roundepu32_ph(A, R) \
+  ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
+						      (__v8hf) \
+						      (_mm_setzero_ph ()), \
+						      (__mmask8) (-1), \
+						      (R)))
+
+#define _mm256_mask_cvt_roundepu32_ph(W, U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
+						      (__v8hf) (W), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_maskz_cvt_roundepu32_ph(U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
+						      (__v8hf) \
+						      (_mm_setzero_ph ()), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_cvt_roundepu32_ps(A, R) \
+  ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
+						    (__v8sf) \
+						    (_mm256_undefined_ps ()), \
+						    (__mmask8) (-1), \
+						    (R)))
+
+#define _mm256_mask_cvt_roundepu32_ps(W, U, A, R) \
+  ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
+						    (__v8sf) (W), \
+						    (__mmask8) (U), \
+						    (R)))
+
+#define _mm256_maskz_cvt_roundepu32_ps(U, A, R) \
+  ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
+						    (__v8sf) \
+						    (_mm256_setzero_ps ()), \
+						    (__mmask8) (U), \
+						    (R)))
+
+#define _mm256_cvt_roundepu64_pd(A, R) \
+  ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
+						     (__v4df) \
+						     (_mm256_setzero_pd ()), \
+						     (__mmask8) (-1), \
+						     (R)))
+
+#define _mm256_mask_cvt_roundepu64_pd(W, U, A, R) \
+  ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
+						     (__v4df) (W), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_maskz_cvt_roundepu64_pd(U, A, R) \
+  ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
+						     (__v4df) \
+						     (_mm256_setzero_pd ()), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_cvt_roundepu64_ph(A, R) \
+  ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
+						      (__v8hf) \
+						      (_mm_setzero_ph ()), \
+						      (__mmask8) (-1), \
+						      (R)))
+
+#define _mm256_mask_cvt_roundepu64_ph(W, U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
+						      (__v8hf) (W), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_maskz_cvt_roundepu64_ph(U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
+						      (__v8hf) \
+						      (_mm_setzero_ph ()), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_cvt_roundepu64_ps(A, R) \
+  ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
+						    (__v4sf) \
+						    (_mm_setzero_ps ()), \
+						    (__mmask8) (-1), \
+						    (R)))
+
+#define _mm256_mask_cvt_roundepu64_ps(W, U, A, R) \
+  ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
+						    (__v4sf) (W), \
+						    (__mmask8) (U), \
+						    (R)))
+
+#define _mm256_maskz_cvt_roundepu64_ps(U, A, R) \
+  ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
+						    (__v4sf) \
+						    (_mm_setzero_ps ()), \
+						    (__mmask8) (U), \
+						    (R)))
 #endif

 #ifdef __DISABLE_AVX10_2_256__

diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index a883336..1290ae6 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3361,6 +3361,15 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv4di2_mask_round, "__builtin_ia32_vcvttph2uqq256_mask_round", IX86_BUILTIN_VCVTTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2uw256_mask_round", IX86_BUILTIN_VCVTTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2w256_mask_round", IX86_BUILTIN_VCVTTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv8sfv8si2_mask_round, "__builtin_ia32_cvttps2dq256_mask_round", IX86_BUILTIN_VCVTTPS2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4sfv4di2_mask_round, "__builtin_ia32_cvttps2qq256_mask_round", IX86_BUILTIN_VCVTTPS2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv8sfv8si2_mask_round, "__builtin_ia32_cvttps2udq256_mask_round", IX86_BUILTIN_VCVTTPS2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4sfv4di2_mask_round, "__builtin_ia32_cvttps2uqq256_mask_round", IX86_BUILTIN_VCVTTPS2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtudq2ph_v8si_mask_round, "__builtin_ia32_vcvtudq2ph256_mask_round", IX86_BUILTIN_VCVTUDQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SI_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv8siv8sf2_mask_round, "__builtin_ia32_cvtudq2ps256_mask_round", IX86_BUILTIN_VCVTUDQ2PS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SI_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4df2_mask_round, "__builtin_ia32_cvtuqq2pd256_mask_round", IX86_BUILTIN_VCVTUQQ2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DI_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtuqq2ph_v4di_mask_round, "__builtin_ia32_vcvtuqq2ph256_mask_round", IX86_BUILTIN_VCVTUQQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4DI_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4sf2_mask_round, "__builtin_ia32_cvtuqq2ps256_mask_round", IX86_BUILTIN_VCVTUQQ2PS256_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4DI_V4SF_UQI_INT)

 BDESC_END (ROUND_ARGS, MULTI_ARG)

diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 53c15d1..25de88b 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -8401,7 +8401,7 @@
   [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v")
	(unsigned_float:VF1_AVX512VL
	  (match_operand:<sseintvecmode> 1 "nonimmediate_operand" "<round_constraint>")))]
-  "TARGET_AVX512F"
+  "TARGET_AVX512F && <round_mode_condition>"
   "vcvtudq2ps\t{<round_mask_op3>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op3>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
    (set_attr "mode" "<MODE>")])
@@ -8530,12 +8530,13 @@
    (set_attr "prefix" "evex")
    (set_attr "mode" "XI")])

-(define_insn "unspec_fix_truncv8sfv8si2<mask_name>"
+(define_insn "unspec_fix_truncv8sfv8si2<mask_name><round_saeonly_name>"
   [(set (match_operand:V8SI 0 "register_operand" "=v")
-	(unspec:V8SI [(match_operand:V8SF 1 "nonimmediate_operand" "vm")]
+	(unspec:V8SI [(match_operand:V8SF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
		     UNSPEC_VCVTT))]
-  "TARGET_AVX && <mask_avx512vl_condition>"
-  "vcvttps2dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+  "TARGET_AVX && <mask_avx512vl_condition>
+   && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
+  "vcvttps2dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "<mask_prefix2>")
    (set_attr "mode" "OI")])
@@ -9488,12 +9489,12 @@
    (set_attr "prefix" "evex")
    (set_attr "mode" "<sseinsnmode>")])

-(define_insn "fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name><round_saeonly_name>"
+(define_insn "fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name>"
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
	(any_fix:VI8_256_512
-	  (match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
-  "vcvttps2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+	  (match_operand:<ssePSmode2> 1 "nonimmediate_operand" "vm")))]
+  "TARGET_AVX512DQ"
+  "vcvttps2<fixsuffix>qq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
    (set_attr "mode" "<sseinsnmode>")])
@@ -9648,13 +9649,13 @@
	  DONE;
 })

-(define_insn "unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name>"
+(define_insn "unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name><round_saeonly_name>"
   [(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
	(unspec:<sseintvecmode>
-	  [(match_operand:VF1_128_256 1 "nonimmediate_operand" "vm")]
+	  [(match_operand:VF1_128_256 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
	  UNSPEC_VCVTTU))]
-  "TARGET_AVX512VL"
-  "vcvttps2udq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+  "TARGET_AVX512VL && <round_saeonly_mode_condition>"
+  "vcvttps2udq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
    (set_attr "mode" "<sseintvecmode2>")])
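Editor's note, not part of the commit: the testsuite side named in the
ChangeLog (e.g. gcc.target/i386/avx10_2-rounding-2.c) falls outside the
gcc/config view shown above.  A compile test for the new ymm
embedded-rounding forms would look roughly like the sketch below; the
dg- directives and the scan pattern are illustrative, not the actual
testcase.

  /* { dg-do compile } */
  /* { dg-options "-O2 -mavx10.2-256" } */
  /* { dg-final { scan-assembler-times "vcvttps2dq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+" 1 } } */

  #include <immintrin.h>

  volatile __m256 x;
  volatile __m256i d;

  void
  foo (void)
  {
    /* SAE form of the truncating conversion.  */
    d = _mm256_cvtt_roundps_epi32 (x, _MM_FROUND_NO_EXC);
  }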