author     Hongyu Wang <hongyu.wang@intel.com>  2022-04-22 14:42:30 +0800
committer  Hongyu Wang <hongyu.wang@intel.com>  2022-04-25 09:17:36 +0800
commit     3c940d42701707559fabe49be99296f60fbc43e7 (patch)
tree       d28690de2ae3f8c0f8d7743aa9d53c2ed58076d5 /gcc
parent     bd64885971be856a0feffb0d6eb59e62655d1285 (diff)
AVX512F: Add missing macros for mask(z?)_scalef_s[sd] [PR 105339]
Add missing macros under -O0 and adjust the macro format for the scalef
intrinsics.

gcc/ChangeLog:

	PR target/105339
	* config/i386/avx512fintrin.h (_mm512_scalef_round_pd): Add
	parentheses for parameters and adjust format.
	(_mm512_mask_scalef_round_pd): Ditto.
	(_mm512_maskz_scalef_round_pd): Ditto.
	(_mm512_scalef_round_ps): Ditto.
	(_mm512_mask_scalef_round_ps): Ditto.
	(_mm512_maskz_scalef_round_ps): Ditto.
	(_mm_scalef_round_sd): Use _mm_undefined_pd.
	(_mm_scalef_round_ss): Use _mm_undefined_ps.
	(_mm_mask_scalef_round_sd): New macro.
	(_mm_mask_scalef_round_ss): Ditto.
	(_mm_maskz_scalef_round_sd): Ditto.
	(_mm_maskz_scalef_round_ss): Ditto.

gcc/testsuite/ChangeLog:

	PR target/105339
	* gcc.target/i386/sse-14.c: Add tests for new macros.
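To illustrate the bug, here is a minimal sketch of code that the missing macros
broke (the file name, function names and argument values are illustrative, not
taken from the patch): when compiling without optimization, __OPTIMIZE__ is not
defined, so the intrinsics expand through the #else (macro) branch of
avx512fintrin.h, and before this patch _mm_mask_scalef_round_s[sd] and
_mm_maskz_scalef_round_s[sd] had no definition there.

/* scalef-example.c - illustrative only.
   Compile with: gcc -O0 -mavx512f -c scalef-example.c
   At -O0 the #else (macro) definitions in avx512fintrin.h are used,
   so these calls need the macros added by this patch.  */
#include <immintrin.h>

__m128d
mask_scalef_sd (__m128d w, __mmask8 u, __m128d a, __m128d b)
{
  /* The rounding argument must be a literal constant;
     9 == _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC, as used in sse-14.c.  */
  return _mm_mask_scalef_round_sd (w, u, a, b, 9);
}

__m128
maskz_scalef_ss (__mmask8 u, __m128 a, __m128 b)
{
  return _mm_maskz_scalef_round_ss (u, a, b, 9);
}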
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/i386/avx512fintrin.h         | 76
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-14.c  |  4
2 files changed, 60 insertions(+), 20 deletions(-)
diff --git a/gcc/config/i386/avx512fintrin.h b/gcc/config/i386/avx512fintrin.h
index 29511fd..77d6249 100644
--- a/gcc/config/i386/avx512fintrin.h
+++ b/gcc/config/i386/avx512fintrin.h
@@ -3286,31 +3286,67 @@ _mm_maskz_scalef_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R)
(__mmask8) __U, __R);
}
#else
-#define _mm512_scalef_round_pd(A, B, C) \
- (__m512d)__builtin_ia32_scalefpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C)
-
-#define _mm512_mask_scalef_round_pd(W, U, A, B, C) \
- (__m512d)__builtin_ia32_scalefpd512_mask(A, B, W, U, C)
-
-#define _mm512_maskz_scalef_round_pd(U, A, B, C) \
- (__m512d)__builtin_ia32_scalefpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C)
+#define _mm512_scalef_round_pd(A, B, C) \
+ ((__m512d) \
+ __builtin_ia32_scalefpd512_mask((A), (B), \
+ (__v8df) _mm512_undefined_pd(), \
+ -1, (C)))
+
+#define _mm512_mask_scalef_round_pd(W, U, A, B, C) \
+ ((__m512d) __builtin_ia32_scalefpd512_mask((A), (B), (W), (U), (C)))
+
+#define _mm512_maskz_scalef_round_pd(U, A, B, C) \
+ ((__m512d) \
+ __builtin_ia32_scalefpd512_mask((A), (B), \
+ (__v8df) _mm512_setzero_pd(), \
+ (U), (C)))
+
+#define _mm512_scalef_round_ps(A, B, C) \
+ ((__m512) \
+ __builtin_ia32_scalefps512_mask((A), (B), \
+ (__v16sf) _mm512_undefined_ps(), \
+ -1, (C)))
+
+#define _mm512_mask_scalef_round_ps(W, U, A, B, C) \
+ ((__m512) __builtin_ia32_scalefps512_mask((A), (B), (W), (U), (C)))
+
+#define _mm512_maskz_scalef_round_ps(U, A, B, C) \
+ ((__m512) \
+ __builtin_ia32_scalefps512_mask((A), (B), \
+ (__v16sf) _mm512_setzero_ps(), \
+ (U), (C)))
+
+#define _mm_scalef_round_sd(A, B, C) \
+ ((__m128d) \
+ __builtin_ia32_scalefsd_mask_round ((A), (B), \
+ (__v2df) _mm_undefined_pd (), \
+ -1, (C)))

-#define _mm512_scalef_round_ps(A, B, C) \
- (__m512)__builtin_ia32_scalefps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), -1, C)
+#define _mm_scalef_round_ss(A, B, C) \
+ ((__m128) \
+ __builtin_ia32_scalefss_mask_round ((A), (B), \
+ (__v4sf) _mm_undefined_ps (), \
+ -1, (C)))

-#define _mm512_mask_scalef_round_ps(W, U, A, B, C) \
- (__m512)__builtin_ia32_scalefps512_mask(A, B, W, U, C)
+#define _mm_mask_scalef_round_sd(W, U, A, B, C) \
+ ((__m128d) \
+ __builtin_ia32_scalefsd_mask_round ((A), (B), (W), (U), (C)))

-#define _mm512_maskz_scalef_round_ps(U, A, B, C) \
- (__m512)__builtin_ia32_scalefps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C)
+#define _mm_mask_scalef_round_ss(W, U, A, B, C) \
+ ((__m128) \
+ __builtin_ia32_scalefss_mask_round ((A), (B), (W), (U), (C)))

-#define _mm_scalef_round_sd(A, B, C) \
- (__m128d)__builtin_ia32_scalefsd_mask_round (A, B, \
- (__v2df)_mm_setzero_pd (), -1, C)
+#define _mm_maskz_scalef_round_sd(U, A, B, C) \
+ ((__m128d) \
+ __builtin_ia32_scalefsd_mask_round ((A), (B), \
+ (__v2df) _mm_setzero_pd (), \
+ (U), (C)))

-#define _mm_scalef_round_ss(A, B, C) \
- (__m128)__builtin_ia32_scalefss_mask_round (A, B, \
- (__v4sf)_mm_setzero_ps (), -1, C)
+#define _mm_maskz_scalef_round_ss(U, A, B, C) \
+ ((__m128) \
+ __builtin_ia32_scalefss_mask_round ((A), (B), \
+ (__v4sf) _mm_setzero_ps (), \
+ (U), (C)))
#endif

#define _mm_mask_scalef_sd(W, U, A, B) \
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 956a9d1..f41493b 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -430,7 +430,9 @@ test_3 (_mm_maskz_mul_round_sd, __m128d, __mmask8, __m128d, __m128d, 9)
test_3 (_mm512_maskz_mul_round_ps, __m512, __mmask16, __m512, __m512, 9)
test_3 (_mm_maskz_mul_round_ss, __m128, __mmask8, __m128, __m128, 9)
test_3 (_mm512_maskz_scalef_round_pd, __m512d, __mmask8, __m512d, __m512d, 9)
+test_3 (_mm_maskz_scalef_round_sd, __m128d, __mmask8, __m128d, __m128d, 9)
test_3 (_mm512_maskz_scalef_round_ps, __m512, __mmask16, __m512, __m512, 9)
+test_3 (_mm_maskz_scalef_round_ss, __m128, __mmask8, __m128, __m128, 9)
test_3 (_mm512_maskz_shuffle_f32x4, __m512, __mmask16, __m512, __m512, 1)
test_3 (_mm512_maskz_shuffle_f64x2, __m512d, __mmask8, __m512d, __m512d, 1)
test_3 (_mm512_maskz_shuffle_i32x4, __m512i, __mmask16, __m512i, __m512i, 1)
@@ -545,7 +547,9 @@ test_4 (_mm_mask_mul_round_sd, __m128d, __m128d, __mmask8, __m128d, __m128d, 9)
test_4 (_mm512_mask_mul_round_ps, __m512, __m512, __mmask16, __m512, __m512, 9)
test_4 (_mm_mask_mul_round_ss, __m128, __m128, __mmask8, __m128, __m128, 9)
test_4 (_mm512_mask_scalef_round_pd, __m512d, __m512d, __mmask8, __m512d, __m512d, 9)
+test_4 (_mm_mask_scalef_round_sd, __m128d, __m128d, __mmask8, __m128d, __m128d, 9)
test_4 (_mm512_mask_scalef_round_ps, __m512, __m512, __mmask16, __m512, __m512, 9)
+test_4 (_mm_mask_scalef_round_ss, __m128, __m128, __mmask8, __m128, __m128, 9)
test_4 (_mm512_mask_shuffle_f32x4, __m512, __m512, __mmask16, __m512, __m512, 1)
test_4 (_mm512_mask_shuffle_f64x2, __m512d, __m512d, __mmask8, __m512d, __m512d, 1)
test_4 (_mm512_mask_shuffle_i32x4, __m512i, __m512i, __mmask16, __m512i, __m512i, 1)
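For reference, the added test_3/test_4 lines above are compile-only checks run
without optimization, so they exercise the macro definitions added to
avx512fintrin.h. A rough, illustrative expansion of two of the four added lines
(the real test_3/test_4 helper macros are defined earlier in sse-14.c, and the
generated names here are approximate):

#include <immintrin.h>

/* From test_4 (_mm_mask_scalef_round_sd, ...): call the macro form with a
   literal rounding immediate so it must expand and compile at -O0.  */
__m128d
test_mm_mask_scalef_round_sd (__m128d A, __mmask8 B, __m128d C, __m128d D)
{
  return _mm_mask_scalef_round_sd (A, B, C, D, 9);
}

/* From test_3 (_mm_maskz_scalef_round_ss, ...).  */
__m128
test_mm_maskz_scalef_round_ss (__mmask8 A, __m128 B, __m128 C)
{
  return _mm_maskz_scalef_round_ss (A, B, C, 9);
}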