author     Hu, Lin1 <lin1.hu@intel.com>  2024-08-19 10:09:19 +0800
committer  Haochen Jiang <haochen.jiang@intel.com>  2024-08-19 10:17:24 +0800
commit     1f86cf06c7897f6ab467443b5fe8789cc95fe0c4 (patch)
tree       dfc173b00d28e4c1a9fbcce5783f9fdb873e9ed0 /gcc
parent     9afa5081212e1fc3cb2c4efc9b4f421eecf68810 (diff)
AVX10.2 ymm rounding: Support vscalefp{s,d,h} intrins
gcc/ChangeLog:

	* config/i386/avx10_2roundingintrin.h: New intrins.
	* config/i386/i386-builtin.def: Add new builtins.
	* config/i386/sse.md:
	(<avx512>_scalef<mode><mask_name><round_name>): Add condition check.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add new builtin test.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Add new macro test.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-3.c: Add test.
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/i386/avx10_2roundingintrin.h            | 182
-rw-r--r--  gcc/config/i386/i386-builtin.def                   |   3
-rw-r--r--  gcc/config/i386/sse.md                             |   2
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx-1.c              |   3
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c |  25
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-13.c             |   3
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-14.c             |   9
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-22.c             |   9
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-23.c             |   3
9 files changed, 238 insertions(+), 1 deletion(-)
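
For reference (not part of the patch): VSCALEF computes A * 2^floor(B) per element, and the new _round intrinsics added here expose an explicit rounding control for 256-bit vectors. A minimal usage sketch, assuming a compiler with AVX10.2-256 support (e.g. -mavx10.2-256); the helper name is illustrative only:

#include <immintrin.h>

/* Hypothetical helper, not from the patch: scale each double in A by
   2^floor(B), rounding to nearest with exceptions suppressed.  */
__m256d
scale_by_pow2 (__m256d a, __m256d b)
{
  return _mm256_scalef_round_pd (a, b,
				 _MM_FROUND_TO_NEAREST_INT
				 | _MM_FROUND_NO_EXC);
}
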
diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index d6b8e26..f35f233 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -3873,6 +3873,119 @@ _mm256_maskz_roundscale_round_ps (__mmask8 __U, __m256 __A, const int __C,
(__mmask8) __U,
__R);
}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_scalef_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+ return
+ (__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_undefined_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_scalef_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B, const int __R)
+{
+ return (__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_scalef_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+ const int __R)
+{
+ return (__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_scalef_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+ return
+ (__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_undefined_ph (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_scalef_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+ __m256h __B, const int __R)
+{
+ return (__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_scalef_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+ const int __R)
+{
+ return (__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_setzero_ph (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_scalef_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+ return (__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_undefined_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_scalef_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
+ __m256 __B, const int __R)
+{
+ return (__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_scalef_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -5890,6 +6003,75 @@ _mm256_maskz_roundscale_round_ps (__mmask8 __U, __m256 __A, const int __C,
(_mm256_setzero_ps ()), \
(__mmask8) (U), \
(R)))
+
+#define _mm256_scalef_round_pd(A, B, R) \
+ ((__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) \
+ (_mm256_undefined_pd ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_scalef_round_pd(W, U, A, B, R) \
+ ((__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_scalef_round_pd(U, A, B, R) \
+ ((__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) \
+ (_mm256_setzero_pd ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_scalef_round_ph(A, B, R) \
+ ((__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_undefined_ph ()), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_scalef_round_ph(W, U, A, B, R) \
+ ((__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_scalef_round_ph(U, A, B, R) \
+ ((__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_setzero_ph ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_scalef_round_ps(A, B, R) \
+ ((__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) \
+ (_mm256_undefined_ps ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_scalef_round_ps(W, U, A, B, R) \
+ ((__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_scalef_round_ps(U, A, B, R) \
+ ((__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) \
+ (_mm256_setzero_ps ()), \
+ (__mmask8) (U), \
+ (R)))
#endif
#define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 2b9acfa..8be3e11 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3468,6 +3468,9 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv8sf_mask_round, "__buil
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev4df_mask_round, "__builtin_ia32_rndscalepd256_mask_round", IX86_BUILTIN_VRNDSCALEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev16hf_mask_round, "__builtin_ia32_rndscaleph256_mask_round", IX86_BUILTIN_VRNDSCALEPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev8sf_mask_round, "__builtin_ia32_rndscaleps256_mask_round", IX86_BUILTIN_VRNDSCALEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_scalefv4df_mask_round, "__builtin_ia32_scalefpd256_mask_round", IX86_BUILTIN_VSCALEFPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_scalefv16hf_mask_round, "__builtin_ia32_scalefph256_mask_round", IX86_BUILTIN_VSCALEFPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_scalefv8sf_mask_round, "__builtin_ia32_scalefps256_mask_round", IX86_BUILTIN_VSCALEFPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index e850910..8f34c93 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -13333,7 +13333,7 @@
[(match_operand:VFH_AVX512VL 1 "register_operand" "v")
(match_operand:VFH_AVX512VL 2 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_SCALEF))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && <round_mode_condition>"
"vscalef<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index 1b6cc87..e41a4ec 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -992,6 +992,9 @@
#define __builtin_ia32_rndscalepd256_mask_round(A, B, C, D, E) __builtin_ia32_rndscalepd256_mask_round(A, 1, C, D, 8)
#define __builtin_ia32_rndscaleph256_mask_round(A, B, C, D, E) __builtin_ia32_rndscaleph256_mask_round(A, 1, C, D, 8)
#define __builtin_ia32_rndscaleps256_mask_round(A, B, C, D, E) __builtin_ia32_rndscaleps256_mask_round(A, 1, C, D, 8)
+#define __builtin_ia32_scalefpd256_mask_round(A, B, C, D, E) __builtin_ia32_scalefpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_scalefph256_mask_round(A, B, C, D, E) __builtin_ia32_scalefph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_scalefps256_mask_round(A, B, C, D, E) __builtin_ia32_scalefps256_mask_round(A, B, C, D, 8)
#include <wmmintrin.h>
#include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
index 5d13ad9..331e79c 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
@@ -174,6 +174,15 @@
/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefpd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefpd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefpd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
@@ -524,3 +533,19 @@ avx10_2_test_23 (void)
x = _mm256_mask_roundscale_round_ps (x, 2, x, 0x42, _MM_FROUND_NO_EXC);
x = _mm256_maskz_roundscale_round_ps (2, x, 0x42, _MM_FROUND_NO_EXC);
}
+
+void extern
+avx10_2_test_24 (void)
+{
+ xd = _mm256_scalef_round_pd (xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xd = _mm256_mask_scalef_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ xd = _mm256_maskz_scalef_round_pd (m8, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ xh = _mm256_scalef_round_ph (xh, xh, 8);
+ xh = _mm256_mask_scalef_round_ph (xh, m16, xh, xh, 8);
+ xh = _mm256_maskz_scalef_round_ph (m16, xh, xh, 11);
+
+ x = _mm256_scalef_round_ps (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ x = _mm256_mask_scalef_round_ps (x, m8, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ x = _mm256_maskz_scalef_round_ps (m8, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index d158023..a393e28 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -999,5 +999,8 @@
#define __builtin_ia32_rndscalepd256_mask_round(A, B, C, D, E) __builtin_ia32_rndscalepd256_mask_round(A, 1, C, D, 8)
#define __builtin_ia32_rndscaleph256_mask_round(A, B, C, D, E) __builtin_ia32_rndscaleph256_mask_round(A, 1, C, D, 8)
#define __builtin_ia32_rndscaleps256_mask_round(A, B, C, D, E) __builtin_ia32_rndscaleps256_mask_round(A, 1, C, D, 8)
+#define __builtin_ia32_scalefpd256_mask_round(A, B, C, D, E) __builtin_ia32_scalefpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_scalefph256_mask_round(A, B, C, D, E) __builtin_ia32_scalefph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_scalefps256_mask_round(A, B, C, D, E) __builtin_ia32_scalefps256_mask_round(A, B, C, D, 8)
#include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index f691729..ddec892 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -1144,6 +1144,9 @@ test_2 (_mm256_min_round_ps, __m256, __m256, __m256, 8)
test_2 (_mm256_mul_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_mul_round_ph, __m256h, __m256h, __m256h, 9)
test_2 (_mm256_mul_round_ps, __m256, __m256, __m256, 9)
+test_2 (_mm256_scalef_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_scalef_round_ph, __m256h, __m256h, __m256h, 9)
+test_2 (_mm256_scalef_round_ps, __m256, __m256, __m256, 9)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1245,6 +1248,9 @@ test_3 (_mm256_maskz_min_round_ps, __m256, __mmask8, __m256, __m256, 8)
test_3 (_mm256_maskz_mul_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
test_3 (_mm256_maskz_mul_round_ph, __m256h, __mmask16, __m256h, __m256h, 9)
test_3 (_mm256_maskz_mul_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3 (_mm256_maskz_scalef_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_scalef_round_ph, __m256h, __mmask16, __m256h, __m256h, 9)
+test_3 (_mm256_maskz_scalef_round_ps, __m256, __mmask8, __m256, __m256, 9)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1338,6 +1344,9 @@ test_4 (_mm256_mask_min_round_ps, __m256, __m256, __mmask8, __m256, __m256, 8)
test_4 (_mm256_mask_mul_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
test_4 (_mm256_mask_mul_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
test_4 (_mm256_mask_mul_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask_scalef_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_scalef_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask_scalef_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 767a7df..b5d9645 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -1187,6 +1187,9 @@ test_2 (_mm256_min_round_ps, __m256, __m256, __m256, 8)
test_2 (_mm256_mul_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_mul_round_ph, __m256h, __m256h, __m256h, 9)
test_2 (_mm256_mul_round_ps, __m256, __m256, __m256, 9)
+test_2 (_mm256_scalef_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_scalef_round_ph, __m256h, __m256h, __m256h, 9)
+test_2 (_mm256_scalef_round_ps, __m256, __m256, __m256, 9)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1287,6 +1290,9 @@ test_3 (_mm256_maskz_min_round_ps, __m256, __mmask8, __m256, __m256, 8)
test_3 (_mm256_maskz_mul_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
test_3 (_mm256_maskz_mul_round_ph, __m256h, __mmask16, __m256h, __m256h, 9)
test_3 (_mm256_maskz_mul_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3 (_mm256_maskz_scalef_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_scalef_round_ph, __m256h, __mmask16, __m256h, __m256h, 9)
+test_3 (_mm256_maskz_scalef_round_ps, __m256, __mmask8, __m256, __m256, 9)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1380,6 +1386,9 @@ test_4 (_mm256_mask_min_round_ps, __m256, __m256, __mmask8, __m256, __m256, 8)
test_4 (_mm256_mask_mul_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
test_4 (_mm256_mask_mul_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
test_4 (_mm256_mask_mul_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask_scalef_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_scalef_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask_scalef_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 8c065fe..bd9f93d 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -974,6 +974,9 @@
#define __builtin_ia32_rndscalepd256_mask_round(A, B, C, D, E) __builtin_ia32_rndscalepd256_mask_round(A, 1, C, D, 8)
#define __builtin_ia32_rndscaleph256_mask_round(A, B, C, D, E) __builtin_ia32_rndscaleph256_mask_round(A, 1, C, D, 8)
#define __builtin_ia32_rndscaleps256_mask_round(A, B, C, D, E) __builtin_ia32_rndscaleps256_mask_round(A, 1, C, D, 8)
+#define __builtin_ia32_scalefpd256_mask_round(A, B, C, D, E) __builtin_ia32_scalefpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_scalefph256_mask_round(A, B, C, D, E) __builtin_ia32_scalefph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_scalefps256_mask_round(A, B, C, D, E) __builtin_ia32_scalefps256_mask_round(A, B, C, D, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
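
For context (also not part of the patch), the mask and maskz variants differ only in how inactive lanes are handled, mirroring the __W vs. _mm256_setzero_pd () arguments in the header above. A short sketch under the same assumptions as the earlier example, with illustrative helper names:

#include <immintrin.h>

/* Illustrative only: merge-masking keeps the lane of W wherever the
   corresponding bit of U is clear.  */
__m256d
scalef_merge (__m256d w, __mmask8 u, __m256d a, __m256d b)
{
  return _mm256_mask_scalef_round_pd (w, u, a, b,
				      _MM_FROUND_TO_NEG_INF
				      | _MM_FROUND_NO_EXC);
}

/* Illustrative only: zero-masking zeroes those lanes instead.  */
__m256d
scalef_zero (__mmask8 u, __m256d a, __m256d b)
{
  return _mm256_maskz_scalef_round_pd (u, a, b,
				       _MM_FROUND_TO_ZERO
				       | _MM_FROUND_NO_EXC);
}
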