author     liuhongt <hongtao.liu@intel.com>  2020-03-02 17:58:04 +0800
committer  liuhongt <hongtao.liu@intel.com>  2021-09-22 12:56:30 +0800
commit     db3b96df03fdbe2fb770729501e2e9b65e66c2da
tree       69d34dc30b53e2a8be67231b9496b9021915c3b9 /gcc
parent     ed643e9f171e99b0aa1453b3f29ed1103e9b5c80
AVX512FP16: Add vfcmaddcsh/vfmaddcsh/vfcmulcsh/vfmulcsh.
gcc/ChangeLog:

	* config/i386/avx512fp16intrin.h (_mm_mask_fcmadd_sch):
	New intrinsic.
	(_mm_mask3_fcmadd_sch): Likewise.
	(_mm_maskz_fcmadd_sch): Likewise.
	(_mm_fcmadd_sch): Likewise.
	(_mm_mask_fmadd_sch): Likewise.
	(_mm_mask3_fmadd_sch): Likewise.
	(_mm_maskz_fmadd_sch): Likewise.
	(_mm_fmadd_sch): Likewise.
	(_mm_mask_fcmadd_round_sch): Likewise.
	(_mm_mask3_fcmadd_round_sch): Likewise.
	(_mm_maskz_fcmadd_round_sch): Likewise.
	(_mm_fcmadd_round_sch): Likewise.
	(_mm_mask_fmadd_round_sch): Likewise.
	(_mm_mask3_fmadd_round_sch): Likewise.
	(_mm_maskz_fmadd_round_sch): Likewise.
	(_mm_fmadd_round_sch): Likewise.
	(_mm_fcmul_sch): Likewise.
	(_mm_mask_fcmul_sch): Likewise.
	(_mm_maskz_fcmul_sch): Likewise.
	(_mm_fmul_sch): Likewise.
	(_mm_mask_fmul_sch): Likewise.
	(_mm_maskz_fmul_sch): Likewise.
	(_mm_fcmul_round_sch): Likewise.
	(_mm_mask_fcmul_round_sch): Likewise.
	(_mm_maskz_fcmul_round_sch): Likewise.
	(_mm_fmul_round_sch): Likewise.
	(_mm_mask_fmul_round_sch): Likewise.
	(_mm_maskz_fmul_round_sch): Likewise.
	* config/i386/i386-builtin.def: Add corresponding new builtins.
	* config/i386/sse.md
	(avx512fp16_fmaddcsh_v8hf_maskz<round_expand_name>): New expander.
	(avx512fp16_fcmaddcsh_v8hf_maskz<round_expand_name>): Ditto.
	(avx512fp16_fma_<complexopname>sh_v8hf<mask_scalarcz_name><round_scalarcz_name>):
	New define insn.
	(avx512fp16_<complexopname>sh_v8hf_mask<round_name>): Ditto.
	(avx512fp16_<complexopname>sh_v8hf<mask_scalarc_name><round_scalarcz_name>):
	Ditto.
	* config/i386/subst.md (mask_scalarcz_name): New.
	(mask_scalarc_name): Ditto.
	(mask_scalarc_operand3): Ditto.
	(mask_scalarcz_operand4): Ditto.
	(round_scalarcz_name): Ditto.
	(round_scalarc_mask_operand3): Ditto.
	(round_scalarcz_mask_operand4): Ditto.
	(round_scalarc_mask_op3): Ditto.
	(round_scalarcz_mask_op4): Ditto.
	(round_scalarcz_constraint): Ditto.
	(round_scalarcz_nimm_predicate): Ditto.
	(mask_scalarcz): Ditto.
	(mask_scalarc): Ditto.
	(round_scalarcz): Ditto.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add test for new builtins.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/sse-14.c: Add test for new intrinsics.
	* gcc.target/i386/sse-22.c: Ditto.
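For illustration only (not part of the commit): a minimal usage sketch of the new scalar complex intrinsics, assuming a compiler carrying this patch and -mavx512fp16. Each __m128h packs eight _Float16 values; the *_sch intrinsics operate on the low two elements as one complex number (real part in element 0, imaginary part in element 1). The function name below is hypothetical.

#include <immintrin.h>

/* Hypothetical demo: compute (a*b + c) * conj(b) on the low complex
   value, using intrinsics added by this patch.  */
__m128h
complex_muladd_demo (__m128h a, __m128h b, __m128h c)
{
  __m128h t = _mm_fmadd_sch (a, b, c);  /* vfmaddcsh: a*b + c */
  return _mm_fcmul_sch (t, b);          /* vfcmulcsh: t * conj(b) */
}
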
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/i386/avx512fp16intrin.h       | 475
-rw-r--r--  gcc/config/i386/i386-builtin.def         |  10
-rw-r--r--  gcc/config/i386/sse.md                   |  76
-rw-r--r--  gcc/config/i386/subst.md                 |  63
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx-1.c    |  10
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-13.c   |  10
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-14.c   |  14
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-22.c   |  14
-rw-r--r--  gcc/testsuite/gcc.target/i386/sse-23.c   |  10
9 files changed, 682 insertions, 0 deletions
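The _round variants added below take an explicit rounding-control immediate; the literal 8 wired into the test-suite macros at the end of this patch equals _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC. Another hedged sketch under the same assumptions (hypothetical function name):

#include <immintrin.h>

/* Hypothetical demo: merge-masked complex FMA with explicit rounding
   control.  If bit 0 of k is clear, the low complex value is kept
   from a instead of the computed result.  */
__m128h
complex_fcmadd_round_demo (__m128h a, __mmask8 k, __m128h b, __m128h c)
{
  return _mm_mask_fcmadd_round_sch (a, k, b, c,
				    _MM_FROUND_TO_NEAREST_INT
				    | _MM_FROUND_NO_EXC);
}
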
diff --git a/gcc/config/i386/avx512fp16intrin.h b/gcc/config/i386/avx512fp16intrin.h
index e402a59..e01cff6 100644
--- a/gcc/config/i386/avx512fp16intrin.h
+++ b/gcc/config/i386/avx512fp16intrin.h
@@ -6511,6 +6511,481 @@ _mm512_maskz_fmul_round_pch (__mmask16 __A, __m512h __B,
#endif /* __OPTIMIZE__ */
+/* Intrinsics vf[,c]maddcsh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C, __D,
+ _MM_FROUND_CUR_DIRECTION));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C, __D,
+ _MM_FROUND_CUR_DIRECTION));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __B, __E),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __B, __E),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+ __mmask8 __D, const int __E)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D, __E));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __B, __E),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __B, __E),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+ __mmask8 __D, const int __E)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D, __E));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
+ (__v8hf) __C,
+ (__v8hf) __D,
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __D);
+}
+#else
+#ifdef __AVX512VL__
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_movaps128_mask ( \
+ (__v4sf) \
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (C), \
+ (__v8hf) (D), \
+ (B), (E)), \
+ (__v4sf) (A), (B)))
+
+#else
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_blendvps ((__v4sf) (A), \
+ (__v4sf) \
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (C), \
+ (__v8hf) (D), \
+ (B), (E)), \
+ (__v4sf) _mm_set_ss ((float) ((int) (B) << 31))))
+#endif
+
+#define _mm_mask3_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) _mm_move_ss ((__m128) (C), \
+ (__m128) \
+ __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (B), \
+ (__v8hf) (C), \
+ (D), (E))))
+
+#define _mm_maskz_fcmadd_round_sch(A, B, C, D, E) \
+ __builtin_ia32_vfcmaddcsh_maskz_round ((B), (C), (D), (A), (E))
+
+#define _mm_fcmadd_round_sch(A, B, C, D) \
+ __builtin_ia32_vfcmaddcsh_round ((A), (B), (C), (D))
+
+#ifdef __AVX512VL__
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_movaps128_mask ( \
+ (__v4sf) \
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (C), \
+ (__v8hf) (D), \
+ (B), (E)), \
+ (__v4sf) (A), (B)))
+
+#else
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_blendvps ((__v4sf) (A), \
+ (__v4sf) \
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (C), \
+ (__v8hf) (D), \
+ (B), (E)), \
+ (__v4sf) _mm_set_ss ((float) ((int) (B) << 31))))
+#endif
+
+#define _mm_mask3_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) _mm_move_ss ((__m128) (C), \
+ (__m128) \
+ __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A), \
+ (__v8hf) (B), \
+ (__v8hf) (C), \
+ (D), (E))))
+
+#define _mm_maskz_fmadd_round_sch(A, B, C, D, E) \
+ __builtin_ia32_vfmaddcsh_maskz_round ((B), (C), (D), (A), (E))
+
+#define _mm_fmadd_round_sch(A, B, C, D) \
+ __builtin_ia32_vfmaddcsh_round ((A), (B), (C), (D))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vf[,c]mulcsh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_sch (__m128h __A, __m128h __B)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_sch (__m128h __A, __m128h __B)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B,
+ __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
+ (__v8hf) __B, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, const int __E)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, __E);
+}
+
+#else
+#define _mm_fcmul_round_sch(__A, __B, __D) \
+ (__m128h) __builtin_ia32_vfcmulcsh_round ((__v8hf) __A, \
+ (__v8hf) __B, __D)
+
+#define _mm_mask_fcmul_round_sch(__A, __B, __C, __D, __E) \
+ (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C, \
+ (__v8hf) __D, \
+ (__v8hf) __A, \
+ __B, __E)
+
+#define _mm_maskz_fcmul_round_sch(__A, __B, __C, __E) \
+ (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B, \
+ (__v8hf) __C, \
+ _mm_setzero_ph (), \
+ __A, __E)
+
+#define _mm_fmul_round_sch(__A, __B, __D) \
+ (__m128h) __builtin_ia32_vfmulcsh_round ((__v8hf) __A, \
+ (__v8hf) __B, __D)
+
+#define _mm_mask_fmul_round_sch(__A, __B, __C, __D, __E) \
+ (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C, \
+ (__v8hf) __D, \
+ (__v8hf) __A, \
+ __B, __E)
+
+#define _mm_maskz_fmul_round_sch(__A, __B, __C, __E) \
+ (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B, \
+ (__v8hf) __C, \
+ _mm_setzero_ph (), \
+ __A, __E)
+
+#endif /* __OPTIMIZE__ */
+
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 3d9f099..302e1bc 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3231,6 +3231,16 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_round, "__
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_mask_round, "__builtin_ia32_vfcmulcph512_mask_round", IX86_BUILTIN_VFCMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_round, "__builtin_ia32_vfmulcph512_round", IX86_BUILTIN_VFMULCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_mask_round, "__builtin_ia32_vfmulcph512_mask_round", IX86_BUILTIN_VFMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fcmaddcsh_v8hf_round, "__builtin_ia32_vfcmaddcsh_round", IX86_BUILTIN_VFCMADDCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_mask_round, "__builtin_ia32_vfcmaddcsh_mask_round", IX86_BUILTIN_VFCMADDCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_maskz_round, "__builtin_ia32_vfcmaddcsh_maskz_round", IX86_BUILTIN_VFCMADDCSH_MASKZ_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fmaddcsh_v8hf_round, "__builtin_ia32_vfmaddcsh_round", IX86_BUILTIN_VFMADDCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmaddcsh_v8hf_mask_round, "__builtin_ia32_vfmaddcsh_mask_round", IX86_BUILTIN_VFMADDCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmaddcsh_v8hf_maskz_round, "__builtin_ia32_vfmaddcsh_maskz_round", IX86_BUILTIN_VFMADDCSH_MASKZ_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_round, "__builtin_ia32_vfcmulcsh_round", IX86_BUILTIN_VFCMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_mask_round, "__builtin_ia32_vfcmulcsh_mask_round", IX86_BUILTIN_VFCMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "__builtin_ia32_vfmulcsh_round", IX86_BUILTIN_VFMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index f3d5b40..b08a9d3 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -5891,6 +5891,82 @@
[(set_attr "type" "ssemul")
(set_attr "mode" "<MODE>")])
+(define_expand "avx512fp16_fmaddcsh_v8hf_maskz<round_expand_name>"
+ [(match_operand:V8HF 0 "register_operand")
+ (match_operand:V8HF 1 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
+ (match_operand:QI 4 "register_operand")]
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+{
+ emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
+ operands[0], operands[1], operands[2], operands[3],
+ CONST0_RTX (V8HFmode), operands[4]<round_expand_operand>));
+ DONE;
+})
+
+(define_expand "avx512fp16_fcmaddcsh_v8hf_maskz<round_expand_name>"
+ [(match_operand:V8HF 0 "register_operand")
+ (match_operand:V8HF 1 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
+ (match_operand:QI 4 "register_operand")]
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+{
+ emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
+ operands[0], operands[1], operands[2], operands[3],
+ CONST0_RTX (V8HFmode), operands[4]<round_expand_operand>));
+ DONE;
+})
+
+(define_insn "avx512fp16_fma_<complexopname>sh_v8hf<mask_scalarcz_name><round_scalarcz_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=&v")
+ (vec_merge:V8HF
+ (unspec:V8HF
+ [(match_operand:V8HF 1 "<round_scalarcz_nimm_predicate>" "v")
+ (match_operand:V8HF 2 "<round_scalarcz_nimm_predicate>" "<round_scalarcz_constraint>")
+ (match_operand:V8HF 3 "<round_scalarcz_nimm_predicate>" "0")]
+ UNSPEC_COMPLEX_F_C_MA)
+ (match_dup 2)
+ (const_int 3)))]
+ "TARGET_AVX512FP16"
+ "v<complexopname>sh\t{<round_scalarcz_mask_op4>%2, %1, %0<mask_scalarcz_operand4>|%0<mask_scalarcz_operand4>, %1, %2<round_scalarcz_maskcz_mask_op4>}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8HF")])
+
+(define_insn "avx512fp16_<complexopname>sh_v8hf_mask<round_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=&v")
+ (vec_merge:V8HF
+ (vec_merge:V8HF
+ (unspec:V8HF
+ [(match_operand:V8HF 1 "<round_nimm_predicate>" "v")
+ (match_operand:V8HF 2 "<round_nimm_predicate>" "<round_constraint>")
+ (match_operand:V8HF 3 "<round_nimm_predicate>" "0")]
+ UNSPEC_COMPLEX_F_C_MA)
+ (match_dup 1)
+ (unspec:QI [(match_operand:QI 4 "register_operand" "Yk")]
+ UNSPEC_COMPLEX_MASK))
+ (match_dup 2)
+ (const_int 3)))]
+ "TARGET_AVX512FP16"
+ "v<complexopname>sh\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8HF")])
+
+(define_insn "avx512fp16_<complexopname>sh_v8hf<mask_scalarc_name><round_scalarcz_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=&v")
+ (vec_merge:V8HF
+ (unspec:V8HF
+ [(match_operand:V8HF 1 "nonimmediate_operand" "v")
+ (match_operand:V8HF 2 "<round_scalarcz_nimm_predicate>" "<round_scalarcz_constraint>")]
+ UNSPEC_COMPLEX_F_C_MUL)
+ (match_dup 1)
+ (const_int 3)))]
+ "TARGET_AVX512FP16"
+ "v<complexopname>sh\t{<round_scalarc_mask_op3>%2, %1, %0<mask_scalarc_operand3>|%0<mask_scalarc_operand3>, %1, %2<round_scalarc_mask_op3>}"
+ [(set_attr "type" "ssemul")
+ (set_attr "mode" "V8HF")])
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel half-precision floating point conversion operations
diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md
index 3e085a8..11e62c6 100644
--- a/gcc/config/i386/subst.md
+++ b/gcc/config/i386/subst.md
@@ -328,8 +328,12 @@
(match_operand:<avx512fmaskmode> 5 "register_operand")])
(define_subst_attr "mask_scalar_name" "mask_scalar" "" "_mask")
+(define_subst_attr "mask_scalarcz_name" "mask_scalarcz" "" "_maskz")
+(define_subst_attr "mask_scalarc_name" "mask_scalarc" "" "_mask")
+(define_subst_attr "mask_scalarc_operand3" "mask_scalarc" "" "%{%4%}%N3")
(define_subst_attr "mask_scalar_operand3" "mask_scalar" "" "%{%4%}%N3")
(define_subst_attr "mask_scalar_operand4" "mask_scalar" "" "%{%5%}%N4")
+(define_subst_attr "mask_scalarcz_operand4" "mask_scalarcz" "" "%{%5%}%N4")
(define_subst "mask_scalar"
[(set (match_operand:SUBST_V 0)
@@ -347,12 +351,55 @@
(match_dup 2)
(const_int 1)))])
+(define_subst "mask_scalarcz"
+ [(set (match_operand:SUBST_CV 0)
+ (vec_merge:SUBST_CV
+ (match_operand:SUBST_CV 1)
+ (match_operand:SUBST_CV 2)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ [(set (match_dup 0)
+ (vec_merge:SUBST_CV
+ (vec_merge:SUBST_CV
+ (match_dup 1)
+ (match_operand:SUBST_CV 3 "const0_operand" "C")
+ (unspec:<avx512fmaskmode>
+ [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
+ UNSPEC_COMPLEX_MASK))
+ (match_dup 2)
+ (const_int 3)))])
+
+(define_subst "mask_scalarc"
+ [(set (match_operand:SUBST_CV 0)
+ (vec_merge:SUBST_CV
+ (match_operand:SUBST_CV 1)
+ (match_operand:SUBST_CV 2)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ [(set (match_dup 0)
+ (vec_merge:SUBST_CV
+ (vec_merge:SUBST_CV
+ (match_dup 1)
+ (match_operand:SUBST_CV 3 "nonimm_or_0_operand" "0C")
+ (unspec:<avx512fmaskmode>
+ [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
+ UNSPEC_COMPLEX_MASK))
+ (match_dup 2)
+ (const_int 3)))])
+
(define_subst_attr "round_scalar_name" "round_scalar" "" "_round")
+(define_subst_attr "round_scalarcz_name" "round_scalarcz" "" "_round")
(define_subst_attr "round_scalar_mask_operand3" "mask_scalar" "%R3" "%R5")
+(define_subst_attr "round_scalarc_mask_operand3" "mask_scalarc" "%R3" "%R5")
+(define_subst_attr "round_scalarcz_mask_operand4" "mask_scalarcz" "%R4" "%R6")
(define_subst_attr "round_scalar_mask_op3" "round_scalar" "" "<round_scalar_mask_operand3>")
+(define_subst_attr "round_scalarc_mask_op3" "round_scalarcz" "" "<round_scalarc_mask_operand3>")
+(define_subst_attr "round_scalarcz_mask_op4" "round_scalarcz" "" "<round_scalarcz_mask_operand4>")
(define_subst_attr "round_scalar_constraint" "round_scalar" "vm" "v")
+(define_subst_attr "round_scalarcz_constraint" "round_scalarcz" "vm" "v")
(define_subst_attr "round_scalar_prefix" "round_scalar" "vex" "evex")
(define_subst_attr "round_scalar_nimm_predicate" "round_scalar" "nonimmediate_operand" "register_operand")
+(define_subst_attr "round_scalarcz_nimm_predicate" "round_scalarcz" "vector_operand" "register_operand")
(define_subst "round_scalar"
[(set (match_operand:SUBST_V 0)
@@ -370,6 +417,22 @@
(match_operand:SI 3 "const_4_or_8_to_11_operand")]
UNSPEC_EMBEDDED_ROUNDING))])
+(define_subst "round_scalarcz"
+ [(set (match_operand:SUBST_V 0)
+ (vec_merge:SUBST_V
+ (match_operand:SUBST_V 1)
+ (match_operand:SUBST_V 2)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ [(set (match_dup 0)
+ (unspec:SUBST_V [
+ (vec_merge:SUBST_V
+ (match_dup 1)
+ (match_dup 2)
+ (const_int 3))
+ (match_operand:SI 3 "const_4_or_8_to_11_operand")]
+ UNSPEC_EMBEDDED_ROUNDING))])
+
(define_subst_attr "round_saeonly_scalar_name" "round_saeonly_scalar" "" "_round")
(define_subst_attr "round_saeonly_scalar_mask_operand3" "mask_scalar" "%r3" "%r5")
(define_subst_attr "round_saeonly_scalar_mask_operand4" "mask_scalar" "%r4" "%r6")
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index fcd71a2..8744aa7 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -797,6 +797,16 @@
#define __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, 8)
#define __builtin_ia32_vfcmulcph512_round(A, B, C) __builtin_ia32_vfcmulcph512_round(A, B, 8)
#define __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_round(A, B, C, D) __builtin_ia32_vfmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_round(A, B, C) __builtin_ia32_vfmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_round(A, B, C) __builtin_ia32_vfcmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index 5d11813..f6d54e3 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -814,6 +814,16 @@
#define __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, 8)
#define __builtin_ia32_vfcmulcph512_round(A, B, C) __builtin_ia32_vfcmulcph512_round(A, B, 8)
#define __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_round(A, B, C, D) __builtin_ia32_vfmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_round(A, B, C) __builtin_ia32_vfmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_round(A, B, C) __builtin_ia32_vfcmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index f27c73f..956a9d1 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -774,6 +774,8 @@ test_2 (_mm_cvt_roundi32_sh, __m128h, __m128h, int, 8)
test_2 (_mm_cvt_roundu32_sh, __m128h, __m128h, unsigned, 8)
test_2 (_mm512_fmul_round_pch, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_fcmul_round_pch, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_fmul_round_sch, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_fcmul_round_sch, __m128h, __m128h, __m128h, 8)
test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -850,8 +852,12 @@ test_3 (_mm_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
test_3 (_mm512_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
test_3 (_mm512_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
+test_3 (_mm_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
+test_3 (_mm_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_fmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
test_3 (_mm512_maskz_fcmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
+test_3 (_mm_maskz_fmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_fcmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
test_3x (_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8)
@@ -920,8 +926,16 @@ test_4 (_mm512_mask3_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmas
test_4 (_mm512_mask3_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmask16, 8)
test_4 (_mm512_maskz_fmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
test_4 (_mm512_maskz_fcmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask3_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_mask3_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_maskz_fmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
+test_4 (_mm_maskz_fcmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
test_4 (_mm512_mask_fmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
test_4 (_mm512_mask_fcmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index ccf8c3a..31492ef 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -878,6 +878,8 @@ test_2 (_mm_cvt_roundss_sh, __m128h, __m128h, __m128, 8)
test_2 (_mm_cvt_roundsd_sh, __m128h, __m128h, __m128d, 8)
test_2 (_mm512_fmul_round_pch, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_fcmul_round_pch, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_fmul_round_sch, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_fcmul_round_sch, __m128h, __m128h, __m128h, 8)
test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -954,6 +956,10 @@ test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
test_3 (_mm512_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
test_3 (_mm512_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
test_3 (_mm512_maskz_fmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
+test_3 (_mm_maskz_fmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_fcmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
+test_3 (_mm_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_fcmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
@@ -1022,8 +1028,16 @@ test_4 (_mm512_mask3_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmas
test_4 (_mm512_mask3_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmask16, 8)
test_4 (_mm512_maskz_fmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
test_4 (_mm512_maskz_fcmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask3_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_mask3_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_maskz_fmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
+test_4 (_mm_maskz_fcmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
test_4 (_mm512_mask_fmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
test_4 (_mm512_mask_fcmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index a609070..9825126 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -815,6 +815,16 @@
#define __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, 8)
#define __builtin_ia32_vfcmulcph512_round(A, B, C) __builtin_ia32_vfcmulcph512_round(A, B, 8)
#define __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_round(A, B, C, D) __builtin_ia32_vfmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_round(A, B, C) __builtin_ia32_vfmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_round(A, B, C) __builtin_ia32_vfcmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
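
A final hedged illustration of the zero-masking forms exercised above (hypothetical function name, same assumptions as the earlier sketches): the _mm_maskz_* intrinsics write zeros where the mask is clear instead of merging with a source operand.

#include <immintrin.h>

/* Hypothetical demo: zero-masked complex multiply by conjugate; if
   bit 0 of k is clear, the low complex value of the result is zeroed.  */
__m128h
complex_fcmul_maskz_demo (__mmask8 k, __m128h a, __m128h b)
{
  return _mm_maskz_fcmul_sch (k, a, b);
}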