Diffstat (limited to 'gcc/config/i386')
-rw-r--r--  gcc/config/i386/avx10_2-512bf16intrin.h        14
-rw-r--r--  gcc/config/i386/avx10_2-512convertintrin.h     64
-rw-r--r--  gcc/config/i386/avx10_2-512mediaintrin.h       14
-rw-r--r--  gcc/config/i386/avx10_2-512minmaxintrin.h      14
-rw-r--r--  gcc/config/i386/avx10_2-512satcvtintrin.h     246
-rw-r--r--  gcc/config/i386/avx10_2bf16intrin.h            12
-rw-r--r--  gcc/config/i386/avx10_2convertintrin.h        192
-rw-r--r--  gcc/config/i386/avx10_2mediaintrin.h           14
-rw-r--r--  gcc/config/i386/avx10_2minmaxintrin.h         330
-rw-r--r--  gcc/config/i386/avx10_2roundingintrin.h      6433
-rw-r--r--  gcc/config/i386/avx10_2satcvtintrin.h        1371
-rw-r--r--  gcc/config/i386/cpuid.h                         5
-rw-r--r--  gcc/config/i386/driver-i386.cc                  9
-rw-r--r--  gcc/config/i386/i386-builtin-types.def         41
-rw-r--r--  gcc/config/i386/i386-builtin.def              950
-rw-r--r--  gcc/config/i386/i386-c.cc                      15
-rw-r--r--  gcc/config/i386/i386-expand.cc                 75
-rw-r--r--  gcc/config/i386/i386-isa.def                    5
-rw-r--r--  gcc/config/i386/i386-options.cc                38
-rw-r--r--  gcc/config/i386/i386-rust-and-jit.inc          93
-rw-r--r--  gcc/config/i386/i386-rust.cc                   96
-rw-r--r--  gcc/config/i386/i386.cc                         7
-rw-r--r--  gcc/config/i386/i386.h                          6
-rw-r--r--  gcc/config/i386/i386.md                        24
-rw-r--r--  gcc/config/i386/i386.opt                       27
-rw-r--r--  gcc/config/i386/i386.opt.urls                   9
-rw-r--r--  gcc/config/i386/immintrin.h                     2
-rw-r--r--  gcc/config/i386/mmx.md                         16
-rw-r--r--  gcc/config/i386/movrsintrin.h                  19
-rw-r--r--  gcc/config/i386/predicates.md                   2
-rw-r--r--  gcc/config/i386/sm4intrin.h                     4
-rw-r--r--  gcc/config/i386/sse.md                        543
-rw-r--r--  gcc/config/i386/subst.md                       43
-rw-r--r--  gcc/config/i386/zn4zn5.md                     151
34 files changed, 1434 insertions, 9450 deletions
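Overall, the hunks below collapse the separate __AVX10_2_256__/__AVX10_2_512__ feature macros into a single __AVX10_2__ guard (with a plain "avx10.2" target pragma) and respell the saturating/truncating conversion intrinsics with explicit _cvts_/_cvtts_/_ipcvts_/_ipcvtts_ prefixes. As a hedged illustration only (not part of the patch; the target attribute string is assumed to mirror the pragma used in these headers), user code after this change looks roughly like:

#include <immintrin.h>

/* Sketch: saturating bf16 -> int8 conversion under the new naming.
   _mm512_ipcvts_bf16_epi8 replaces _mm512_ipcvtbf16_epi8 (see the
   avx10_2-512satcvtintrin.h hunks below).  A single __AVX10_2__ macro
   now guards both 256- and 512-bit forms.  */
__attribute__ ((target ("avx10.2")))
static __m512i
bf16_to_sat_i8 (__m512bh v)
{
  return _mm512_ipcvts_bf16_epi8 (v);
}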
diff --git a/gcc/config/i386/avx10_2-512bf16intrin.h b/gcc/config/i386/avx10_2-512bf16intrin.h
index 307b14a..21e4b36 100644
--- a/gcc/config/i386/avx10_2-512bf16intrin.h
+++ b/gcc/config/i386/avx10_2-512bf16intrin.h
@@ -28,11 +28,11 @@
#ifndef _AVX10_2_512BF16INTRIN_H_INCLUDED
#define _AVX10_2_512BF16INTRIN_H_INCLUDED
-#if !defined (__AVX10_2_512__)
+#if !defined (__AVX10_2__)
#pragma GCC push_options
-#pragma GCC target("avx10.2-512")
-#define __DISABLE_AVX10_2_512__
-#endif /* __AVX10_2_512__ */
+#pragma GCC target("avx10.2")
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -673,9 +673,9 @@ _mm512_cmp_pbh_mask (__m512bh __A, __m512bh __B, const int __imm)
#endif /* __OPIMTIZE__ */
-#ifdef __DISABLE_AVX10_2_512__
-#undef __DISABLE_AVX10_2_512__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_512__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* _AVX10_2_512BF16INTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx10_2-512convertintrin.h b/gcc/config/i386/avx10_2-512convertintrin.h
index a44481e..611a40d 100644
--- a/gcc/config/i386/avx10_2-512convertintrin.h
+++ b/gcc/config/i386/avx10_2-512convertintrin.h
@@ -28,11 +28,11 @@
#ifndef __AVX10_2_512CONVERTINTRIN_H_INCLUDED
#define __AVX10_2_512CONVERTINTRIN_H_INCLUDED
-#ifndef __AVX10_2_512__
+#ifndef __AVX10_2__
#pragma GCC push_options
-#pragma GCC target("avx10.2-512")
-#define __DISABLE_AVX10_2_512__
-#endif /* __AVX10_2_512__ */
+#pragma GCC target("avx10.2")
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -49,7 +49,7 @@ _mm512_cvtx2ps_ph (__m512 __A, __m512 __B)
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtx2ps_ph (__m512h __W, __mmask32 __U, __m512 __A,
- __m512 __B)
+ __m512 __B)
{
return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round ((__v16sf) __A,
(__v16sf) __B,
@@ -86,7 +86,7 @@ _mm512_cvtx_round2ps_ph (__m512 __A, __m512 __B, const int __R)
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtx_round2ps_ph (__m512h __W, __mmask32 __U, __m512 __A,
- __m512 __B, const int __R)
+ __m512 __B, const int __R)
{
return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round ((__v16sf) __A,
(__v16sf) __B,
@@ -98,7 +98,7 @@ _mm512_mask_cvtx_round2ps_ph (__m512h __W, __mmask32 __U, __m512 __A,
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtx_round2ps_ph (__mmask32 __U, __m512 __A,
- __m512 __B, const int __R)
+ __m512 __B, const int __R)
{
return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round ((__v16sf) __A,
(__v16sf) __B,
@@ -166,7 +166,7 @@ _mm512_maskz_cvtbiasph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiassph_bf8 (__m512i __A, __m512h __B)
+_mm512_cvts_biasph_bf8 (__m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
@@ -177,8 +177,8 @@ _mm512_cvtbiassph_bf8 (__m512i __A, __m512h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiassph_bf8 (__m256i __W, __mmask32 __U,
- __m512i __A, __m512h __B)
+_mm512_mask_cvts_biasph_bf8 (__m256i __W, __mmask32 __U,
+ __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
@@ -188,7 +188,7 @@ _mm512_mask_cvtbiassph_bf8 (__m256i __W, __mmask32 __U,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiassph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvts_biasph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
@@ -232,7 +232,7 @@ _mm512_maskz_cvtbiasph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiassph_hf8 (__m512i __A, __m512h __B)
+_mm512_cvts_biasph_hf8 (__m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
@@ -243,8 +243,8 @@ _mm512_cvtbiassph_hf8 (__m512i __A, __m512h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiassph_hf8 (__m256i __W, __mmask32 __U,
- __m512i __A, __m512h __B)
+_mm512_mask_cvts_biasph_hf8 (__m256i __W, __mmask32 __U,
+ __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
@@ -254,7 +254,7 @@ _mm512_mask_cvtbiassph_hf8 (__m256i __W, __mmask32 __U,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiassph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvts_biasph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
@@ -298,7 +298,7 @@ _mm512_maskz_cvt2ph_bf8 (__mmask64 __U, __m512h __A, __m512h __B)
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvts2ph_bf8 (__m512h __A, __m512h __B)
+_mm512_cvts_2ph_bf8 (__m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
@@ -309,8 +309,8 @@ _mm512_cvts2ph_bf8 (__m512h __A, __m512h __B)
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvts2ph_bf8 (__m512i __W, __mmask64 __U,
- __m512h __A, __m512h __B)
+_mm512_mask_cvts_2ph_bf8 (__m512i __W, __mmask64 __U,
+ __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
@@ -320,7 +320,7 @@ _mm512_mask_cvts2ph_bf8 (__m512i __W, __mmask64 __U,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvts2ph_bf8 (__mmask64 __U, __m512h __A, __m512h __B)
+_mm512_maskz_cvts_2ph_bf8 (__mmask64 __U, __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
@@ -364,7 +364,7 @@ _mm512_maskz_cvt2ph_hf8 (__mmask64 __U, __m512h __A, __m512h __B)
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvts2ph_hf8 (__m512h __A, __m512h __B)
+_mm512_cvts_2ph_hf8 (__m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
@@ -375,8 +375,8 @@ _mm512_cvts2ph_hf8 (__m512h __A, __m512h __B)
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvts2ph_hf8 (__m512i __W, __mmask64 __U,
- __m512h __A, __m512h __B)
+_mm512_mask_cvts_2ph_hf8 (__m512i __W, __mmask64 __U,
+ __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
@@ -386,7 +386,7 @@ _mm512_mask_cvts2ph_hf8 (__m512i __W, __mmask64 __U,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvts2ph_hf8 (__mmask64 __U, __m512h __A, __m512h __B)
+_mm512_maskz_cvts_2ph_hf8 (__mmask64 __U, __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
@@ -455,7 +455,7 @@ _mm512_maskz_cvtph_bf8 (__mmask32 __U, __m512h __A)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtsph_bf8 (__m512h __A)
+_mm512_cvts_ph_bf8 (__m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
@@ -465,7 +465,7 @@ _mm512_cvtsph_bf8 (__m512h __A)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtsph_bf8 (__m256i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_cvts_ph_bf8 (__m256i __W, __mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i) __W,
@@ -474,7 +474,7 @@ _mm512_mask_cvtsph_bf8 (__m256i __W, __mmask32 __U, __m512h __A)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtsph_bf8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_cvts_ph_bf8 (__mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
@@ -513,7 +513,7 @@ _mm512_maskz_cvtph_hf8 (__mmask32 __U, __m512h __A)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtsph_hf8 (__m512h __A)
+_mm512_cvts_ph_hf8 (__m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
@@ -523,7 +523,7 @@ _mm512_cvtsph_hf8 (__m512h __A)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtsph_hf8 (__m256i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_cvts_ph_hf8 (__m256i __W, __mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i) __W,
@@ -532,7 +532,7 @@ _mm512_mask_cvtsph_hf8 (__m256i __W, __mmask32 __U, __m512h __A)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtsph_hf8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_cvts_ph_hf8 (__mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
@@ -564,9 +564,9 @@ _mm512_maskz_cvtbf8_ph (__mmask32 __U, __m256i __A)
(__m512i) _mm512_maskz_cvtepi8_epi16 (__U, __A), 8));
}
-#ifdef __DISABLE_AVX10_2_512__
-#undef __DISABLE_AVX10_2_512__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_512__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* __AVX10_2_512CONVERTINTRIN_H_INCLUDED */
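For reference, a minimal hypothetical use of the renamed 512-bit saturating bias conversion above; the argument order and types are taken directly from the hunk (bias vector first, FP16 source second, narrowed __m256i result), and the target attribute string is assumed to match the header's pragma:

#include <immintrin.h>

/* Sketch: _mm512_cvts_biasph_bf8 replaces _mm512_cvtbiassph_bf8.  */
__attribute__ ((target ("avx10.2")))
static __m256i
biased_ph_to_bf8 (__m512i bias, __m512h values)
{
  return _mm512_cvts_biasph_bf8 (bias, values);
}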
diff --git a/gcc/config/i386/avx10_2-512mediaintrin.h b/gcc/config/i386/avx10_2-512mediaintrin.h
index 5dedabc..43271e7 100644
--- a/gcc/config/i386/avx10_2-512mediaintrin.h
+++ b/gcc/config/i386/avx10_2-512mediaintrin.h
@@ -28,11 +28,11 @@
#ifndef _AVX10_2_512MEDIAINTRIN_H_INCLUDED
#define _AVX10_2_512MEDIAINTRIN_H_INCLUDED
-#if !defined(__AVX10_2_512__)
+#if !defined(__AVX10_2__)
#pragma GCC push_options
-#pragma GCC target("avx10.2-512")
-#define __DISABLE_AVX10_2_512__
-#endif /* __AVX10_2_512__ */
+#pragma GCC target("avx10.2")
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -506,9 +506,9 @@ _mm512_maskz_mpsadbw_epu8 (__mmask32 __U, __m512i __X,
(__mmask32)(U))
#endif
-#ifdef __DISABLE_AVX10_2_512__
-#undef __DISABLE_AVX10_2_512__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_512__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* __AVX10_2_512MEDIAINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx10_2-512minmaxintrin.h b/gcc/config/i386/avx10_2-512minmaxintrin.h
index 3acdc56..a743346 100644
--- a/gcc/config/i386/avx10_2-512minmaxintrin.h
+++ b/gcc/config/i386/avx10_2-512minmaxintrin.h
@@ -23,11 +23,11 @@
#ifndef _AVX10_2_512MINMAXINTRIN_H_INCLUDED
#define _AVX10_2_512MINMAXINTRIN_H_INCLUDED
-#if !defined (__AVX10_2_512__)
+#if !defined (__AVX10_2__)
#pragma GCC push_options
-#pragma GCC target("avx10.2-512")
-#define __DISABLE_AVX10_2_512__
-#endif /* __AVX10_2_512__ */
+#pragma GCC target("avx10.2")
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
#ifdef __OPTIMIZE__
extern __inline __m512bh
@@ -481,9 +481,9 @@ _mm512_maskz_minmax_round_ps (__mmask16 __U, __m512 __A, __m512 __B,
#endif
-#ifdef __DISABLE_AVX10_2_512__
-#undef __DISABLE_AVX10_2_512__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_512__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* _AVX10_2_512MINMAXINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx10_2-512satcvtintrin.h b/gcc/config/i386/avx10_2-512satcvtintrin.h
index 1cef1da..215b7fd 100644
--- a/gcc/config/i386/avx10_2-512satcvtintrin.h
+++ b/gcc/config/i386/avx10_2-512satcvtintrin.h
@@ -28,15 +28,15 @@
#ifndef _AVX10_2_512SATCVTINTRIN_H_INCLUDED
#define _AVX10_2_512SATCVTINTRIN_H_INCLUDED
-#if !defined (__AVX10_2_512__)
+#if !defined (__AVX10_2__)
#pragma GCC push_options
-#pragma GCC target("avx10.2-512")
-#define __DISABLE_AVX10_2_512__
-#endif /* __AVX10_2_512__ */
+#pragma GCC target("avx10.2")
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtbf16_epi8 (__m512bh __A)
+_mm512_ipcvts_bf16_epi8 (__m512bh __A)
{
return
(__m512i) __builtin_ia32_cvtbf162ibs512_mask ((__v32bf) __A,
@@ -47,7 +47,7 @@ _mm512_ipcvtbf16_epi8 (__m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtbf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A)
+_mm512_mask_ipcvts_bf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A)
{
return (__m512i) __builtin_ia32_cvtbf162ibs512_mask ((__v32bf) __A,
(__v32hi) __W,
@@ -56,7 +56,7 @@ _mm512_mask_ipcvtbf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtbf16_epi8 (__mmask32 __U, __m512bh __A)
+_mm512_maskz_ipcvts_bf16_epi8 (__mmask32 __U, __m512bh __A)
{
return
(__m512i) __builtin_ia32_cvtbf162ibs512_mask ((__v32bf) __A,
@@ -67,7 +67,7 @@ _mm512_maskz_ipcvtbf16_epi8 (__mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtbf16_epu8 (__m512bh __A)
+_mm512_ipcvts_bf16_epu8 (__m512bh __A)
{
return
(__m512i) __builtin_ia32_cvtbf162iubs512_mask ((__v32bf) __A,
@@ -78,7 +78,7 @@ _mm512_ipcvtbf16_epu8 (__m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtbf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A)
+_mm512_mask_ipcvts_bf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A)
{
return (__m512i) __builtin_ia32_cvtbf162iubs512_mask ((__v32bf) __A,
(__v32hi) __W,
@@ -87,7 +87,7 @@ _mm512_mask_ipcvtbf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtbf16_epu8 (__mmask32 __U, __m512bh __A)
+_mm512_maskz_ipcvts_bf16_epu8 (__mmask32 __U, __m512bh __A)
{
return
(__m512i) __builtin_ia32_cvtbf162iubs512_mask ((__v32bf) __A,
@@ -98,7 +98,7 @@ _mm512_maskz_ipcvtbf16_epu8 (__mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvttbf16_epi8 (__m512bh __A)
+_mm512_ipcvtts_bf16_epi8 (__m512bh __A)
{
return
(__m512i) __builtin_ia32_cvttbf162ibs512_mask ((__v32bf) __A,
@@ -109,7 +109,7 @@ _mm512_ipcvttbf16_epi8 (__m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvttbf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A)
+_mm512_mask_ipcvtts_bf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A)
{
return (__m512i) __builtin_ia32_cvttbf162ibs512_mask ((__v32bf) __A,
(__v32hi) __W,
@@ -118,7 +118,7 @@ _mm512_mask_ipcvttbf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvttbf16_epi8 (__mmask32 __U, __m512bh __A)
+_mm512_maskz_ipcvtts_bf16_epi8 (__mmask32 __U, __m512bh __A)
{
return
(__m512i) __builtin_ia32_cvttbf162ibs512_mask ((__v32bf) __A,
@@ -129,7 +129,7 @@ _mm512_maskz_ipcvttbf16_epi8 (__mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvttbf16_epu8 (__m512bh __A)
+_mm512_ipcvtts_bf16_epu8 (__m512bh __A)
{
return (__m512i)
__builtin_ia32_cvttbf162iubs512_mask ((__v32bf) __A,
@@ -139,7 +139,7 @@ _mm512_ipcvttbf16_epu8 (__m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvttbf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A)
+_mm512_mask_ipcvtts_bf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A)
{
return (__m512i) __builtin_ia32_cvttbf162iubs512_mask ((__v32bf) __A,
(__v32hi) __W,
@@ -148,7 +148,7 @@ _mm512_mask_ipcvttbf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvttbf16_epu8 (__mmask32 __U, __m512bh __A)
+_mm512_maskz_ipcvtts_bf16_epu8 (__mmask32 __U, __m512bh __A)
{
return (__m512i)
__builtin_ia32_cvttbf162iubs512_mask ((__v32bf) __A,
@@ -159,7 +159,7 @@ _mm512_maskz_ipcvttbf16_epu8 (__mmask32 __U, __m512bh __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtph_epi8 (__m512h __A)
+_mm512_ipcvts_ph_epi8 (__m512h __A)
{
return
(__m512i) __builtin_ia32_cvtph2ibs512_mask ((__v32hf) __A,
@@ -170,7 +170,7 @@ _mm512_ipcvtph_epi8 (__m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtph_epi8 (__m512i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_ipcvts_ph_epi8 (__m512i __W, __mmask32 __U, __m512h __A)
{
return (__m512i) __builtin_ia32_cvtph2ibs512_mask ((__v32hf) __A,
(__v32hi) __W,
@@ -179,7 +179,7 @@ _mm512_mask_ipcvtph_epi8 (__m512i __W, __mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtph_epi8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_ipcvts_ph_epi8 (__mmask32 __U, __m512h __A)
{
return
(__m512i) __builtin_ia32_cvtph2ibs512_mask ((__v32hf) __A,
@@ -190,7 +190,7 @@ _mm512_maskz_ipcvtph_epi8 (__mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtph_epu8 (__m512h __A)
+_mm512_ipcvts_ph_epu8 (__m512h __A)
{
return
(__m512i) __builtin_ia32_cvtph2iubs512_mask ((__v32hf) __A,
@@ -201,7 +201,7 @@ _mm512_ipcvtph_epu8 (__m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtph_epu8 (__m512i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_ipcvts_ph_epu8 (__m512i __W, __mmask32 __U, __m512h __A)
{
return (__m512i) __builtin_ia32_cvtph2iubs512_mask ((__v32hf) __A,
(__v32hi) __W,
@@ -210,7 +210,7 @@ _mm512_mask_ipcvtph_epu8 (__m512i __W, __mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtph_epu8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_ipcvts_ph_epu8 (__mmask32 __U, __m512h __A)
{
return
(__m512i) __builtin_ia32_cvtph2iubs512_mask ((__v32hf) __A,
@@ -221,7 +221,7 @@ _mm512_maskz_ipcvtph_epu8 (__mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtps_epi8 (__m512 __A)
+_mm512_ipcvts_ps_epi8 (__m512 __A)
{
return
(__m512i) __builtin_ia32_cvtps2ibs512_mask ((__v16sf) __A,
@@ -232,7 +232,7 @@ _mm512_ipcvtps_epi8 (__m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtps_epi8 (__m512i __W, __mmask16 __U, __m512 __A)
+_mm512_mask_ipcvts_ps_epi8 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2ibs512_mask ((__v16sf) __A,
(__v16si) __W,
@@ -241,7 +241,7 @@ _mm512_mask_ipcvtps_epi8 (__m512i __W, __mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtps_epi8 (__mmask16 __U, __m512 __A)
+_mm512_maskz_ipcvts_ps_epi8 (__mmask16 __U, __m512 __A)
{
return
(__m512i) __builtin_ia32_cvtps2ibs512_mask ((__v16sf) __A,
@@ -252,7 +252,7 @@ _mm512_maskz_ipcvtps_epi8 (__mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtps_epu8 (__m512 __A)
+_mm512_ipcvts_ps_epu8 (__m512 __A)
{
return
(__m512i) __builtin_ia32_cvtps2iubs512_mask ((__v16sf) __A,
@@ -263,7 +263,7 @@ _mm512_ipcvtps_epu8 (__m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtps_epu8 (__m512i __W, __mmask16 __U, __m512 __A)
+_mm512_mask_ipcvts_ps_epu8 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2iubs512_mask ((__v16sf) __A,
(__v16si) __W,
@@ -272,7 +272,7 @@ _mm512_mask_ipcvtps_epu8 (__m512i __W, __mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtps_epu8 (__mmask16 __U, __m512 __A)
+_mm512_maskz_ipcvts_ps_epu8 (__mmask16 __U, __m512 __A)
{
return
(__m512i) __builtin_ia32_cvtps2iubs512_mask ((__v16sf) __A,
@@ -283,7 +283,7 @@ _mm512_maskz_ipcvtps_epu8 (__mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvttph_epi8 (__m512h __A)
+_mm512_ipcvtts_ph_epi8 (__m512h __A)
{
return (__m512i)
__builtin_ia32_cvttph2ibs512_mask ((__v32hf) __A,
@@ -294,7 +294,7 @@ _mm512_ipcvttph_epi8 (__m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvttph_epi8 (__m512i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_ipcvtts_ph_epi8 (__m512i __W, __mmask32 __U, __m512h __A)
{
return (__m512i) __builtin_ia32_cvttph2ibs512_mask ((__v32hf) __A,
(__v32hi) __W,
@@ -303,7 +303,7 @@ _mm512_mask_ipcvttph_epi8 (__m512i __W, __mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvttph_epi8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_ipcvtts_ph_epi8 (__mmask32 __U, __m512h __A)
{
return
(__m512i) __builtin_ia32_cvttph2ibs512_mask ((__v32hf) __A,
@@ -314,7 +314,7 @@ _mm512_maskz_ipcvttph_epi8 (__mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvttph_epu8 (__m512h __A)
+_mm512_ipcvtts_ph_epu8 (__m512h __A)
{
return (__m512i)
__builtin_ia32_cvttph2iubs512_mask ((__v32hf) __A,
@@ -325,7 +325,7 @@ _mm512_ipcvttph_epu8 (__m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvttph_epu8 (__m512i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_ipcvtts_ph_epu8 (__m512i __W, __mmask32 __U, __m512h __A)
{
return (__m512i) __builtin_ia32_cvttph2iubs512_mask ((__v32hf) __A,
(__v32hi) __W,
@@ -334,7 +334,7 @@ _mm512_mask_ipcvttph_epu8 (__m512i __W, __mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvttph_epu8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_ipcvtts_ph_epu8 (__mmask32 __U, __m512h __A)
{
return (__m512i)
__builtin_ia32_cvttph2iubs512_mask ((__v32hf) __A,
@@ -345,7 +345,7 @@ _mm512_maskz_ipcvttph_epu8 (__mmask32 __U, __m512h __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvttps_epi8 (__m512 __A)
+_mm512_ipcvtts_ps_epi8 (__m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2ibs512_mask ((__v16sf) __A,
@@ -356,7 +356,7 @@ _mm512_ipcvttps_epi8 (__m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvttps_epi8 (__m512i __W, __mmask16 __U, __m512 __A)
+_mm512_mask_ipcvtts_ps_epi8 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2ibs512_mask ((__v16sf) __A,
(__v16si) __W,
@@ -365,7 +365,7 @@ _mm512_mask_ipcvttps_epi8 (__m512i __W, __mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvttps_epi8 (__mmask16 __U, __m512 __A)
+_mm512_maskz_ipcvtts_ps_epi8 (__mmask16 __U, __m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2ibs512_mask ((__v16sf) __A,
@@ -376,7 +376,7 @@ _mm512_maskz_ipcvttps_epi8 (__mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvttps_epu8 (__m512 __A)
+_mm512_ipcvtts_ps_epu8 (__m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2iubs512_mask ((__v16sf) __A,
@@ -387,7 +387,7 @@ _mm512_ipcvttps_epu8 (__m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvttps_epu8 (__m512i __W, __mmask16 __U, __m512 __A)
+_mm512_mask_ipcvtts_ps_epu8 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2iubs512_mask ((__v16sf) __A,
(__v16si) __W,
@@ -396,7 +396,7 @@ _mm512_mask_ipcvttps_epu8 (__m512i __W, __mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvttps_epu8 (__mmask16 __U, __m512 __A)
+_mm512_maskz_ipcvtts_ps_epu8 (__mmask16 __U, __m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2iubs512_mask ((__v16sf) __A,
@@ -407,7 +407,7 @@ _mm512_maskz_ipcvttps_epu8 (__mmask16 __U, __m512 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttspd_epi32 (__m512d __A)
+_mm512_cvtts_pd_epi32 (__m512d __A)
{
return (__m256i)
__builtin_ia32_cvttpd2dqs512_mask ((__v8df) __A,
@@ -418,7 +418,7 @@ _mm512_cvttspd_epi32 (__m512d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttspd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
+_mm512_mask_cvtts_pd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2dqs512_mask ((__v8df) __A,
(__v8si) __W,
@@ -427,7 +427,7 @@ _mm512_mask_cvttspd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttspd_epi32 (__mmask8 __U, __m512d __A)
+_mm512_maskz_cvtts_pd_epi32 (__mmask8 __U, __m512d __A)
{
return
(__m256i) __builtin_ia32_cvttpd2dqs512_mask ((__v8df) __A,
@@ -438,7 +438,7 @@ _mm512_maskz_cvttspd_epi32 (__mmask8 __U, __m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttspd_epi64 (__m512d __A)
+_mm512_cvtts_pd_epi64 (__m512d __A)
{
return (__m512i)
__builtin_ia32_cvttpd2qqs512_mask ((__v8df) __A,
@@ -449,7 +449,7 @@ _mm512_cvttspd_epi64 (__m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttspd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
+_mm512_mask_cvtts_pd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
{
return (__m512i) __builtin_ia32_cvttpd2qqs512_mask ((__v8df) __A,
(__v8di) __W,
@@ -458,7 +458,7 @@ _mm512_mask_cvttspd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttspd_epi64 (__mmask8 __U, __m512d __A)
+_mm512_maskz_cvtts_pd_epi64 (__mmask8 __U, __m512d __A)
{
return
(__m512i) __builtin_ia32_cvttpd2qqs512_mask ((__v8df) __A,
@@ -469,7 +469,7 @@ _mm512_maskz_cvttspd_epi64 (__mmask8 __U, __m512d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttspd_epu32 (__m512d __A)
+_mm512_cvtts_pd_epu32 (__m512d __A)
{
return (__m256i)
__builtin_ia32_cvttpd2udqs512_mask ((__v8df) __A,
@@ -480,7 +480,7 @@ _mm512_cvttspd_epu32 (__m512d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttspd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
+_mm512_mask_cvtts_pd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2udqs512_mask ((__v8df) __A,
(__v8si) __W,
@@ -489,7 +489,7 @@ _mm512_mask_cvttspd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttspd_epu32 (__mmask8 __U, __m512d __A)
+_mm512_maskz_cvtts_pd_epu32 (__mmask8 __U, __m512d __A)
{
return
(__m256i) __builtin_ia32_cvttpd2udqs512_mask ((__v8df) __A,
@@ -500,7 +500,7 @@ _mm512_maskz_cvttspd_epu32 (__mmask8 __U, __m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttspd_epu64 (__m512d __A)
+_mm512_cvtts_pd_epu64 (__m512d __A)
{
return (__m512i)
__builtin_ia32_cvttpd2uqqs512_mask ((__v8df) __A,
@@ -511,7 +511,7 @@ _mm512_cvttspd_epu64 (__m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttspd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
+_mm512_mask_cvtts_pd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
{
return (__m512i) __builtin_ia32_cvttpd2uqqs512_mask ((__v8df) __A,
(__v8di) __W,
@@ -520,7 +520,7 @@ _mm512_mask_cvttspd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttspd_epu64 (__mmask8 __U, __m512d __A)
+_mm512_maskz_cvtts_pd_epu64 (__mmask8 __U, __m512d __A)
{
return (__m512i)
__builtin_ia32_cvttpd2uqqs512_mask ((__v8df) __A,
@@ -531,7 +531,7 @@ _mm512_maskz_cvttspd_epu64 (__mmask8 __U, __m512d __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttsps_epi32 (__m512 __A)
+_mm512_cvtts_ps_epi32 (__m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2dqs512_mask ((__v16sf) __A,
@@ -542,7 +542,7 @@ _mm512_cvttsps_epi32 (__m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttsps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
+_mm512_mask_cvtts_ps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2dqs512_mask ((__v16sf) __A,
(__v16si) __W,
@@ -551,7 +551,7 @@ _mm512_mask_cvttsps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttsps_epi32 (__mmask16 __U, __m512 __A)
+_mm512_maskz_cvtts_ps_epi32 (__mmask16 __U, __m512 __A)
{
return
(__m512i) __builtin_ia32_cvttps2dqs512_mask ((__v16sf) __A,
@@ -562,7 +562,7 @@ _mm512_maskz_cvttsps_epi32 (__mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttsps_epi64 (__m256 __A)
+_mm512_cvtts_ps_epi64 (__m256 __A)
{
return (__m512i)
__builtin_ia32_cvttps2qqs512_mask ((__v8sf) __A,
@@ -573,7 +573,7 @@ _mm512_cvttsps_epi64 (__m256 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttsps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
+_mm512_mask_cvtts_ps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
{
return (__m512i) __builtin_ia32_cvttps2qqs512_mask ((__v8sf) __A,
(__v8di) __W,
@@ -582,7 +582,7 @@ _mm512_mask_cvttsps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttsps_epi64 (__mmask8 __U, __m256 __A)
+_mm512_maskz_cvtts_ps_epi64 (__mmask8 __U, __m256 __A)
{
return
(__m512i) __builtin_ia32_cvttps2qqs512_mask ((__v8sf) __A,
@@ -593,7 +593,7 @@ _mm512_maskz_cvttsps_epi64 (__mmask8 __U, __m256 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttsps_epu32 (__m512 __A)
+_mm512_cvtts_ps_epu32 (__m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2udqs512_mask ((__v16sf) __A,
@@ -604,7 +604,7 @@ _mm512_cvttsps_epu32 (__m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttsps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
+_mm512_mask_cvtts_ps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2udqs512_mask ((__v16sf) __A,
(__v16si) __W,
@@ -613,7 +613,7 @@ _mm512_mask_cvttsps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttsps_epu32 (__mmask16 __U, __m512 __A)
+_mm512_maskz_cvtts_ps_epu32 (__mmask16 __U, __m512 __A)
{
return (__m512i)
__builtin_ia32_cvttps2udqs512_mask ((__v16sf) __A,
@@ -624,7 +624,7 @@ _mm512_maskz_cvttsps_epu32 (__mmask16 __U, __m512 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvttsps_epu64 (__m256 __A)
+_mm512_cvtts_ps_epu64 (__m256 __A)
{
return (__m512i)
__builtin_ia32_cvttps2uqqs512_mask ((__v8sf) __A,
@@ -635,7 +635,7 @@ _mm512_cvttsps_epu64 (__m256 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvttsps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
+_mm512_mask_cvtts_ps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
{
return (__m512i) __builtin_ia32_cvttps2uqqs512_mask ((__v8sf) __A,
(__v8di) __W,
@@ -644,7 +644,7 @@ _mm512_mask_cvttsps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvttsps_epu64 (__mmask8 __U, __m256 __A)
+_mm512_maskz_cvtts_ps_epu64 (__mmask8 __U, __m256 __A)
{
return
(__m512i) __builtin_ia32_cvttps2uqqs512_mask ((__v8sf) __A,
@@ -656,7 +656,7 @@ _mm512_maskz_cvttsps_epu64 (__mmask8 __U, __m256 __A)
#ifdef __OPTIMIZE__
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvt_roundph_epi8 (__m512h __A, const int __R)
+_mm512_ipcvts_roundph_epi8 (__m512h __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) __A,
@@ -668,8 +668,8 @@ _mm512_ipcvt_roundph_epi8 (__m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvt_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A,
- const int __R)
+_mm512_mask_ipcvts_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) __A,
(__v32hi) __W,
@@ -679,7 +679,7 @@ _mm512_mask_ipcvt_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvt_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R)
+_mm512_maskz_ipcvts_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) __A,
@@ -691,7 +691,7 @@ _mm512_maskz_ipcvt_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvt_roundph_epu8 (__m512h __A, const int __R)
+_mm512_ipcvts_roundph_epu8 (__m512h __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) __A,
@@ -703,8 +703,8 @@ _mm512_ipcvt_roundph_epu8 (__m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvt_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A,
- const int __R)
+_mm512_mask_ipcvts_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) __A,
(__v32hi) __W,
@@ -714,7 +714,7 @@ _mm512_mask_ipcvt_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvt_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R)
+_mm512_maskz_ipcvts_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) __A,
@@ -726,7 +726,7 @@ _mm512_maskz_ipcvt_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvt_roundps_epi8 (__m512 __A, const int __R)
+_mm512_ipcvts_roundps_epi8 (__m512 __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) __A,
@@ -738,8 +738,8 @@ _mm512_ipcvt_roundps_epi8 (__m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvt_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A,
- const int __R)
+_mm512_mask_ipcvts_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) __A,
(__v16si) __W,
@@ -749,7 +749,7 @@ _mm512_mask_ipcvt_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvt_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R)
+_mm512_maskz_ipcvts_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) __A,
@@ -761,7 +761,7 @@ _mm512_maskz_ipcvt_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvt_roundps_epu8 (__m512 __A, const int __R)
+_mm512_ipcvts_roundps_epu8 (__m512 __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) __A,
@@ -773,8 +773,8 @@ _mm512_ipcvt_roundps_epu8 (__m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvt_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A,
- const int __R)
+_mm512_mask_ipcvts_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) __A,
(__v16si) __W,
@@ -784,7 +784,7 @@ _mm512_mask_ipcvt_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvt_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R)
+_mm512_maskz_ipcvts_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) __A,
@@ -796,7 +796,7 @@ _mm512_maskz_ipcvt_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtt_roundph_epi8 (__m512h __A, const int __R)
+_mm512_ipcvtts_roundph_epi8 (__m512h __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) __A,
@@ -808,8 +808,8 @@ _mm512_ipcvtt_roundph_epi8 (__m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtt_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A,
- const int __R)
+_mm512_mask_ipcvtts_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) __A,
(__v32hi) __W,
@@ -819,7 +819,7 @@ _mm512_mask_ipcvtt_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtt_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R)
+_mm512_maskz_ipcvtts_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R)
{
return
(__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) __A,
@@ -831,7 +831,7 @@ _mm512_maskz_ipcvtt_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtt_roundph_epu8 (__m512h __A, const int __R)
+_mm512_ipcvtts_roundph_epu8 (__m512h __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) __A,
@@ -843,8 +843,8 @@ _mm512_ipcvtt_roundph_epu8 (__m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtt_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A,
- const int __R)
+_mm512_mask_ipcvtts_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) __A,
(__v32hi) __W,
@@ -854,7 +854,7 @@ _mm512_mask_ipcvtt_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtt_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R)
+_mm512_maskz_ipcvtts_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) __A,
@@ -866,7 +866,7 @@ _mm512_maskz_ipcvtt_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtt_roundps_epi8 (__m512 __A, const int __R)
+_mm512_ipcvtts_roundps_epi8 (__m512 __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) __A,
@@ -878,8 +878,8 @@ _mm512_ipcvtt_roundps_epi8 (__m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtt_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A,
- const int __R)
+_mm512_mask_ipcvtts_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) __A,
(__v16si) __W,
@@ -889,7 +889,7 @@ _mm512_mask_ipcvtt_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtt_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R)
+_mm512_maskz_ipcvtts_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) __A,
@@ -901,7 +901,7 @@ _mm512_maskz_ipcvtt_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ipcvtt_roundps_epu8 (__m512 __A, const int __R)
+_mm512_ipcvtts_roundps_epu8 (__m512 __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) __A,
@@ -913,8 +913,8 @@ _mm512_ipcvtt_roundps_epu8 (__m512 __A, const int __R)
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ipcvtt_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A,
- const int __R)
+_mm512_mask_ipcvtts_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A,
+ const int __R)
{
return (__m512i) __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) __A,
(__v16si) __W,
@@ -924,7 +924,7 @@ _mm512_mask_ipcvtt_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A,
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ipcvtt_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R)
+_mm512_maskz_ipcvtts_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R)
{
return (__m512i)
__builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) __A,
@@ -1214,7 +1214,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
__R);
}
#else
-#define _mm512_ipcvt_roundph_epi8(A, R) \
+#define _mm512_ipcvts_roundph_epi8(A, R) \
((__m512i) \
__builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1222,13 +1222,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (-1), \
(R)))
-#define _mm512_mask_ipcvt_roundph_epi8(W, U, A, R) \
+#define _mm512_mask_ipcvts_roundph_epi8(W, U, A, R) \
((__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) (A), \
(__v32hi) (W), \
(__mmask32) (U), \
(R)))
-#define _mm512_maskz_ipcvt_roundph_epi8(U, A, R) \
+#define _mm512_maskz_ipcvts_roundph_epi8(U, A, R) \
((__m512i) \
__builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1236,7 +1236,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (U), \
(R)))
-#define _mm512_ipcvt_roundph_epu8(A, R) \
+#define _mm512_ipcvts_roundph_epu8(A, R) \
((__m512i) \
__builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1244,13 +1244,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (-1), \
(R)))
-#define _mm512_mask_ipcvt_roundph_epu8(W, U, A, R) \
+#define _mm512_mask_ipcvts_roundph_epu8(W, U, A, R) \
((__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) (A), \
(__v32hi) (W), \
(__mmask32) (U), \
(R)))
-#define _mm512_maskz_ipcvt_roundph_epu8(U, A, R) \
+#define _mm512_maskz_ipcvts_roundph_epu8(U, A, R) \
((__m512i) \
__builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1258,7 +1258,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (U), \
(R)))
-#define _mm512_ipcvt_roundps_epi8(A, R) \
+#define _mm512_ipcvts_roundps_epi8(A, R) \
((__m512i) \
__builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1266,13 +1266,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (-1), \
(R)))
-#define _mm512_mask_ipcvt_roundps_epi8(W, U, A, R) \
+#define _mm512_mask_ipcvts_roundps_epi8(W, U, A, R) \
((__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) (A), \
(__v16si) (W), \
(__mmask16) (U), \
(R)))
-#define _mm512_maskz_ipcvt_roundps_epi8(U, A, R) \
+#define _mm512_maskz_ipcvts_roundps_epi8(U, A, R) \
((__m512i) \
__builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1280,7 +1280,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (U), \
(R)))
-#define _mm512_ipcvt_roundps_epu8(A, R) \
+#define _mm512_ipcvts_roundps_epu8(A, R) \
((__m512i) \
__builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1288,13 +1288,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (-1), \
(R)))
-#define _mm512_mask_ipcvt_roundps_epu8(W, U, A, R) \
+#define _mm512_mask_ipcvts_roundps_epu8(W, U, A, R) \
((__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) (A), \
(__v16si) (W), \
(__mmask16) (U), \
(R)))
-#define _mm512_maskz_ipcvt_roundps_epu8(U, A, R) \
+#define _mm512_maskz_ipcvts_roundps_epu8(U, A, R) \
((__m512i) \
__builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1302,7 +1302,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (U), \
(R)))
-#define _mm512_ipcvtt_roundph_epi8(A, R) \
+#define _mm512_ipcvtts_roundph_epi8(A, R) \
((__m512i) \
__builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1310,13 +1310,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (-1), \
(R)))
-#define _mm512_mask_ipcvtt_roundph_epi8(W, U, A, R) \
+#define _mm512_mask_ipcvtts_roundph_epi8(W, U, A, R) \
((__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) (A), \
(__v32hi) (W), \
(__mmask32) (U), \
(R)))
-#define _mm512_maskz_ipcvtt_roundph_epi8(U, A, R) \
+#define _mm512_maskz_ipcvtts_roundph_epi8(U, A, R) \
((__m512i) \
__builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1324,7 +1324,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (U), \
(R)))
-#define _mm512_ipcvtt_roundph_epu8(A, R) \
+#define _mm512_ipcvtts_roundph_epu8(A, R) \
((__m512i) \
__builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1332,13 +1332,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (-1), \
(R)))
-#define _mm512_mask_ipcvtt_roundph_epu8(W, U, A, R) \
+#define _mm512_mask_ipcvtts_roundph_epu8(W, U, A, R) \
((__m512i) __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) (A), \
(__v32hi) (W), \
(__mmask32) (U), \
(R)))
-#define _mm512_maskz_ipcvtt_roundph_epu8(U, A, R) \
+#define _mm512_maskz_ipcvtts_roundph_epu8(U, A, R) \
((__m512i) \
__builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) (A), \
(__v32hi) \
@@ -1346,7 +1346,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask32) (U), \
(R)))
-#define _mm512_ipcvtt_roundps_epi8(A, R) \
+#define _mm512_ipcvtts_roundps_epi8(A, R) \
((__m512i) \
__builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1354,13 +1354,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (-1), \
(R)))
-#define _mm512_mask_ipcvtt_roundps_epi8(W, U, A, R) \
+#define _mm512_mask_ipcvtts_roundps_epi8(W, U, A, R) \
((__m512i) __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) (A), \
(__v16si) (W), \
(__mmask16) (U), \
(R)))
-#define _mm512_maskz_ipcvtt_roundps_epi8(U, A, R) \
+#define _mm512_maskz_ipcvtts_roundps_epi8(U, A, R) \
((__m512i) \
__builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1368,7 +1368,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (U), \
(R)))
-#define _mm512_ipcvtt_roundps_epu8(A, R) \
+#define _mm512_ipcvtts_roundps_epu8(A, R) \
((__m512i) \
__builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1376,13 +1376,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(__mmask16) (-1), \
(R)))
-#define _mm512_mask_ipcvtt_roundps_epu8(W, U, A, R) \
+#define _mm512_mask_ipcvtts_roundps_epu8(W, U, A, R) \
((__m512i) __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) (A), \
(__v16si) (W), \
(__mmask16) (U), \
(R)))
-#define _mm512_maskz_ipcvtt_roundps_epu8(U, A, R) \
+#define _mm512_maskz_ipcvtts_roundps_epu8(U, A, R) \
((__m512i) \
__builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) (A), \
(__v16si) \
@@ -1567,9 +1567,9 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R)
(R)))
#endif
-#ifdef __DISABLE_AVX10_2_512__
-#undef __DISABLE_AVX10_2_512__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_512__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* _AVX10_2_512SATCVTINTRIN_H_INCLUDED */
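A short hedged sketch of the renamed truncating conversions above (signatures taken from the hunks; the masked form merges into the __W passthrough operand, as the builtin call in the hunk shows):

#include <immintrin.h>

/* Sketch: _mm512_cvtts_pd_epi32 / _mm512_mask_cvtts_pd_epi32 replace the
   old _mm512_cvttspd_epi32 spellings.  */
__attribute__ ((target ("avx10.2")))
static __m256i
truncate_sat_pd_to_i32 (__m256i passthru, __mmask8 m, __m512d v)
{
  return _mm512_mask_cvtts_pd_epi32 (passthru, m, v);
}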
diff --git a/gcc/config/i386/avx10_2bf16intrin.h b/gcc/config/i386/avx10_2bf16intrin.h
index af3b4af..e6890fc 100644
--- a/gcc/config/i386/avx10_2bf16intrin.h
+++ b/gcc/config/i386/avx10_2bf16intrin.h
@@ -28,11 +28,11 @@
#ifndef _AVX10_2BF16INTRIN_H_INCLUDED
#define _AVX10_2BF16INTRIN_H_INCLUDED
-#if !defined(__AVX10_2_256__)
+#if !defined(__AVX10_2__)
#pragma GCC push_options
#pragma GCC target("avx10.2")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -1327,9 +1327,9 @@ _mm_comineq_sbh (__m128bh __A, __m128bh __B)
return __builtin_ia32_vcomisbf16neq (__A, __B);
}
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* __AVX10_2BF16INTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx10_2convertintrin.h b/gcc/config/i386/avx10_2convertintrin.h
index 7c9c238..8cbdc66 100644
--- a/gcc/config/i386/avx10_2convertintrin.h
+++ b/gcc/config/i386/avx10_2convertintrin.h
@@ -28,10 +28,10 @@
#ifndef _AVX10_2CONVERTINTRIN_H_INCLUDED
#define _AVX10_2CONVERTINTRIN_H_INCLUDED
-#if !defined(__AVX10_2_256__)
+#if !defined(__AVX10_2__)
#pragma GCC push_options
#pragma GCC target("avx10.2")
-#define __DISABLE_AVX10_2_256__
+#define __DISABLE_AVX10_2__
#endif /* __AVX10_2__ */
extern __inline __m128h
@@ -70,100 +70,34 @@ extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvtx2ps_ph (__m256 __A, __m256 __B)
{
- return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v16hf)
+ _mm256_setzero_ph (),
+ (__mmask16) -1);
}
extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvtx2ps_ph (__m256h __W, __mmask16 __U, __m256 __A, __m256 __B)
{
- return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v16hf) __W,
+ (__mmask16) __U);
}
extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvtx2ps_ph ( __mmask16 __U, __m256 __A, __m256 __B)
{
- return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __OPTIMIZE__
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtx_round2ps_ph (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtx_round2ps_ph (__m256h __W, __mmask16 __U, __m256 __A,
- __m256 __B, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
+ return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v16hf)
+ _mm256_setzero_ph (),
+ (__mmask16) __U);
}
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtx_round2ps_ph (__mmask16 __U, __m256 __A,
- __m256 __B, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-#else
-#define _mm256_cvtx_round2ps_ph(A, B, R) \
- ((__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvtx_round2ps_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvtx_round2ps_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_vcvt2ps2phx256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-#endif /* __OPTIMIZE__ */
-
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtbiasph_bf8 (__m128i __A, __m128h __B)
@@ -229,7 +163,7 @@ _mm256_maskz_cvtbiasph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiassph_bf8 (__m128i __A, __m128h __B)
+_mm_cvts_biasph_bf8 (__m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128 ((__v16qi) __A,
(__v8hf) __B);
@@ -237,8 +171,8 @@ _mm_cvtbiassph_bf8 (__m128i __A, __m128h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiassph_bf8 (__m128i __W, __mmask8 __U,
- __m128i __A, __m128h __B)
+_mm_mask_cvts_biasph_bf8 (__m128i __W, __mmask8 __U,
+ __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
@@ -248,7 +182,7 @@ _mm_mask_cvtbiassph_bf8 (__m128i __W, __mmask8 __U,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiassph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvts_biasph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
@@ -259,7 +193,7 @@ _mm_maskz_cvtbiassph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiassph_bf8 (__m256i __A, __m256h __B)
+_mm256_cvts_biasph_bf8 (__m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
@@ -270,8 +204,8 @@ _mm256_cvtbiassph_bf8 (__m256i __A, __m256h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiassph_bf8 (__m128i __W, __mmask16 __U,
- __m256i __A, __m256h __B)
+_mm256_mask_cvts_biasph_bf8 (__m128i __W, __mmask16 __U,
+ __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
@@ -281,7 +215,7 @@ _mm256_mask_cvtbiassph_bf8 (__m128i __W, __mmask16 __U,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiassph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvts_biasph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
@@ -355,7 +289,7 @@ _mm256_maskz_cvtbiasph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiassph_hf8 (__m128i __A, __m128h __B)
+_mm_cvts_biasph_hf8 (__m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128 ((__v16qi) __A,
(__v8hf) __B);
@@ -363,8 +297,8 @@ _mm_cvtbiassph_hf8 (__m128i __A, __m128h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiassph_hf8 (__m128i __W, __mmask8 __U,
- __m128i __A, __m128h __B)
+_mm_mask_cvts_biasph_hf8 (__m128i __W, __mmask8 __U,
+ __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
@@ -374,7 +308,7 @@ _mm_mask_cvtbiassph_hf8 (__m128i __W, __mmask8 __U,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiassph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvts_biasph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
@@ -385,7 +319,7 @@ _mm_maskz_cvtbiassph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiassph_hf8 (__m256i __A, __m256h __B)
+_mm256_cvts_biasph_hf8 (__m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
@@ -396,8 +330,8 @@ _mm256_cvtbiassph_hf8 (__m256i __A, __m256h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiassph_hf8 (__m128i __W, __mmask16 __U,
- __m256i __A, __m256h __B)
+_mm256_mask_cvts_biasph_hf8 (__m128i __W, __mmask16 __U,
+ __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
@@ -407,7 +341,7 @@ _mm256_mask_cvtbiassph_hf8 (__m128i __W, __mmask16 __U,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiassph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvts_biasph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
@@ -484,7 +418,7 @@ _mm256_maskz_cvt2ph_bf8 (__mmask32 __U, __m256h __A, __m256h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvts2ph_bf8 (__m128h __A, __m128h __B)
+_mm_cvts_2ph_bf8 (__m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
@@ -495,8 +429,8 @@ _mm_cvts2ph_bf8 (__m128h __A, __m128h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvts2ph_bf8 (__m128i __W, __mmask16 __U,
- __m128h __A, __m128h __B)
+_mm_mask_cvts_2ph_bf8 (__m128i __W, __mmask16 __U,
+ __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
@@ -506,7 +440,7 @@ _mm_mask_cvts2ph_bf8 (__m128i __W, __mmask16 __U,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvts2ph_bf8 (__mmask16 __U, __m128h __A, __m128h __B)
+_mm_maskz_cvts_2ph_bf8 (__mmask16 __U, __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
@@ -517,7 +451,7 @@ _mm_maskz_cvts2ph_bf8 (__mmask16 __U, __m128h __A, __m128h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvts2ph_bf8 (__m256h __A, __m256h __B)
+_mm256_cvts_2ph_bf8 (__m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
@@ -528,8 +462,8 @@ _mm256_cvts2ph_bf8 (__m256h __A, __m256h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvts2ph_bf8 (__m256i __W, __mmask32 __U,
- __m256h __A, __m256h __B)
+_mm256_mask_cvts_2ph_bf8 (__m256i __W, __mmask32 __U,
+ __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
@@ -539,7 +473,7 @@ _mm256_mask_cvts2ph_bf8 (__m256i __W, __mmask32 __U,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvts2ph_bf8 (__mmask32 __U, __m256h __A, __m256h __B)
+_mm256_maskz_cvts_2ph_bf8 (__mmask32 __U, __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
@@ -616,7 +550,7 @@ _mm256_maskz_cvt2ph_hf8 (__mmask32 __U, __m256h __A, __m256h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvts2ph_hf8 (__m128h __A, __m128h __B)
+_mm_cvts_2ph_hf8 (__m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
@@ -627,8 +561,8 @@ _mm_cvts2ph_hf8 (__m128h __A, __m128h __B)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvts2ph_hf8 (__m128i __W, __mmask16 __U,
- __m128h __A, __m128h __B)
+_mm_mask_cvts_2ph_hf8 (__m128i __W, __mmask16 __U,
+ __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
@@ -638,7 +572,7 @@ _mm_mask_cvts2ph_hf8 (__m128i __W, __mmask16 __U,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvts2ph_hf8 (__mmask16 __U, __m128h __A, __m128h __B)
+_mm_maskz_cvts_2ph_hf8 (__mmask16 __U, __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
@@ -649,7 +583,7 @@ _mm_maskz_cvts2ph_hf8 (__mmask16 __U, __m128h __A, __m128h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvts2ph_hf8 (__m256h __A, __m256h __B)
+_mm256_cvts_2ph_hf8 (__m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
@@ -660,8 +594,8 @@ _mm256_cvts2ph_hf8 (__m256h __A, __m256h __B)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvts2ph_hf8 (__m256i __W, __mmask32 __U,
- __m256h __A, __m256h __B)
+_mm256_mask_cvts_2ph_hf8 (__m256i __W, __mmask32 __U,
+ __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
@@ -671,7 +605,7 @@ _mm256_mask_cvts2ph_hf8 (__m256i __W, __mmask32 __U,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvts2ph_hf8 (__mmask32 __U, __m256h __A, __m256h __B)
+_mm256_maskz_cvts_2ph_hf8 (__mmask32 __U, __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
@@ -798,7 +732,7 @@ _mm256_maskz_cvtph_bf8 (__mmask16 __U, __m256h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsph_bf8 (__m128h __A)
+_mm_cvts_ph_bf8 (__m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
@@ -808,7 +742,7 @@ _mm_cvtsph_bf8 (__m128h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_cvts_ph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i) __W,
@@ -817,7 +751,7 @@ _mm_mask_cvtsph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsph_bf8 (__mmask8 __U, __m128h __A)
+_mm_maskz_cvts_ph_bf8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
@@ -827,7 +761,7 @@ _mm_maskz_cvtsph_bf8 (__mmask8 __U, __m128h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtsph_bf8 (__m256h __A)
+_mm256_cvts_ph_bf8 (__m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
@@ -837,7 +771,7 @@ _mm256_cvtsph_bf8 (__m256h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtsph_bf8 (__m128i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_cvts_ph_bf8 (__m128i __W, __mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i) __W,
@@ -846,7 +780,7 @@ _mm256_mask_cvtsph_bf8 (__m128i __W, __mmask16 __U, __m256h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtsph_bf8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_cvts_ph_bf8 (__mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
@@ -914,7 +848,7 @@ _mm256_maskz_cvtph_hf8 (__mmask16 __U, __m256h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsph_hf8 (__m128h __A)
+_mm_cvts_ph_hf8 (__m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
@@ -924,7 +858,7 @@ _mm_cvtsph_hf8 (__m128h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_cvts_ph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i) __W,
@@ -933,7 +867,7 @@ _mm_mask_cvtsph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsph_hf8 (__mmask8 __U, __m128h __A)
+_mm_maskz_cvts_ph_hf8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
@@ -943,7 +877,7 @@ _mm_maskz_cvtsph_hf8 (__mmask8 __U, __m128h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtsph_hf8 (__m256h __A)
+_mm256_cvts_ph_hf8 (__m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
@@ -953,7 +887,7 @@ _mm256_cvtsph_hf8 (__m256h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtsph_hf8 (__m128i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_cvts_ph_hf8 (__m128i __W, __mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i) __W,
@@ -962,7 +896,7 @@ _mm256_mask_cvtsph_hf8 (__m128i __W, __mmask16 __U, __m256h __A)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtsph_hf8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_cvts_ph_hf8 (__mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
@@ -1018,9 +952,9 @@ _mm256_maskz_cvtbf8_ph (__mmask16 __U, __m128i __A)
(__m256i) _mm256_maskz_cvtepi8_epi16 (__U, __A), 8));
}
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* __AVX10_2CONVERTINTRIN_H_INCLUDED */
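A brief usage sketch of the conversion intrinsics as spelled after this change (illustrative only, not part of the patch; it assumes GCC with this header, and the helper name fold_ps_to_bf8 is made up):

#include <immintrin.h>

/* Two 8-float vectors are packed into 16 FP16 values; the 256-bit form
   no longer takes an embedded-rounding operand after this patch.  The
   saturating FP16 -> BF8 convert is now spelled _mm256_cvts_ph_bf8
   (formerly _mm256_cvtsph_bf8).  */
__attribute__ ((target ("avx10.2")))
static __m128i
fold_ps_to_bf8 (__m256 lo, __m256 hi)
{
  __m256h ph = _mm256_cvtx2ps_ph (lo, hi);
  return _mm256_cvts_ph_bf8 (ph);
}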
diff --git a/gcc/config/i386/avx10_2mediaintrin.h b/gcc/config/i386/avx10_2mediaintrin.h
index 19578a7..0993e8e 100644
--- a/gcc/config/i386/avx10_2mediaintrin.h
+++ b/gcc/config/i386/avx10_2mediaintrin.h
@@ -28,11 +28,11 @@
#ifndef _AVX10_2MEDIAINTRIN_H_INCLUDED
#define _AVX10_2MEDIAINTRIN_H_INCLUDED
-#if !defined(__AVX10_2_256__)
+#if !defined(__AVX10_2__)
#pragma GCC push_options
-#pragma GCC target("avx10.2-256")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
+#pragma GCC target("avx10.2")
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
#define _mm_dpbssd_epi32(W, A, B) \
(__m128i) __builtin_ia32_vpdpbssd128 ((__v4si) (W), (__v4si) (A), (__v4si) (B))
@@ -831,9 +831,9 @@ _mm256_maskz_mpsadbw_epu8 (__mmask16 __U, __m256i __X,
#endif
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* __AVX10_2MEDIAINTRIN_H_INCLUDED */
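The same guard unification applies here; a minimal sketch of user code keying off the merged feature macro (illustrative only; dot_i8 is a made-up helper, and the intrinsic is used exactly as defined in this header):

#include <immintrin.h>

#ifdef __AVX10_2__
/* Accumulate signed-byte dot products into 32-bit lanes.  */
static __m128i
dot_i8 (__m128i acc, __m128i a, __m128i b)
{
  return _mm_dpbssd_epi32 (acc, a, b);
}
#endif /* __AVX10_2__ */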
diff --git a/gcc/config/i386/avx10_2minmaxintrin.h b/gcc/config/i386/avx10_2minmaxintrin.h
index 278ad05..0a4a253 100644
--- a/gcc/config/i386/avx10_2minmaxintrin.h
+++ b/gcc/config/i386/avx10_2minmaxintrin.h
@@ -23,11 +23,11 @@
#ifndef _AVX10_2MINMAXINTRIN_H_INCLUDED
#define _AVX10_2MINMAXINTRIN_H_INCLUDED
-#if !defined(__AVX10_2_256__)
+#if !defined(__AVX10_2__)
#pragma GCC push_options
#pragma GCC target("avx10.2")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
#ifdef __OPTIMIZE__
extern __inline __m128bh
@@ -143,10 +143,10 @@ extern __inline __m256d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_minmax_pd (__m256d __A, __m256d __B, const int __C)
{
- return (__m256d) __builtin_ia32_minmaxpd256_mask_round (
+ return (__m256d) __builtin_ia32_minmaxpd256_mask (
(__v4df) __A, (__v4df) __B, __C,
(__v4df) (__m256d) _mm256_undefined_pd (),
- (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
+ (__mmask8) -1);
}
extern __inline __m256d
@@ -154,50 +154,19 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_minmax_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B,
const int __C)
{
- return (__m256d) __builtin_ia32_minmaxpd256_mask_round (
+ return (__m256d) __builtin_ia32_minmaxpd256_mask (
(__v4df) __A, (__v4df) __B, __C, (__v4df) __W,
- (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);
+ (__mmask8) __U);
}
extern __inline __m256d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_minmax_pd (__mmask8 __U, __m256d __A, __m256d __B, const int __C)
{
- return (__m256d) __builtin_ia32_minmaxpd256_mask_round (
+ return (__m256d) __builtin_ia32_minmaxpd256_mask (
(__v4df) __A, (__v4df) __B, __C,
(__v4df) (__m256d) _mm256_setzero_pd (),
- (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_minmax_round_pd (__m256d __A, __m256d __B, const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_minmaxpd256_mask_round (
- (__v4df) __A, (__v4df) __B, __C,
- (__v4df) (__m256d) _mm256_undefined_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_minmax_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_minmaxpd256_mask_round (
- (__v4df) __A, (__v4df) __B, __C, (__v4df) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_minmax_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_minmaxpd256_mask_round (
- (__v4df) __A, (__v4df) __B, __C,
- (__v4df) (__m256d) _mm256_setzero_pd (),
- (__mmask8) __U, __R);
+ (__mmask8) __U);
}
extern __inline __m128h
@@ -240,10 +209,10 @@ extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_minmax_ph (__m256h __A, __m256h __B, const int __C)
{
- return (__m256h) __builtin_ia32_minmaxph256_mask_round (
+ return (__m256h) __builtin_ia32_minmaxph256_mask (
(__v16hf) __A, (__v16hf) __B, __C,
(__v16hf) (__m256h) _mm256_undefined_ph (),
- (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+ (__mmask16) -1);
}
extern __inline __m256h
@@ -251,50 +220,19 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_minmax_ph (__m256h __W, __mmask16 __U, __m256h __A, __m256h __B,
const int __C)
{
- return (__m256h) __builtin_ia32_minmaxph256_mask_round (
+ return (__m256h) __builtin_ia32_minmaxph256_mask (
(__v16hf) __A, (__v16hf) __B, __C, (__v16hf) __W,
- (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);
+ (__mmask16) __U);
}
extern __inline __m256h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_minmax_ph (__mmask16 __U, __m256h __A, __m256h __B, const int __C)
{
- return (__m256h) __builtin_ia32_minmaxph256_mask_round (
+ return (__m256h) __builtin_ia32_minmaxph256_mask (
(__v16hf) __A, (__v16hf) __B, __C,
(__v16hf) (__m256h) _mm256_setzero_ph (),
- (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_minmax_round_ph (__m256h __A, __m256h __B, const int __C, const int __R)
-{
- return (__m256h) __builtin_ia32_minmaxph256_mask_round (
- (__v16hf) __A, (__v16hf) __B, __C,
- (__v16hf) (__m256h) _mm256_undefined_ph (),
- (__mmask16) -1, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_minmax_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __C, const int __R)
-{
- return (__m256h) __builtin_ia32_minmaxph256_mask_round (
- (__v16hf) __A, (__v16hf) __B, __C, (__v16hf) __W,
- (__mmask16) __U, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_minmax_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __C, const int __R)
-{
- return (__m256h) __builtin_ia32_minmaxph256_mask_round (
- (__v16hf) __A, (__v16hf) __B, __C,
- (__v16hf) (__m256h) _mm256_setzero_ph (),
- (__mmask16) __U, __R);
+ (__mmask16) __U);
}
extern __inline __m128
@@ -337,10 +275,10 @@ extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_minmax_ps (__m256 __A, __m256 __B, const int __C)
{
- return (__m256) __builtin_ia32_minmaxps256_mask_round (
+ return (__m256) __builtin_ia32_minmaxps256_mask (
(__v8sf) __A, (__v8sf) __B, __C,
(__v8sf) (__m256) _mm256_undefined_ps (),
- (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
+ (__mmask8) -1);
}
extern __inline __m256
@@ -348,50 +286,19 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_minmax_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
const int __C)
{
- return (__m256) __builtin_ia32_minmaxps256_mask_round (
+ return (__m256) __builtin_ia32_minmaxps256_mask (
(__v8sf) __A, (__v8sf) __B, __C, (__v8sf) __W,
- (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);
+ (__mmask8) __U);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_minmax_ps (__mmask8 __U, __m256 __A, __m256 __B, const int __C)
{
- return (__m256) __builtin_ia32_minmaxps256_mask_round (
+ return (__m256) __builtin_ia32_minmaxps256_mask (
(__v8sf) __A, (__v8sf) __B, __C,
(__v8sf) (__m256) _mm256_setzero_ps (),
- (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_minmax_round_ps (__m256 __A, __m256 __B, const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_minmaxps256_mask_round (
- (__v8sf) __A, (__v8sf) __B, __C,
- (__v8sf) (__m256) _mm256_undefined_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_minmax_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_minmaxps256_mask_round (
- (__v8sf) __A, (__v8sf) __B, __C, (__v8sf) __W,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_minmax_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_minmaxps256_mask_round (
- (__v8sf) __A, (__v8sf) __B, __C,
- (__v8sf) (__m256) _mm256_setzero_ps (),
- (__mmask8) __U, __R);
+ (__mmask8) __U);
}
extern __inline __m128d
@@ -697,56 +604,27 @@ _mm_maskz_minmax_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__mmask8) (U)))
#define _mm256_minmax_pd(A, B, C) \
- ((__m256d) __builtin_ia32_minmaxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (int) (C), \
- (__v4df) (__m256d) \
- _mm256_undefined_pd (), \
- (__mmask8) (-1), \
- _MM_FROUND_CUR_DIRECTION))
+ ((__m256d) __builtin_ia32_minmaxpd256_mask ((__v4df) (A), \
+ (__v4df) (B), \
+ (int) (C), \
+ (__v4df) (__m256d) \
+ _mm256_undefined_pd (), \
+ (__mmask8) (-1)))
#define _mm256_mask_minmax_pd(W, U, A, B, C) \
- ((__m256d) __builtin_ia32_minmaxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (int) (C), \
- (__v4df) (__m256d) (W), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
+ ((__m256d) __builtin_ia32_minmaxpd256_mask ((__v4df) (A), \
+ (__v4df) (B), \
+ (int) (C), \
+ (__v4df) (__m256d) (W), \
+ (__mmask8) (U)))
#define _mm256_maskz_minmax_pd(U, A, B, C) \
- ((__m256d) __builtin_ia32_minmaxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (int) (C), \
- (__v4df) (__m256d) \
- _mm256_setzero_pd (), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm256_minmax_round_pd(A, B, C, R) \
- ((__m256d) __builtin_ia32_minmaxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (int) (C), \
- (__v4df) (__m256d) \
- _mm256_undefined_pd (), \
- (__mmask8) (-1), \
- (int) (R)))
-
-#define _mm256_mask_minmax_round_pd(W, U, A, B, C, R) \
- ((__m256d) __builtin_ia32_minmaxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (int) (C), \
- (__v4df) (__m256d) (W), \
- (__mmask8) (U), \
- (int) (R)))
-
-#define _mm256_maskz_minmax_round_pd(U, A, B, C, R) \
- ((__m256d) __builtin_ia32_minmaxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (int) (C), \
- (__v4df) (__m256d) \
- _mm256_setzero_pd (), \
- (__mmask8) (U), \
- (int) (R)))
+ ((__m256d) __builtin_ia32_minmaxpd256_mask ((__v4df) (A), \
+ (__v4df) (B), \
+ (int) (C), \
+ (__v4df) (__m256d) \
+ _mm256_setzero_pd (), \
+ (__mmask8) (U)))
#define _mm_minmax_ph(A, B, C) \
((__m128h) __builtin_ia32_minmaxph128_mask ((__v8hf) (A), \
@@ -772,56 +650,27 @@ _mm_maskz_minmax_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__mmask8) (U)))
#define _mm256_minmax_ph(A, B, C) \
- ((__m256h) __builtin_ia32_minmaxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (int) (C), \
- (__v16hf) (__m256h) \
- _mm256_undefined_ph (), \
- (__mmask16) (-1), \
- _MM_FROUND_CUR_DIRECTION))
+ ((__m256h) __builtin_ia32_minmaxph256_mask ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (int) (C), \
+ (__v16hf) (__m256h) \
+ _mm256_undefined_ph (), \
+ (__mmask16) (-1)))
#define _mm256_mask_minmax_ph(W, U, A, B, C) \
- ((__m256h) __builtin_ia32_minmaxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (int) (C), \
- (__v16hf) (__m256h) (W), \
- (__mmask16) (U), \
- _MM_FROUND_CUR_DIRECTION))
+ ((__m256h) __builtin_ia32_minmaxph256_mask ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (int) (C), \
+ (__v16hf) (__m256h) (W), \
+ (__mmask16) (U)))
#define _mm256_maskz_minmax_ph(U, A, B, C) \
- ((__m256h) __builtin_ia32_minmaxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (int) (C), \
- (__v16hf) (__m256h) \
- _mm256_setzero_ph (), \
- (__mmask16) (U), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm256_minmax_round_ph(A, B, C, R) \
- ((__m256h) __builtin_ia32_minmaxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (int) (C), \
- (__v16hf) (__m256h) \
- _mm256_undefined_ph (), \
- (__mmask16) (-1), \
- (int) (R)))
-
-#define _mm256_mask_minmax_round_ph(W, U, A, B, C, R) \
- ((__m256h) __builtin_ia32_minmaxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (int) (C), \
- (__v16hf) (__m256h) (W), \
- (__mmask16) (U), \
- (int) (R)))
-
-#define _mm256_maskz_minmax_round_ph(U, A, B, C, R) \
- ((__m256h) __builtin_ia32_minmaxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (int) (C), \
- (__v16hf) (__m256h) \
- _mm256_setzero_ph (), \
- (__mmask16) (U), \
- (int) (R)))
+ ((__m256h) __builtin_ia32_minmaxph256_mask ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (int) (C), \
+ (__v16hf) (__m256h) \
+ _mm256_setzero_ph (), \
+ (__mmask16) (U)))
#define _mm_minmax_ps(A, B, C) \
((__m128) __builtin_ia32_minmaxps128_mask ((__v4sf) (A), \
@@ -847,56 +696,27 @@ _mm_maskz_minmax_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
(__mmask8) (U)))
#define _mm256_minmax_ps(A, B, C) \
- ((__m256) __builtin_ia32_minmaxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (int) (C), \
- (__v8sf) (__m256) \
- _mm256_undefined_ps (), \
- (__mmask8) (-1), \
- _MM_FROUND_CUR_DIRECTION))
+ ((__m256) __builtin_ia32_minmaxps256_mask ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (int) (C), \
+ (__v8sf) (__m256) \
+ _mm256_undefined_ps (), \
+ (__mmask8) (-1)))
#define _mm256_mask_minmax_ps(W, U, A, B, C) \
- ((__m256) __builtin_ia32_minmaxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (int) (C), \
- (__v8sf) (__m256) (W), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
+ ((__m256) __builtin_ia32_minmaxps256_mask ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (int) (C), \
+ (__v8sf) (__m256) (W), \
+ (__mmask8) (U)))
#define _mm256_maskz_minmax_ps(U, A, B, C) \
- ((__m256) __builtin_ia32_minmaxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (int) (C), \
- (__v8sf) (__m256) \
- _mm256_setzero_ps (), \
- (__mmask8) (U), \
- _MM_FROUND_CUR_DIRECTION))
-
-#define _mm256_minmax_round_ps(A, B, C, R) \
- ((__m256) __builtin_ia32_minmaxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (int) (C), \
- (__v8sf) (__m256) \
- _mm256_undefined_ps (), \
- (__mmask8) (-1), \
- (int) (R)))
-
-#define _mm256_mask_minmax_round_ps(W, U, A, B, C, R) \
- ((__m256) __builtin_ia32_minmaxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (int) (C), \
- (__v8sf) (__m256) (W), \
- (__mmask8) (U), \
- (int) (R)))
-
-#define _mm256_maskz_minmax_round_ps(U, A, B, C, R) \
- ((__m256) __builtin_ia32_minmaxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (int) (C), \
- (__v8sf) (__m256) \
- _mm256_setzero_ps (), \
- (__mmask8) (U), \
- (int) (R)))
+ ((__m256) __builtin_ia32_minmaxps256_mask ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (int) (C), \
+ (__v8sf) (__m256) \
+ _mm256_setzero_ps (), \
+ (__mmask8) (U)))
#define _mm_minmax_round_sd(A, B, C, R) \
((__m128d) __builtin_ia32_minmaxsd_mask_round ((__v2df) (A), \
@@ -1056,9 +876,9 @@ _mm_maskz_minmax_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
#endif
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* _AVX10_2MINMAXINTRIN_H_INCLUDED */
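With the 256-bit rounding forms removed from this header, callers use the imm-only form; a minimal sketch (illustrative only: the helper name and the immediate value 0 are placeholders, see the VMINMAX imm8 encoding for real operation selectors):

#include <immintrin.h>

__attribute__ ((target ("avx10.2")))
static __m256d
minmax_demo (__m256d a, __m256d b)
{
  /* The rounding-control argument is gone at 256 bits; only the
     operation-selector immediate remains.  */
  return _mm256_minmax_pd (a, b, 0);
}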
diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
deleted file mode 100644
index e36843d..0000000
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ /dev/null
@@ -1,6433 +0,0 @@
-/* Copyright (C) 2024-2025 Free Software Foundation, Inc.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- Under Section 7 of GPL version 3, you are granted additional
- permissions described in the GCC Runtime Library Exception, version
- 3.1, as published by the Free Software Foundation.
-
- You should have received a copy of the GNU General Public License and
- a copy of the GCC Runtime Library Exception along with this program;
- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
- <http://www.gnu.org/licenses/>. */
-
-#ifndef _IMMINTRIN_H_INCLUDED
-#error "Never use <avx10_2roundingintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED
-#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED
-
-#ifndef __AVX10_2_256__
-#pragma GCC push_options
-#pragma GCC target("avx10.2-256")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
-
-#ifdef __OPTIMIZE__
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C,
- const int __R)
-{
- return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B,
- const int __C, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C,
- const int __R)
-{
- return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- __C,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B,
- const int __C, const int __R)
-{
- return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- __C,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B,
- const int __C, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepi32_ph (__m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtdq2ph256_mask_round ((__v8si) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepi32_ph (__m128h __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtdq2ph256_mask_round ((__v8si) __A,
- (__v8hf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepi32_ph (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtdq2ph256_mask_round ((__v8si) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepi32_ps (__m256i __A, const int __R)
-{
- return (__m256) __builtin_ia32_cvtdq2ps256_mask_round ((__v8si) __A,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepi32_ps (__m256 __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m256) __builtin_ia32_cvtdq2ps256_mask_round ((__v8si) __A,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepi32_ps (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m256) __builtin_ia32_cvtdq2ps256_mask_round ((__v8si) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundpd_ph (__m256d __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtpd2ph256_mask_round ((__v4df) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundpd_ph (__m128h __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtpd2ph256_mask_round ((__v4df) __A,
- (__v8hf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundpd_ph (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtpd2ph256_mask_round ((__v4df) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundpd_ps (__m256d __A, const int __R)
-{
- return (__m128) __builtin_ia32_cvtpd2ps256_mask_round ((__v4df) __A,
- (__v4sf)
- _mm_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundpd_ps (__m128 __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128) __builtin_ia32_cvtpd2ps256_mask_round ((__v4df) __A,
- (__v4sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundpd_ps (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m128) __builtin_ia32_cvtpd2ps256_mask_round ((__v4df) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundpd_epi32 (__m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvtpd2dq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_undefined_si128 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128i) __builtin_ia32_cvtpd2dq256_mask_round ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundpd_epi32 (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m128i) __builtin_ia32_cvtpd2dq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundpd_epi64 (__m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtpd2qq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtpd2qq256_mask_round ((__v4df) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundpd_epi64 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtpd2qq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundpd_epu32 (__m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvtpd2udq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_undefined_si128 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128i) __builtin_ia32_cvtpd2udq256_mask_round ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundpd_epu32 (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m128i) __builtin_ia32_cvtpd2udq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundpd_epu64 (__m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtpd2uqq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtpd2uqq256_mask_round ((__v4df) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtpd2uqq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_epi32 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_epi32 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_epi32 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_pd (__m128h __A, const int __R)
-{
- return (__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_pd (__m256d __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) __A,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_pd (__mmask8 __U, __m128h __A, const int __R)
-{
- return (__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_ps (__m128h __A, const int __R)
-{
- return
- (__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) __A,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_ps (__m256 __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) __A,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_ps (__mmask8 __U, __m128h __A, const int __R)
-{
- return (__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtx_roundph_ps (__m128h __A, const int __R)
-{
- return (__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtx_roundph_ps (__m256 __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) __A,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtx_roundph_ps (__mmask8 __U, __m128h __A, const int __R)
-{
- return (__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_epi64 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_epi64 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_epi64 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_epu32 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_epu32 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_epu32 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_epu64 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_epu64 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_epu16 (__m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_undefined_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundph_epi16 (__m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_undefined_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundps_pd (__m128 __A, const int __R)
-{
- return
- (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundps_pd (__m256d __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundps_pd (__mmask8 __U, __m128 __A, const int __R)
-{
- return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtx_roundps_ph (__m256 __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtx_roundps_ph (__m128h __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
- (__v8hf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtx_roundps_ph (__mmask8 __U, __m256 __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundps_epi32 (__m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundps_epi64 (__m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundps_epi64 (__mmask8 __U, __m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundps_epu32 (__m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundps_epu64 (__m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundps_epu64 (__mmask8 __U, __m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepi64_pd (__m256i __A, const int __R)
-{
- return (__m256d) __builtin_ia32_cvtqq2pd256_mask_round ((__v4di) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepi64_pd (__m256d __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m256d) __builtin_ia32_cvtqq2pd256_mask_round ((__v4di) __A,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepi64_pd (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m256d) __builtin_ia32_cvtqq2pd256_mask_round ((__v4di) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepi64_ph (__m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtqq2ph256_mask_round ((__v4di) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepi64_ph (__m128h __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtqq2ph256_mask_round ((__v4di) __A,
- (__v8hf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepi64_ph (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtqq2ph256_mask_round ((__v4di) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepi64_ps (__m256i __A, const int __R)
-{
- return (__m128) __builtin_ia32_cvtqq2ps256_mask_round ((__v4di) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepi64_ps (__m128 __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m128) __builtin_ia32_cvtqq2ps256_mask_round ((__v4di) __A,
- (__v4sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepi64_ps (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m128) __builtin_ia32_cvtqq2ps256_mask_round ((__v4di) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundpd_epi32 (__m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2dq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_undefined_si128 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128i) __builtin_ia32_cvttpd2dq256_mask_round ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundpd_epi32 (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m128i) __builtin_ia32_cvttpd2dq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundpd_epi64 (__m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttpd2qq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttpd2qq256_mask_round ((__v4df) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundpd_epi64 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttpd2qq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundpd_epu32 (__m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2udq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_undefined_si128 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128i) __builtin_ia32_cvttpd2udq256_mask_round ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundpd_epu32 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2udq256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundpd_epu64 (__m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundph_epi32 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundph_epi32 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundph_epi32 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundph_epi64 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundph_epi64 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundph_epi64 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundph_epu32 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundph_epu32 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundph_epu32 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundph_epu64 (__m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundph_epu64 (__m256i __W, __mmask8 __U, __m128h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundph_epu16 (__m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundph_epi16 (__m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundps_epi32 (__m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundps_epi64 (__m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundps_epi64 (__mmask8 __U, __m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundps_epu32 (__m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtt_roundps_epu64 (__m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtt_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtt_roundps_epu64 (__mmask8 __U, __m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
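A note on the truncating (cvtt) forms above: truncation itself is the conversion mode, so the __R argument only carries the exception-suppression (SAE) bit. A minimal sketch, again assuming the removed intrinsics are still available:

#include <immintrin.h>

__m256i
truncate_ps_to_epu64 (__m128 a, __mmask8 m)
{
  /* Zero-masked truncating convert; _MM_FROUND_NO_EXC only suppresses
     exceptions, the rounding direction is always toward zero.  */
  return _mm256_maskz_cvtt_roundps_epu64 (m, a, _MM_FROUND_NO_EXC);
}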
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepu32_ph (__m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepu32_ph (__m128h __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
- (__v8hf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepu32_ph (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepu32_ps (__m256i __A, const int __R)
-{
- return
- (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepu32_ps (__m256 __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepu32_ps (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepu64_pd (__m256i __A, const int __R)
-{
- return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepu64_pd (__m256d __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepu64_pd (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepu64_ph (__m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepu64_ph (__m128h __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
- (__v8hf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepu64_ph (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
- (__v8hf)
- _mm_setzero_ph (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepu64_ps (__m256i __A, const int __R)
-{
- return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepu64_ps (__m128 __W, __mmask8 __U, __m256i __A,
- const int __R)
-{
- return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
- (__v4sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepu64_ps (__mmask8 __U, __m256i __A, const int __R)
-{
- return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepu16_ph (__m256i __A, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepu16_ph (__m256h __W, __mmask16 __U, __m256i __A,
- const int __R)
-{
- return (__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) __A,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepu16_ph (__mmask16 __U, __m256i __A, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvt_roundepi16_ph (__m256i __A, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvt_roundepi16_ph (__m256h __W, __mmask16 __U, __m256i __A,
- const int __R)
-{
- return (__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) __A,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvt_roundepi16_ph (__mmask16 __U, __m256i __A, const int __R)
-{
- return (__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_div_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_div_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_div_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_div_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_div_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_div_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
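As a usage sketch for the embedded-rounding divides (hypothetical helper name, assuming these intrinsics are still present):

#include <immintrin.h>

__m256d
div_toward_neg_inf (__m256d num, __m256d den, __mmask8 m)
{
  /* Divide all four lanes, rounding toward negative infinity.  */
  __m256d q = _mm256_div_round_pd (num, den, _MM_FROUND_TO_NEG_INF
					     | _MM_FROUND_NO_EXC);
  /* Merge-masked form: lanes whose bit in m is clear keep the
     corresponding lane of q.  */
  return _mm256_mask_div_round_pd (q, m, num, den, _MM_FROUND_TO_NEG_INF
						   | _MM_FROUND_NO_EXC);
}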
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_div_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_divps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_div_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_divps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_div_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_divps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fcmadd_round_pch (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfcmaddcph256_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fcmadd_round_pch (__m256h __A, __mmask8 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfcmaddcph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fcmadd_round_pch (__m256h __A, __m256h __B, __m256h __D,
- __mmask8 __U, const int __R)
-{
- return (__m256h) __builtin_ia32_vfcmaddcph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fcmadd_round_pch (__mmask8 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfcmaddcph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fcmul_round_pch (__m256h __A, __m256h __B, const int __R)
-{
- return
- (__m256h) __builtin_ia32_vfcmulcph256_round ((__v16hf) __A,
- (__v16hf) __B,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fcmul_round_pch (__m256h __W, __mmask8 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fcmul_round_pch (__mmask8 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fixupimm_round_pd (__m256d __A, __m256d __B, __m256i __D,
- const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4di) __D,
- __C,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fixupimm_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256i __D, const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4di) __D,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fixupimm_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256i __D, const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_fixupimmpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4di) __D,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fixupimm_round_ps (__m256 __A, __m256 __B, __m256i __D, const int __C,
- const int __R)
-{
- return (__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8si) __D,
- __C,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fixupimm_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256i __D, const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8si) __D,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fixupimm_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256i __D, const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_fixupimmps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8si) __D,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmadd_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmadd_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmadd_round_pd (__m256d __A, __m256d __B, __m256d __D,
- __mmask8 __U, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask3_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmadd_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
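The mask, mask3 and maskz FMA variants differ only in what inactive lanes receive; a minimal sketch (hypothetical helper, assuming the removed intrinsics):

#include <immintrin.h>

__m256d
fma_mask_variants (__m256d a, __m256d b, __m256d c, __mmask8 m)
{
  /* a * b + c in every lane, round-to-nearest-even, no exceptions.  */
  __m256d full = _mm256_fmadd_round_pd (a, b, c, _MM_FROUND_TO_NEAREST_INT
						 | _MM_FROUND_NO_EXC);
  /* mask: inactive lanes keep a;  mask3: inactive lanes keep c;
     maskz: inactive lanes are zeroed.  */
  __m256d keep_a = _mm256_mask_fmadd_round_pd (a, m, b, c,
					       _MM_FROUND_TO_NEAREST_INT
					       | _MM_FROUND_NO_EXC);
  __m256d keep_c = _mm256_mask3_fmadd_round_pd (a, b, c, m,
						_MM_FROUND_TO_NEAREST_INT
						| _MM_FROUND_NO_EXC);
  return _mm256_add_pd (full, _mm256_add_pd (keep_a, keep_c));
}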
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmadd_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmadd_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmadd_round_ph (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmadd_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmadd_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmadd_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmadd_round_ps (__m256 __A, __m256 __B, __m256 __D,
- __mmask8 __U, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddps256_mask3_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmadd_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmadd_round_pch (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddcph256_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmadd_round_pch (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddcph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmadd_round_pch (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddcph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmadd_round_pch (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddcph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmaddsub_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmaddsub_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmaddsub_round_pd (__m256d __A, __m256d __B, __m256d __D,
- __mmask8 __U, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmaddsub_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmaddsub_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddsubph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmaddsub_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddsubph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmaddsub_round_ph (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddsubph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmaddsub_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmaddsubph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmaddsub_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddsubps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmaddsub_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddsubps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmaddsub_round_ps (__m256 __A, __m256 __B, __m256 __D,
- __mmask8 __U, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddsubps256_mask3_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmaddsub_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmaddsubps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmsub_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) -1, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmsub_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmsub_round_pd (__m256d __A, __m256d __B, __m256d __D,
- __mmask8 __U, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubpd256_mask3_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmsub_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmsub_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) -1, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmsub_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmsub_round_ph (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmsub_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmsub_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) -1, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmsub_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmsub_round_ps (__m256 __A, __m256 __B, __m256 __D,
- __mmask8 __U, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubps256_mask3_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmsub_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmsubadd_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubaddpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmsubadd_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubaddpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmsubadd_round_pd (__m256d __A, __m256d __B, __m256d __D,
- __mmask8 __U, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmsubadd_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfmsubaddpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmsubadd_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubaddph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmsubadd_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubaddph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmsubadd_round_ph (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubaddph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmsubadd_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfmsubaddph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmsubadd_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubaddps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmsubadd_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubaddps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fmsubadd_round_ps (__m256 __A, __m256 __B, __m256 __D,
- __mmask8 __U, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubaddps256_mask3_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmsubadd_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfmsubaddps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fmul_round_pch (__m256h __B, __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmulcph256_round ((__v16hf) __B,
- (__v16hf) __D,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fmul_round_pch (__m256h __A, __mmask8 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h) __builtin_ia32_vfmulcph256_mask_round ((__v16hf) __B,
- (__v16hf) __D,
- (__v16hf) __A,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fmul_round_pch (__mmask8 __U, __m256h __B, __m256h __D,
- const int __R)
-{
- return (__m256h) __builtin_ia32_vfmulcph256_mask_round ((__v16hf) __B,
- (__v16hf) __D,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
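For the complex FP16 products above, the eight-bit mask selects complex elements (pairs of halves) rather than individual FP16 lanes; a small sketch under the same assumptions:

#include <immintrin.h>

__m256h
complex_products (__m256h a, __m256h b, __mmask8 m)
{
  /* Plain complex multiply of the eight FP16 complex numbers.  */
  __m256h p = _mm256_fmul_round_pch (a, b, _MM_FROUND_TO_NEAREST_INT
					   | _MM_FROUND_NO_EXC);
  /* Conjugate form, a * conj(b), zeroing unselected complex elements.  */
  __m256h cp = _mm256_maskz_fcmul_round_pch (m, a, b,
					     _MM_FROUND_TO_NEAREST_INT
					     | _MM_FROUND_NO_EXC);
  return _mm256_add_ph (p, cp);
}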
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fnmadd_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmaddpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fnmadd_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmaddpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fnmadd_round_pd (__m256d __A, __m256d __B, __m256d __D,
- __mmask8 __U, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmaddpd256_mask3_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fnmadd_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmaddpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fnmadd_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmaddph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fnmadd_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmaddph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fnmadd_round_ph (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmaddph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fnmadd_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmaddph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fnmadd_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmaddps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fnmadd_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmaddps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fnmadd_round_ps (__m256 __A, __m256 __B, __m256 __D,
- __mmask8 __U, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmaddps256_mask3_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fnmadd_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmaddps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fnmsub_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmsubpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fnmsub_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmsubpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fnmsub_round_pd (__m256d __A, __m256d __B, __m256d __D,
- __mmask8 __U, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmsubpd256_mask3_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fnmsub_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- __m256d __D, const int __R)
-{
- return (__m256d) __builtin_ia32_vfnmsubpd256_maskz_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fnmsub_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmsubph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fnmsub_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmsubph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fnmsub_round_ph (__m256h __A, __m256h __B, __m256h __D,
- __mmask16 __U, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmsubph256_mask3_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fnmsub_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- __m256h __D, const int __R)
-{
- return (__m256h)
- __builtin_ia32_vfnmsubph256_maskz_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __D,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_fnmsub_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmsubps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_fnmsub_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmsubps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask3_fnmsub_round_ps (__m256 __A, __m256 __B, __m256 __D,
- __mmask8 __U, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmsubps256_mask3_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_fnmsub_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- __m256 __D, const int __R)
-{
- return (__m256) __builtin_ia32_vfnmsubps256_maskz_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __D,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_getexp_round_pd (__m256d __A, const int __R)
-{
- return
- (__m256d) __builtin_ia32_getexppd256_mask_round ((__v4df) __A,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_getexp_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256d) __builtin_ia32_getexppd256_mask_round ((__v4df) __A,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_getexp_round_pd (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m256d) __builtin_ia32_getexppd256_mask_round ((__v4df) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_getexp_round_ph (__m256h __A, const int __R)
-{
- return (__m256h) __builtin_ia32_getexpph256_mask_round ((__v16hf) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_getexp_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256h) __builtin_ia32_getexpph256_mask_round ((__v16hf) __A,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_getexp_round_ph (__mmask16 __U, __m256h __A, const int __R)
-{
- return (__m256h) __builtin_ia32_getexpph256_mask_round ((__v16hf) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_getexp_round_ps (__m256 __A, const int __R)
-{
- return (__m256) __builtin_ia32_getexpps256_mask_round ((__v8sf) __A,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_getexp_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256) __builtin_ia32_getexpps256_mask_round ((__v8sf) __A,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_getexp_round_ps (__mmask8 __U, __m256 __A, const int __R)
-{
- return (__m256) __builtin_ia32_getexpps256_mask_round ((__v8sf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_getmant_round_pd (__m256d __A, _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return
- (__m256d) __builtin_ia32_getmantpd256_mask_round ((__v4df) __A,
- (__C << 2) | __B,
- _mm256_undefined_pd (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_getmant_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m256d) __builtin_ia32_getmantpd256_mask_round ((__v4df) __A,
- (__C << 2) | __B,
- (__v4df) __W, __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_getmant_round_pd (__mmask8 __U, __m256d __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m256d) __builtin_ia32_getmantpd256_mask_round ((__v4df) __A,
- (__C << 2) | __B,
- (__v4df)
- _mm256_setzero_pd (),
- __U, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_getmant_round_ph (__m256h __A, _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return
- (__m256h) __builtin_ia32_getmantph256_mask_round ((__v16hf) __A,
- (__C << 2) | __B,
- _mm256_undefined_ph (),
- (__mmask16) -1, __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_getmant_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m256h) __builtin_ia32_getmantph256_mask_round ((__v16hf) __A,
- (__C << 2) | __B,
- (__v16hf) __W, __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_getmant_round_ph (__mmask16 __U, __m256h __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m256h) __builtin_ia32_getmantph256_mask_round ((__v16hf) __A,
- (__C << 2) | __B,
- (__v16hf)
- _mm256_setzero_ph (),
- __U, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_getmant_round_ps (__m256 __A, _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return
- (__m256) __builtin_ia32_getmantps256_mask_round ((__v8sf) __A,
- (__C << 2) | __B,
- _mm256_undefined_ps (),
- (__mmask8) -1, __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_getmant_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m256) __builtin_ia32_getmantps256_mask_round ((__v8sf) __A,
- (__C << 2) | __B,
- (__v8sf) __W, __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_getmant_round_ps (__mmask8 __U, __m256 __A,
- _MM_MANTISSA_NORM_ENUM __B,
- _MM_MANTISSA_SIGN_ENUM __C, const int __R)
-{
- return (__m256) __builtin_ia32_getmantps256_mask_round ((__v8sf) __A,
- (__C << 2) | __B,
- (__v8sf)
- _mm256_setzero_ps (),
- __U, __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_max_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_max_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_max_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_max_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_max_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_max_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_max_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_max_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_max_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_min_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_min_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_min_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_min_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_min_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_min_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_min_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_minps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_min_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_minps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_min_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_minps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mul_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_mulpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_mul_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_mulpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_mul_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_mulpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mul_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_mulph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_mul_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_mulph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_mul_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_mulph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mul_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_mulps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_mul_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_mulps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_mul_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_mulps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_range_round_pd (__m256d __A, __m256d __B, const int __C,
- const int __R)
-{
- return (__m256d) __builtin_ia32_rangepd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_range_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_rangepd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_range_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_rangepd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_range_round_ps (__m256 __A, __m256 __B, const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_rangeps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_range_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- __m256 __B, const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_rangeps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_range_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_rangeps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_reduce_round_pd (__m256d __A, const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) __A,
- __C,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_reduce_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) __A,
- __C,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_reduce_round_pd (__mmask8 __U, __m256d __A, const int __C,
- const int __R)
-{
- return (__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) __A,
- __C,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_reduce_round_ph (__m256h __A, const int __C, const int __R)
-{
- return (__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) __A,
- __C,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_reduce_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- const int __C, const int __R)
-{
- return (__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) __A,
- __C,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_reduce_round_ph (__mmask16 __U, __m256h __A, const int __C,
- const int __R)
-{
- return (__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) __A,
- __C,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_reduce_round_ps (__m256 __A, const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) __A,
- __C,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_reduce_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) __A,
- __C,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_reduce_round_ps (__mmask8 __U, __m256 __A, const int __C,
- const int __R)
-{
- return (__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) __A,
- __C,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_roundscale_round_pd (__m256d __A, const int __C, const int __R)
-{
- return
- (__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) __A,
- __C,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_roundscale_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- const int __C, const int __R)
-{
- return (__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) __A,
- __C,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_roundscale_round_pd (__mmask8 __U, __m256d __A, const int __C,
- const int __R)
-{
- return
- (__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) __A,
- __C,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_roundscale_round_ph (__m256h __A, const int __C, const int __R)
-{
- return
- (__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) __A,
- __C,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_roundscale_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- const int __C, const int __R)
-{
- return (__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) __A,
- __C,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_roundscale_round_ph (__mmask16 __U, __m256h __A, const int __C,
- const int __R)
-{
- return
- (__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) __A,
- __C,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_roundscale_round_ps (__m256 __A, const int __C, const int __R)
-{
- return
- (__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) __A,
- __C,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_roundscale_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- const int __C, const int __R)
-{
- return (__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) __A,
- __C,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_roundscale_round_ps (__mmask8 __U, __m256 __A, const int __C,
- const int __R)
-{
- return (__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) __A,
- __C,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_scalef_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return
- (__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_scalef_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_scalef_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_scalef_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return
- (__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_scalef_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_scalef_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_scalef_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_scalef_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_scalef_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_sqrt_round_pd (__m256d __A, const int __R)
-{
- return (__m256d) __builtin_ia32_sqrtpd256_mask_round ((__v4df) __A,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_sqrt_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256d) __builtin_ia32_sqrtpd256_mask_round ((__v4df) __A,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_sqrt_round_pd (__mmask8 __U, __m256d __A, const int __R)
-{
- return (__m256d) __builtin_ia32_sqrtpd256_mask_round ((__v4df) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_sqrt_round_ph (__m256h __A, const int __R)
-{
- return (__m256h) __builtin_ia32_sqrtph256_mask_round ((__v16hf) __A,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_sqrt_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256h) __builtin_ia32_sqrtph256_mask_round ((__v16hf) __A,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_sqrt_round_ph (__mmask16 __U, __m256h __A, const int __R)
-{
- return (__m256h) __builtin_ia32_sqrtph256_mask_round ((__v16hf) __A,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_sqrt_round_ps (__m256 __A, const int __R)
-{
- return (__m256) __builtin_ia32_sqrtps256_mask_round ((__v8sf) __A,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_sqrt_round_ps (__m256 __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256) __builtin_ia32_sqrtps256_mask_round ((__v8sf) __A,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_sqrt_round_ps (__mmask8 __U, __m256 __A, const int __R)
-{
- return (__m256) __builtin_ia32_sqrtps256_mask_round ((__v8sf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_sub_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_subpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_sub_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_subpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_sub_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_subpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_sub_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_subph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_sub_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_subph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_sub_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_subph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_sub_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_subps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_sub_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_subps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_sub_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_subps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-#else
-#define _mm256_add_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_add_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_add_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_add_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_add_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_add_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_add_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_add_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_add_round_ps(U, A, B, R)\
- ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cmp_round_pd_mask(A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cmp_round_ph_mask(A, B, C, R) \
- ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (C), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \
- ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (C), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cmp_round_ps_mask(A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepi32_ph(A, R) \
- ((__m128h) __builtin_ia32_vcvtdq2ph256_mask_round ((__v8si) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepi32_ph(W, U, A, R) \
- ((__m128h) __builtin_ia32_vcvtdq2ph256_mask_round ((__v8si) (A), \
- (__v8hf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepi32_ph(U, A, R) \
- ((__m128h) __builtin_ia32_vcvtdq2ph256_mask_round ((__v8si) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepi32_ps(A, R) \
- ((__m256) __builtin_ia32_cvtdq2ps256_mask_round ((__v8si) (A), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepi32_ps(W, U, A, R) \
- ((__m256) __builtin_ia32_cvtdq2ps256_mask_round ((__v8si) (A), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepi32_ps(U, A, R) \
- ((__m256) __builtin_ia32_cvtdq2ps256_mask_round ((__v8si) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundpd_ph(A, R) \
- ((__m128h) __builtin_ia32_vcvtpd2ph256_mask_round ((__v4df) (A), \
- (_mm_setzero_ph ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundpd_ph(W, U, A, R) \
- ((__m128h) __builtin_ia32_vcvtpd2ph256_mask_round ((__v4df) (A), \
- (__v8hf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundpd_ph(U, A, R) \
- ((__m128h) __builtin_ia32_vcvtpd2ph256_mask_round ((__v4df) (A), \
- (_mm_setzero_ph ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundpd_ps(A, R) \
- ((__m128) __builtin_ia32_cvtpd2ps256_mask_round ((__v4df) (A), \
- (__v4sf) \
- (_mm_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundpd_ps(W, U, A, R) \
- ((__m128) __builtin_ia32_cvtpd2ps256_mask_round ((__v4df) (A), \
- (__v4sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundpd_ps(U, A, R) \
- ((__m128) __builtin_ia32_cvtpd2ps256_mask_round ((__v4df) (A), \
- (__v4sf) \
- (_mm_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundpd_epi32(A, R) \
- ((__m128i) __builtin_ia32_cvtpd2dq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundpd_epi32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvtpd2dq256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundpd_epi32(U, A, R)\
- ((__m128i) __builtin_ia32_cvtpd2dq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundpd_epi64(A, R) \
- ((__m256i) __builtin_ia32_cvtpd2qq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundpd_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtpd2qq256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundpd_epi64(U, A, R) \
- ((__m256i) __builtin_ia32_cvtpd2qq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundpd_epu32(A, R) \
- ((__m128i) __builtin_ia32_cvtpd2udq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundpd_epu32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvtpd2udq256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundpd_epu32(U, A, R) \
- ((__m128i) __builtin_ia32_cvtpd2udq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundpd_epu64(A, R) \
- ((__m256i) __builtin_ia32_cvtpd2uqq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundpd_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtpd2uqq256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundpd_epu64(U, A, R) \
- ((__m256i) __builtin_ia32_cvtpd2uqq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_epi32(A, R) \
- ((__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_epi32(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_epi32(U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_pd(A, R) \
- ((__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_pd(W, U, A, R) \
- ((__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) (A), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_pd(U, A, R) \
- ((__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_ps(A, R) \
- ((__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) (A), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_ps(W, U, A, R) \
- ((__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) (A), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_ps(U, A, R) \
- ((__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtx_roundph_ps(A, R) \
- ((__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtx_roundph_ps(W, U, A, R) \
- ((__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) (A), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtx_roundph_ps(U, A, R) \
- ((__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_epi64(A, R) \
- ((__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_epi64(U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_epu32(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_epu32(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_epu32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_epu16(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_undefined_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_epu16(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_epu16(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cvt_roundph_epi16(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_undefined_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundph_epi16(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundph_epi16(U, A, R) \
- ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cvt_roundps_pd(A, R) \
- ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundps_pd(W, U, A, R) \
- ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundps_pd(U, A, R) \
- ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtx_roundps_ph(A, R) \
- ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtx_roundps_ph(W, U, A, R) \
- ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
- (__v8hf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtx_roundps_ph(U, A, R) \
- ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundps_epi32(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundps_epi32(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundps_epi32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundps_epi64(A, R) \
- ((__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundps_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundps_epi64(U, A, R) \
- ((__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundps_epu32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundps_epu32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundps_epu32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundps_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundps_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundps_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepi64_pd(A, R) \
- ((__m256d) __builtin_ia32_cvtqq2pd256_mask_round ((__v4di) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepi64_pd(W, U, A, R) \
- ((__m256d) __builtin_ia32_cvtqq2pd256_mask_round ((__v4di) (A), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepi64_pd(U, A, R) \
- ((__m256d) __builtin_ia32_cvtqq2pd256_mask_round ((__v4di) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepi64_ph(A, R) \
- ((__m128h) __builtin_ia32_vcvtqq2ph256_mask_round ((__v4di) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepi64_ph(W, U, A, R) \
- ((__m128h) __builtin_ia32_vcvtqq2ph256_mask_round ((__v4di) (A), \
- (__v8hf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepi64_ph(U, A, R) \
- ((__m128h) __builtin_ia32_vcvtqq2ph256_mask_round ((__v4di) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepi64_ps(A, R) \
- ((__m128) __builtin_ia32_cvtqq2ps256_mask_round ((__v4di) (A), \
- (__v4sf) \
- (_mm_setzero_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepi64_ps(W, U, A, R) \
- ((__m128) __builtin_ia32_cvtqq2ps256_mask_round ((__v4di) (A), \
- (__v4sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepi64_ps(U, A, R) \
- ((__m128) __builtin_ia32_cvtqq2ps256_mask_round ((__v4di) (A), \
- (__v4sf) \
- (_mm_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundpd_epi32(A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundpd_epi32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dq256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundpd_epi32(U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundpd_epi64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2qq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundpd_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttpd2qq256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundpd_epi64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2qq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundpd_epu32(A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2udq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundpd_epu32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2udq256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundpd_epu32(U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2udq256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundpd_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundpd_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundpd_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundph_epi32(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundph_epi32(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundph_epi32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundph_epi64(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundph_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundph_epi64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundph_epu32(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundph_epu32(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundph_epu32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundph_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundph_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundph_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundph_epu16(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundph_epu16(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundph_epu16(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cvtt_roundph_epi16(A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundph_epi16(W, U, A, R) \
- ((__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundph_epi16(U, A, R) \
- ((__m256i) \
- __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cvtt_roundps_epi32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundps_epi32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundps_epi32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundps_epi64(A, R) \
- ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundps_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundps_epi64(U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()),\
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundps_epu32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundps_epu32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundps_epu32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtt_roundps_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtt_roundps_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtt_roundps_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepu32_ph(A, R) \
- ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepu32_ph(W, U, A, R) \
- ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
- (__v8hf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepu32_ph(U, A, R) \
- ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepu32_ps(A, R) \
- ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepu32_ps(W, U, A, R) \
- ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepu32_ps(U, A, R) \
- ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepu64_pd(A, R) \
- ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepu64_pd(W, U, A, R) \
- ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepu64_pd(U, A, R) \
- ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepu64_ph(A, R) \
- ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepu64_ph(W, U, A, R) \
- ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
- (__v8hf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepu64_ph(U, A, R) \
- ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
- (__v8hf) \
- (_mm_setzero_ph ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepu64_ps(A, R) \
- ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
- (__v4sf) \
- (_mm_setzero_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepu64_ps(W, U, A, R) \
- ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
- (__v4sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepu64_ps(U, A, R) \
- ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
- (__v4sf) \
- (_mm_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvt_roundepu16_ph(A, R) \
- ((__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepu16_ph(W, U, A, R) \
- ((__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) (A), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepu16_ph(U, A, R) \
- ((__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cvt_roundepi16_ph(A, R) \
- ((__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cvt_roundepi16_ph(W, U, A, R) \
- ((__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) (A), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_cvt_roundepi16_ph(U, A, R) \
- ((__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_div_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_div_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_div_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_div_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_div_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_div_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_div_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_divps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_div_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_divps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_div_round_ps(U, A, B, R) \
- ((__m256) __builtin_ia32_divps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_fcmadd_round_pch(A, B, D, R) \
- (__m256h) __builtin_ia32_vfcmaddcph256_round ((A), (B), (D), (R))
-
-#define _mm256_mask_fcmadd_round_pch(A, U, B, D, R) \
- ((__m256h) __builtin_ia32_vfcmaddcph256_mask_round ((__v16hf)(A), \
- (__v16hf)(B), \
- (__v16hf)(D), \
- (U), (R)))
-
-#define _mm256_mask3_fcmadd_round_pch(A, B, D, U, R) \
- ((__m256h) __builtin_ia32_vfcmaddcph256_mask3_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_maskz_fcmadd_round_pch(U, A, B, D, R) \
- ((__m256h) __builtin_ia32_vfcmaddcph256_maskz_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_fcmul_round_pch(A, B, R) \
- ((__m256h) __builtin_ia32_vfcmulcph256_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (R)))
-
-#define _mm256_mask_fcmul_round_pch(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_fcmul_round_pch(U, A, B, R) \
- ((__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_fixupimm_round_pd(A, B, D, C, R) \
- ((__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4di) (D), \
- (C), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_fixupimm_round_pd(A, U, B, D, C, R)\
- ((__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4di) (D), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_fixupimm_round_pd(U, A, B, D, C, R)\
- ((__m256d) __builtin_ia32_fixupimmpd256_maskz_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4di) (D), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_fixupimm_round_ps(A, B, D, C, R)\
- ((__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8si) (D), \
- (C), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_fixupimm_round_ps(A, U, B, D, C, R)\
- ((__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8si) (D), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_fixupimm_round_ps(U, A, B, D, C, R)\
- ((__m256) __builtin_ia32_fixupimmps256_maskz_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8si) (D), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_fmadd_round_pd(A, B, D, R) \
- ((__m256d) __builtin_ia32_vfmaddpd256_mask_round (A, B, D, -1, R))
-
-#define _mm256_mask_fmadd_round_pd(A, U, B, D, R) \
- ((__m256d) __builtin_ia32_vfmaddpd256_mask_round (A, B, D, U, R))
-
-#define _mm256_mask3_fmadd_round_pd(A, B, D, U, R) \
- ((__m256d) __builtin_ia32_vfmaddpd256_mask3_round (A, B, D, U, R))
-
-#define _mm256_maskz_fmadd_round_pd(U, A, B, D, R) \
- ((__m256d) __builtin_ia32_vfmaddpd256_maskz_round (A, B, D, U, R))
-
-#define _mm256_fmadd_round_ph(A, B, D, R) \
- ((__m256h) __builtin_ia32_vfmaddph256_mask_round (A, B, D, -1, R))
-
-#define _mm256_mask_fmadd_round_ph(A, U, B, D, R) \
- ((__m256h) __builtin_ia32_vfmaddph256_mask_round (A, B, D, U, R))
-
-#define _mm256_mask3_fmadd_round_ph(A, B, D, U, R) \
- ((__m256h) __builtin_ia32_vfmaddph256_mask3_round (A, B, D, U, R))
-
-#define _mm256_maskz_fmadd_round_ph(U, A, B, D, R) \
- ((__m256h) __builtin_ia32_vfmaddph256_maskz_round (A, B, D, U, R))
-
-#define _mm256_fmadd_round_ps(A, B, D, R) \
- ((__m256)__builtin_ia32_vfmaddps256_mask_round (A, B, D, -1, R))
-
-#define _mm256_mask_fmadd_round_ps(A, U, B, D, R) \
- ((__m256)__builtin_ia32_vfmaddps256_mask_round (A, B, D, U, R))
-
-#define _mm256_mask3_fmadd_round_ps(A, B, D, U, R) \
- ((__m256)__builtin_ia32_vfmaddps256_mask3_round (A, B, D, U, R))
-
-#define _mm256_maskz_fmadd_round_ps(U, A, B, D, R) \
- ((__m256)__builtin_ia32_vfmaddps256_maskz_round (A, B, D, U, R))
-
-#define _mm256_fmadd_round_pch(A, B, D, R) \
- (__m256h) __builtin_ia32_vfmaddcph256_round ((A), (B), (D), (R))
-
-#define _mm256_mask_fmadd_round_pch(A, U, B, D, R) \
- ((__m256h) __builtin_ia32_vfmaddcph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (D), \
- (U), (R)))
-
-#define _mm256_mask3_fmadd_round_pch(A, B, D, U, R) \
- (__m256h) __builtin_ia32_vfmaddcph256_mask3_round ((A), (B), (D), (U), (R))
-
-#define _mm256_maskz_fmadd_round_pch(U, A, B, D, R) \
- (__m256h) __builtin_ia32_vfmaddcph256_maskz_round ((A), (B), (D), (U), (R))
-
-#define _mm256_fmaddsub_round_pd(A, B, D, R) \
- (__m256d) __builtin_ia32_vfmaddsubpd256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fmaddsub_round_pd(A, U, B, D, R) \
- (__m256d) __builtin_ia32_vfmaddsubpd256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fmaddsub_round_pd(A, B, D, U, R) \
- (__m256d)__builtin_ia32_vfmaddsubpd256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fmaddsub_round_pd(U, A, B, D, R) \
- (__m256d)__builtin_ia32_vfmaddsubpd256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fmaddsub_round_ph(A, B, D, R) \
- ((__m256h)__builtin_ia32_vfmaddsubph256_mask_round ((A), (B), (D), -1, (R)))
-
-#define _mm256_mask_fmaddsub_round_ph(A, U, B, D, R) \
- ((__m256h)__builtin_ia32_vfmaddsubph256_mask_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_mask3_fmaddsub_round_ph(A, B, D, U, R) \
- ((__m256h)__builtin_ia32_vfmaddsubph256_mask3_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_maskz_fmaddsub_round_ph(U, A, B, D, R) \
- ((__m256h)__builtin_ia32_vfmaddsubph256_maskz_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_fmaddsub_round_ps(A, B, D, R) \
- (__m256)__builtin_ia32_vfmaddsubps256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fmaddsub_round_ps(A, U, B, D, R) \
- (__m256)__builtin_ia32_vfmaddsubps256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fmaddsub_round_ps(A, B, D, U, R) \
- (__m256)__builtin_ia32_vfmaddsubps256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fmaddsub_round_ps(U, A, B, D, R) \
- (__m256)__builtin_ia32_vfmaddsubps256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fmsub_round_pd(A, B, D, R) \
- (__m256d)__builtin_ia32_vfmsubpd256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fmsub_round_pd(A, U, B, D, R) \
- (__m256d)__builtin_ia32_vfmsubpd256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fmsub_round_pd(A, B, D, U, R) \
- (__m256d)__builtin_ia32_vfmsubpd256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fmsub_round_pd(U, A, B, D, R) \
- (__m256d)__builtin_ia32_vfmsubpd256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fmsub_round_ph(A, B, D, R) \
- ((__m256h)__builtin_ia32_vfmsubph256_mask_round ((A), (B), (D), -1, (R)))
-
-#define _mm256_mask_fmsub_round_ph(A, U, B, D, R) \
- ((__m256h)__builtin_ia32_vfmsubph256_mask_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_mask3_fmsub_round_ph(A, B, D, U, R) \
- ((__m256h)__builtin_ia32_vfmsubph256_mask3_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_maskz_fmsub_round_ph(U, A, B, D, R) \
- ((__m256h)__builtin_ia32_vfmsubph256_maskz_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_fmsub_round_ps(A, B, D, R) \
- (__m256)__builtin_ia32_vfmsubps256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fmsub_round_ps(A, U, B, D, R) \
- (__m256)__builtin_ia32_vfmsubps256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fmsub_round_ps(A, B, D, U, R) \
- (__m256)__builtin_ia32_vfmsubps256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fmsub_round_ps(U, A, B, D, R) \
- (__m256)__builtin_ia32_vfmsubps256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fmsubadd_round_pd(A, B, D, R) \
- (__m256d)__builtin_ia32_vfmsubaddpd256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fmsubadd_round_pd(A, U, B, D, R) \
- (__m256d)__builtin_ia32_vfmsubaddpd256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fmsubadd_round_pd(A, B, D, U, R) \
- (__m256d)__builtin_ia32_vfmsubaddpd256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fmsubadd_round_pd(U, A, B, D, R) \
- (__m256d)__builtin_ia32_vfmsubaddpd256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fmsubadd_round_ph(A, B, D, R) \
- ((__m256h)__builtin_ia32_vfmsubaddph256_mask_round ((A), (B), (D), -1, (R)))
-
-#define _mm256_mask_fmsubadd_round_ph(A, U, B, D, R) \
- ((__m256h)__builtin_ia32_vfmsubaddph256_mask_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_mask3_fmsubadd_round_ph(A, B, D, U, R) \
- ((__m256h)__builtin_ia32_vfmsubaddph256_mask3_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_maskz_fmsubadd_round_ph(U, A, B, D, R) \
- ((__m256h)__builtin_ia32_vfmsubaddph256_maskz_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_fmsubadd_round_ps(A, B, D, R) \
- (__m256)__builtin_ia32_vfmsubaddps256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fmsubadd_round_ps(A, U, B, D, R) \
- (__m256)__builtin_ia32_vfmsubaddps256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fmsubadd_round_ps(A, B, D, U, R) \
- (__m256)__builtin_ia32_vfmsubaddps256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fmsubadd_round_ps(U, A, B, D, R) \
- (__m256)__builtin_ia32_vfmsubaddps256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fmul_round_pch(B, D, R) \
- ((__m256h) __builtin_ia32_vfmulcph256_round ((__v16hf) (B), \
- (__v16hf) (D), \
- (R)))
-
-#define _mm256_mask_fmul_round_pch(A, U, B, D, R) \
- ((__m256h) __builtin_ia32_vfmulcph256_mask_round ((__v16hf) (B), \
- (__v16hf) (D), \
- (__v16hf) (A), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_fmul_round_pch(U, B, D, R) \
- ((__m256h) __builtin_ia32_vfmulcph256_mask_round ((__v16hf) (B), \
- (__v16hf) (D), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_fnmadd_round_pd(A, B, D, R) \
- (__m256d)__builtin_ia32_vfnmaddpd256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fnmadd_round_pd(A, U, B, D, R) \
- (__m256d)__builtin_ia32_vfnmaddpd256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fnmadd_round_pd(A, B, D, U, R) \
- (__m256d)__builtin_ia32_vfnmaddpd256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fnmadd_round_pd(U, A, B, D, R) \
- (__m256d)__builtin_ia32_vfnmaddpd256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fnmadd_round_ph(A, B, D, R) \
- ((__m256h)__builtin_ia32_vfnmaddph256_mask_round ((A), (B), (D), -1, (R)))
-
-#define _mm256_mask_fnmadd_round_ph(A, U, B, D, R) \
- ((__m256h)__builtin_ia32_vfnmaddph256_mask_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_mask3_fnmadd_round_ph(A, B, D, U, R) \
- ((__m256h)__builtin_ia32_vfnmaddph256_mask3_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_maskz_fnmadd_round_ph(U, A, B, D, R) \
- ((__m256h)__builtin_ia32_vfnmaddph256_maskz_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_fnmadd_round_ps(A, B, D, R) \
- (__m256)__builtin_ia32_vfnmaddps256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fnmadd_round_ps(A, U, B, D, R) \
- (__m256)__builtin_ia32_vfnmaddps256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fnmadd_round_ps(A, B, D, U, R) \
- (__m256)__builtin_ia32_vfnmaddps256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fnmadd_round_ps(U, A, B, D, R) \
- (__m256)__builtin_ia32_vfnmaddps256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fnmsub_round_pd(A, B, D, R) \
- (__m256d)__builtin_ia32_vfnmsubpd256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fnmsub_round_pd(A, U, B, D, R) \
- (__m256d)__builtin_ia32_vfnmsubpd256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fnmsub_round_pd(A, B, D, U, R) \
- (__m256d)__builtin_ia32_vfnmsubpd256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fnmsub_round_pd(U, A, B, D, R) \
- (__m256d)__builtin_ia32_vfnmsubpd256_maskz_round (A, B, D, U, R)
-
-#define _mm256_fnmsub_round_ph(A, B, D, R) \
- ((__m256h)__builtin_ia32_vfnmsubph256_mask_round ((A), (B), (D), -1, (R)))
-
-#define _mm256_mask_fnmsub_round_ph(A, U, B, D, R) \
- ((__m256h)__builtin_ia32_vfnmsubph256_mask_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_mask3_fnmsub_round_ph(A, B, D, U, R) \
- ((__m256h)__builtin_ia32_vfnmsubph256_mask3_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_maskz_fnmsub_round_ph(U, A, B, D, R) \
- ((__m256h)__builtin_ia32_vfnmsubph256_maskz_round ((A), (B), (D), (U), (R)))
-
-#define _mm256_fnmsub_round_ps(A, B, D, R) \
- (__m256)__builtin_ia32_vfnmsubps256_mask_round (A, B, D, -1, R)
-
-#define _mm256_mask_fnmsub_round_ps(A, U, B, D, R) \
- (__m256)__builtin_ia32_vfnmsubps256_mask_round (A, B, D, U, R)
-
-#define _mm256_mask3_fnmsub_round_ps(A, B, D, U, R) \
- (__m256)__builtin_ia32_vfnmsubps256_mask3_round (A, B, D, U, R)
-
-#define _mm256_maskz_fnmsub_round_ps(U, A, B, D, R) \
- (__m256)__builtin_ia32_vfnmsubps256_maskz_round (A, B, D, U, R)
-
-#define _mm256_getexp_round_pd(A, R) \
- ((__m256d) __builtin_ia32_getexppd256_mask_round ((__v4df) (A), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_getexp_round_pd(W, U, A, R) \
- ((__m256d) __builtin_ia32_getexppd256_mask_round ((__v4df) (A), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_getexp_round_pd(U, A, R) \
- ((__m256d) __builtin_ia32_getexppd256_mask_round ((__v4df) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_getexp_round_ph(A, R)\
- ((__m256h) __builtin_ia32_getexpph256_mask_round ((__v16hf) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_getexp_round_ph(W, U, A, R)\
- ((__m256h) __builtin_ia32_getexpph256_mask_round ((__v16hf) (A), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_getexp_round_ph(U, A, R)\
- ((__m256h) __builtin_ia32_getexpph256_mask_round ((__v16hf) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_getexp_round_ps(A, R)\
- ((__m256) __builtin_ia32_getexpps256_mask_round ((__v8sf) (A), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_getexp_round_ps(W, U, A, R)\
- ((__m256) __builtin_ia32_getexpps256_mask_round ((__v8sf) (A), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_getexp_round_ps(U, A, R)\
- ((__m256) __builtin_ia32_getexpps256_mask_round ((__v8sf) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_getmant_round_pd(A, B, C, R) \
- ((__m256d)__builtin_ia32_getmantpd256_mask_round ((__v4df) (__m256d) (A), \
- (int) (((C) << 2) | (B)), \
- (__v4df) (__m256d) \
- _mm256_undefined_pd (), \
- (__mmask8)-1, \
- (R)))
-
-#define _mm256_mask_getmant_round_pd(W, U, A, B, C, R) \
- ((__m256d)__builtin_ia32_getmantpd256_mask_round ((__v4df) (__m256d) (A), \
- (int) (((C) << 2) | (B)), \
- (__v4df) (__m256d) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_getmant_round_pd(U, A, B, C, R) \
- ((__m256d)__builtin_ia32_getmantpd256_mask_round ((__v4df) (__m256d) (A), \
- (int) (((C) << 2) | (B)), \
- (__v4df) (__m256d) \
- _mm256_setzero_pd (), \
- (__mmask8) (U), \
- (R)))
-
-
-#define _mm256_getmant_round_ph(A, B, C, R) \
- ((__m256h)__builtin_ia32_getmantph256_mask_round ((__v16hf) (__m256h) (A), \
- (int) (((C)<<2) | (B)), \
- (__v16hf) (__m256h) \
- _mm256_undefined_ph (), \
- (__mmask16)-1, \
- (R)))
-
-#define _mm256_mask_getmant_round_ph(W, U, A, B, C, R) \
- ((__m256h)__builtin_ia32_getmantph256_mask_round ((__v16hf) (__m256h) (A), \
- (int) (((C)<<2) | (B)), \
- (__v16hf) (__m256h) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_getmant_round_ph(U, A, B, C, R) \
- ((__m256h)__builtin_ia32_getmantph256_mask_round ((__v16hf) (__m256h) (A), \
- (int) (((C)<<2) | (B)), \
- (__v16hf) (__m256h) \
- _mm256_setzero_ph (), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_getmant_round_ps(A, B, C, R) \
- ((__m256)__builtin_ia32_getmantps256_mask_round ((__v8sf) (__m256) (A), \
- (int) (((C)<<2) | (B)), \
- (__v8sf) (__m256) \
- _mm256_undefined_ps (), \
- (__mmask8)-1, \
- (R)))
-
-#define _mm256_mask_getmant_round_ps(W, U, A, B, C, R) \
- ((__m256)__builtin_ia32_getmantps256_mask_round ((__v8sf) (__m256) (A), \
- (int) (((C)<<2) | (B)), \
- (__v8sf) (__m256) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_getmant_round_ps(U, A, B, C, R) \
- ((__m256)__builtin_ia32_getmantps256_mask_round ((__v8sf) (__m256) (A), \
- (int) (((C)<<2) | (B)), \
- (__v8sf) (__m256) \
- _mm256_setzero_ps (), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_max_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_max_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_max_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_max_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_max_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_max_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_max_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_max_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_max_round_ps(U, A, B, R) \
- ((__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_min_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_min_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_min_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_min_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_min_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_min_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_min_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_minps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_min_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_minps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_min_round_ps(U, A, B, R) \
- ((__m256) __builtin_ia32_minps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_mul_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_mulpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_mul_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_mulpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_mul_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_mulpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_mul_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_mulph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_mul_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_mulph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_mul_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_mulph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_mul_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_mulps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_mul_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_mulps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_mul_round_ps(U, A, B, R) \
- ((__m256) __builtin_ia32_mulps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_range_round_pd(A, B, C, R) \
- ((__m256d) __builtin_ia32_rangepd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_range_round_pd(W, U, A, B, C, R) \
- ((__m256d) __builtin_ia32_rangepd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_range_round_pd(U, A, B, C, R) \
- ((__m256d) __builtin_ia32_rangepd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_range_round_ps(A, B, C, R) \
- ((__m256) __builtin_ia32_rangeps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_range_round_ps(W, U, A, B, C, R) \
- ((__m256) __builtin_ia32_rangeps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_range_round_ps(U, A, B, C, R) \
- ((__m256) __builtin_ia32_rangeps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_reduce_round_pd(A, C, R) \
- ((__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) (A), \
- (C), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_reduce_round_pd(W, U, A, C, R) \
- ((__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) (A), \
- (C), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_reduce_round_pd(U, A, C, R) \
- ((__m256d) __builtin_ia32_reducepd256_mask_round ((__v4df) (A), \
- (C), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_reduce_round_ph(A, C, R) \
- ((__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) (A), \
- (C), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_reduce_round_ph(W, U, A, C, R) \
- ((__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) (A), \
- (C), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_reduce_round_ph(U, A, C, R) \
- ((__m256h) __builtin_ia32_reduceph256_mask_round ((__v16hf) (A), \
- (C), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_reduce_round_ps(A, C, R) \
- ((__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) (A), \
- (C), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_reduce_round_ps(W, U, A, C, R) \
- ((__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) (A), \
- (C), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_reduce_round_ps(U, A, C, R) \
- ((__m256) __builtin_ia32_reduceps256_mask_round ((__v8sf) (A), \
- (C), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_roundscale_round_pd(A, C, R) \
- ((__m256d) \
- __builtin_ia32_rndscalepd256_mask_round ((__v4df) (A), \
- (C), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_roundscale_round_pd(W, U, A, C, R) \
- ((__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) (A), \
- (C), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_roundscale_round_pd(U, A, C, R) \
- ((__m256d) __builtin_ia32_rndscalepd256_mask_round ((__v4df) (A), \
- (C), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_roundscale_round_ph(A, C, R) \
- ((__m256h) \
- __builtin_ia32_rndscaleph256_mask_round ((__v16hf) (A), \
- (C), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_roundscale_round_ph(W, U, A, C, R) \
- ((__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) (A), \
- (C), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_roundscale_round_ph(U, A, C, R) \
- ((__m256h) __builtin_ia32_rndscaleph256_mask_round ((__v16hf) (A), \
- (C), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_roundscale_round_ps(A, C, R) \
- ((__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) (A), \
- (C), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_roundscale_round_ps(W, U, A, C, R) \
- ((__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) (A), \
- (C), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_roundscale_round_ps(U, A, C, R) \
- ((__m256) __builtin_ia32_rndscaleps256_mask_round ((__v8sf) (A), \
- (C), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_scalef_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_scalef_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_scalef_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_scalefpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_scalef_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_scalef_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_scalef_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_scalefph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_scalef_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_scalef_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_scalef_round_ps(U, A, B, R) \
- ((__m256) __builtin_ia32_scalefps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_sqrt_round_pd(A, R) \
- ((__m256d) __builtin_ia32_sqrtpd256_mask_round ((__v4df) (A), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_sqrt_round_pd(W, U, A, R) \
- ((__m256d) __builtin_ia32_sqrtpd256_mask_round ((__v4df) (A), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_sqrt_round_pd(U, A, R) \
- ((__m256d) __builtin_ia32_sqrtpd256_mask_round ((__v4df) (A), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_sqrt_round_ph(A, R) \
- ((__m256h) __builtin_ia32_sqrtph256_mask_round ((__v16hf) (A), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_sqrt_round_ph(W, U, A, R) \
- ((__m256h) __builtin_ia32_sqrtph256_mask_round ((__v16hf) (A), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_sqrt_round_ph(U, A, R) \
- ((__m256h) __builtin_ia32_sqrtph256_mask_round ((__v16hf) (A), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_sqrt_round_ps(A, R) \
- ((__m256) __builtin_ia32_sqrtps256_mask_round ((__v8sf) (A), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_sqrt_round_ps(W, U, A, R) \
- ((__m256) __builtin_ia32_sqrtps256_mask_round ((__v8sf) (A), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_sqrt_round_ps(U, A, R) \
- ((__m256) __builtin_ia32_sqrtps256_mask_round ((__v8sf) (A), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_sub_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_subpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_sub_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_subpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_sub_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_subpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_sub_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_subph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_sub_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_subph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_sub_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_subph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_sub_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_subps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_sub_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_subps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_sub_round_ps(U, A, B, R) \
- ((__m256) __builtin_ia32_subps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-#endif
-
-#define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
-#define _mm256_mask_cmul_round_pch(W, U, A, B, R) \
- _mm256_mask_fcmul_round_pch ((W), (U), (A), (B), (R))
-#define _mm256_maskz_cmul_round_pch(U, A, B, R) \
- _mm256_maskz_fcmul_round_pch ((U), (A), (B), (R))
-
-#define _mm256_mul_round_pch(A, B, R) _mm256_fmul_round_pch ((A), (B), (R))
-#define _mm256_mask_mul_round_pch(W, U, A, B, R) \
- _mm256_mask_fmul_round_pch ((W), (U), (A), (B), (R))
-#define _mm256_maskz_mul_round_pch(U, A, B, R) \
- _mm256_maskz_fmul_round_pch ((U), (A), (B), (R))
-
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
-#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
-
-#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx10_2satcvtintrin.h b/gcc/config/i386/avx10_2satcvtintrin.h
index 9588bb1..78bcd72 100644
--- a/gcc/config/i386/avx10_2satcvtintrin.h
+++ b/gcc/config/i386/avx10_2satcvtintrin.h
@@ -28,15 +28,15 @@
#ifndef _AVX10_2SATCVTINTRIN_H_INCLUDED
#define _AVX10_2SATCVTINTRIN_H_INCLUDED
-#if !defined (__AVX10_2_256__)
+#if !defined (__AVX10_2__)
#pragma GCC push_options
#pragma GCC target("avx10.2")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
+#define __DISABLE_AVX10_2__
+#endif /* __AVX10_2__ */
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvtbf16_epi8 (__m128bh __A)
+_mm_ipcvts_bf16_epi8 (__m128bh __A)
{
return (__m128i) __builtin_ia32_cvtbf162ibs128_mask ((__v8bf) __A,
(__v8hi)
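(A minimal caller-side sketch of the renamed intrinsic, assuming a compiler with AVX10.2 support; the helper name below is illustrative only, and the operand and result types are exactly those shown in the hunk above, with only the _mm_ipcvts_bf16_epi8 spelling being new.)

  #include <immintrin.h>

  /* Saturating bf16 -> signed 8-bit (stored in 16-bit lanes) conversion.  */
  __attribute__ ((target ("avx10.2")))
  static __m128i
  bf16_to_sat_i8 (__m128bh x)
  {
    return _mm_ipcvts_bf16_epi8 (x);
  }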
@@ -46,7 +46,7 @@ _mm_ipcvtbf16_epi8 (__m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvtbf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A)
+_mm_mask_ipcvts_bf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvtbf162ibs128_mask ((__v8bf) __A,
(__v8hi) __W,
@@ -55,7 +55,7 @@ _mm_mask_ipcvtbf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvtbf16_epi8 (__mmask8 __U, __m128bh __A)
+_mm_maskz_ipcvts_bf16_epi8 (__mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvtbf162ibs128_mask ((__v8bf) __A,
(__v8hi)
@@ -65,7 +65,7 @@ _mm_maskz_ipcvtbf16_epi8 (__mmask8 __U, __m128bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtbf16_epi8 (__m256bh __A)
+_mm256_ipcvts_bf16_epi8 (__m256bh __A)
{
return
(__m256i) __builtin_ia32_cvtbf162ibs256_mask ((__v16bf) __A,
@@ -76,7 +76,7 @@ _mm256_ipcvtbf16_epi8 (__m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtbf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A)
+_mm256_mask_ipcvts_bf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A)
{
return (__m256i) __builtin_ia32_cvtbf162ibs256_mask ((__v16bf) __A,
(__v16hi) __W,
@@ -85,7 +85,7 @@ _mm256_mask_ipcvtbf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtbf16_epi8 (__mmask16 __U, __m256bh __A)
+_mm256_maskz_ipcvts_bf16_epi8 (__mmask16 __U, __m256bh __A)
{
return
(__m256i) __builtin_ia32_cvtbf162ibs256_mask ((__v16bf) __A,
@@ -96,7 +96,7 @@ _mm256_maskz_ipcvtbf16_epi8 (__mmask16 __U, __m256bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvtbf16_epu8 (__m128bh __A)
+_mm_ipcvts_bf16_epu8 (__m128bh __A)
{
return
(__m128i) __builtin_ia32_cvtbf162iubs128_mask ((__v8bf) __A,
@@ -107,7 +107,7 @@ _mm_ipcvtbf16_epu8 (__m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvtbf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A)
+_mm_mask_ipcvts_bf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvtbf162iubs128_mask ((__v8bf) __A,
(__v8hi) __W,
@@ -116,7 +116,7 @@ _mm_mask_ipcvtbf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvtbf16_epu8 (__mmask8 __U, __m128bh __A)
+_mm_maskz_ipcvts_bf16_epu8 (__mmask8 __U, __m128bh __A)
{
return
(__m128i) __builtin_ia32_cvtbf162iubs128_mask ((__v8bf) __A,
@@ -127,7 +127,7 @@ _mm_maskz_ipcvtbf16_epu8 (__mmask8 __U, __m128bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtbf16_epu8 (__m256bh __A)
+_mm256_ipcvts_bf16_epu8 (__m256bh __A)
{
return
(__m256i) __builtin_ia32_cvtbf162iubs256_mask ((__v16bf) __A,
@@ -138,7 +138,7 @@ _mm256_ipcvtbf16_epu8 (__m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtbf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A)
+_mm256_mask_ipcvts_bf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A)
{
return (__m256i) __builtin_ia32_cvtbf162iubs256_mask ((__v16bf) __A,
(__v16hi) __W,
@@ -147,7 +147,7 @@ _mm256_mask_ipcvtbf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtbf16_epu8 (__mmask16 __U, __m256bh __A)
+_mm256_maskz_ipcvts_bf16_epu8 (__mmask16 __U, __m256bh __A)
{
return
(__m256i) __builtin_ia32_cvtbf162iubs256_mask ((__v16bf) __A,
@@ -158,7 +158,7 @@ _mm256_maskz_ipcvtbf16_epu8 (__mmask16 __U, __m256bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvtph_epi8 (__m128h __A)
+_mm_ipcvts_ph_epi8 (__m128h __A)
{
return (__m128i) __builtin_ia32_cvtph2ibs128_mask ((__v8hf) __A,
(__v8hi)
@@ -168,7 +168,7 @@ _mm_ipcvtph_epi8 (__m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvtph_epi8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_ipcvts_ph_epi8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvtph2ibs128_mask ((__v8hf) __A,
(__v8hi) __W,
@@ -177,7 +177,7 @@ _mm_mask_ipcvtph_epi8 (__m128i __W, __mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvtph_epi8 (__mmask8 __U, __m128h __A)
+_mm_maskz_ipcvts_ph_epi8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvtph2ibs128_mask ((__v8hf) __A,
(__v8hi)
@@ -187,7 +187,7 @@ _mm_maskz_ipcvtph_epi8 (__mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvtph_epu8 (__m128h __A)
+_mm_ipcvts_ph_epu8 (__m128h __A)
{
return (__m128i) __builtin_ia32_cvtph2iubs128_mask ((__v8hf) __A,
(__v8hi)
@@ -197,7 +197,7 @@ _mm_ipcvtph_epu8 (__m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvtph_epu8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_ipcvts_ph_epu8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvtph2iubs128_mask ((__v8hf) __A,
(__v8hi) __W,
@@ -206,7 +206,7 @@ _mm_mask_ipcvtph_epu8 (__m128i __W, __mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvtph_epu8 (__mmask8 __U, __m128h __A)
+_mm_maskz_ipcvts_ph_epu8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvtph2iubs128_mask ((__v8hf) __A,
(__v8hi)
@@ -216,7 +216,7 @@ _mm_maskz_ipcvtph_epu8 (__mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvtps_epi8 (__m128 __A)
+_mm_ipcvts_ps_epi8 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvtps2ibs128_mask ((__v4sf) __A,
(__v4si)
@@ -226,7 +226,7 @@ _mm_ipcvtps_epi8 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvtps_epi8 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_ipcvts_ps_epi8 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvtps2ibs128_mask ((__v4sf) __A,
(__v4si) __W,
@@ -235,7 +235,7 @@ _mm_mask_ipcvtps_epi8 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvtps_epi8 (__mmask8 __U, __m128 __A)
+_mm_maskz_ipcvts_ps_epi8 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvtps2ibs128_mask ((__v4sf) __A,
(__v4si)
@@ -245,7 +245,7 @@ _mm_maskz_ipcvtps_epi8 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvtps_epu8 (__m128 __A)
+_mm_ipcvts_ps_epu8 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvtps2iubs128_mask ((__v4sf) __A,
(__v4si)
@@ -255,7 +255,7 @@ _mm_ipcvtps_epu8 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvtps_epu8 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_ipcvts_ps_epu8 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvtps2iubs128_mask ((__v4sf) __A,
(__v4si) __W,
@@ -264,7 +264,7 @@ _mm_mask_ipcvtps_epu8 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvtps_epu8 (__mmask8 __U, __m128 __A)
+_mm_maskz_ipcvts_ps_epu8 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvtps2iubs128_mask ((__v4sf) __A,
(__v4si)
@@ -274,7 +274,7 @@ _mm_maskz_ipcvtps_epu8 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvttbf16_epi8 (__m128bh __A)
+_mm_ipcvtts_bf16_epi8 (__m128bh __A)
{
return
(__m128i) __builtin_ia32_cvttbf162ibs128_mask ((__v8bf) __A,
@@ -285,7 +285,7 @@ _mm_ipcvttbf16_epi8 (__m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvttbf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A)
+_mm_mask_ipcvtts_bf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvttbf162ibs128_mask ((__v8bf) __A,
(__v8hi) __W,
@@ -294,7 +294,7 @@ _mm_mask_ipcvttbf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvttbf16_epi8 (__mmask8 __U, __m128bh __A)
+_mm_maskz_ipcvtts_bf16_epi8 (__mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvttbf162ibs128_mask ((__v8bf) __A,
(__v8hi)
@@ -304,7 +304,7 @@ _mm_maskz_ipcvttbf16_epi8 (__mmask8 __U, __m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvttbf16_epu8 (__m128bh __A)
+_mm_ipcvtts_bf16_epu8 (__m128bh __A)
{
return
(__m128i) __builtin_ia32_cvttbf162iubs128_mask ((__v8bf) __A,
@@ -315,7 +315,7 @@ _mm_ipcvttbf16_epu8 (__m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvttbf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A)
+_mm_mask_ipcvtts_bf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvttbf162iubs128_mask ((__v8bf) __A,
(__v8hi) __W,
@@ -324,7 +324,7 @@ _mm_mask_ipcvttbf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvttbf16_epu8 (__mmask8 __U, __m128bh __A)
+_mm_maskz_ipcvtts_bf16_epu8 (__mmask8 __U, __m128bh __A)
{
return (__m128i) __builtin_ia32_cvttbf162iubs128_mask ((__v8bf) __A,
(__v8hi)
@@ -334,7 +334,7 @@ _mm_maskz_ipcvttbf16_epu8 (__mmask8 __U, __m128bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvttbf16_epi8 (__m256bh __A)
+_mm256_ipcvtts_bf16_epi8 (__m256bh __A)
{
return (__m256i)
__builtin_ia32_cvttbf162ibs256_mask ((__v16bf) __A,
@@ -344,7 +344,7 @@ _mm256_ipcvttbf16_epi8 (__m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvttbf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A)
+_mm256_mask_ipcvtts_bf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A)
{
return (__m256i) __builtin_ia32_cvttbf162ibs256_mask ((__v16bf) __A,
(__v16hi) __W,
@@ -353,7 +353,7 @@ _mm256_mask_ipcvttbf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvttbf16_epi8 (__mmask16 __U, __m256bh __A)
+_mm256_maskz_ipcvtts_bf16_epi8 (__mmask16 __U, __m256bh __A)
{
return (__m256i)
__builtin_ia32_cvttbf162ibs256_mask ((__v16bf) __A,
@@ -363,7 +363,7 @@ _mm256_maskz_ipcvttbf16_epi8 (__mmask16 __U, __m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvttbf16_epu8 (__m256bh __A)
+_mm256_ipcvtts_bf16_epu8 (__m256bh __A)
{
return (__m256i)
__builtin_ia32_cvttbf162iubs256_mask ((__v16bf) __A,
@@ -373,7 +373,7 @@ _mm256_ipcvttbf16_epu8 (__m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvttbf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A)
+_mm256_mask_ipcvtts_bf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A)
{
return (__m256i) __builtin_ia32_cvttbf162iubs256_mask ((__v16bf) __A,
(__v16hi) __W,
@@ -382,7 +382,7 @@ _mm256_mask_ipcvttbf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvttbf16_epu8 (__mmask16 __U, __m256bh __A)
+_mm256_maskz_ipcvtts_bf16_epu8 (__mmask16 __U, __m256bh __A)
{
return (__m256i)
__builtin_ia32_cvttbf162iubs256_mask ((__v16bf) __A,
@@ -392,7 +392,7 @@ _mm256_maskz_ipcvttbf16_epu8 (__mmask16 __U, __m256bh __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvttph_epi8 (__m128h __A)
+_mm_ipcvtts_ph_epi8 (__m128h __A)
{
return (__m128i) __builtin_ia32_cvttph2ibs128_mask ((__v8hf) __A,
(__v8hi)
@@ -402,7 +402,7 @@ _mm_ipcvttph_epi8 (__m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvttph_epi8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_ipcvtts_ph_epi8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvttph2ibs128_mask ((__v8hf) __A,
(__v8hi) __W,
@@ -411,7 +411,7 @@ _mm_mask_ipcvttph_epi8 (__m128i __W, __mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvttph_epi8 (__mmask8 __U, __m128h __A)
+_mm_maskz_ipcvtts_ph_epi8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvttph2ibs128_mask ((__v8hf) __A,
(__v8hi)
@@ -421,7 +421,7 @@ _mm_maskz_ipcvttph_epi8 (__mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvttph_epu8 (__m128h __A)
+_mm_ipcvtts_ph_epu8 (__m128h __A)
{
return (__m128i) __builtin_ia32_cvttph2iubs128_mask ((__v8hf) __A,
(__v8hi)
@@ -431,7 +431,7 @@ _mm_ipcvttph_epu8 (__m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvttph_epu8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_ipcvtts_ph_epu8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvttph2iubs128_mask ((__v8hf) __A,
(__v8hi) __W,
@@ -440,7 +440,7 @@ _mm_mask_ipcvttph_epu8 (__m128i __W, __mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvttph_epu8 (__mmask8 __U, __m128h __A)
+_mm_maskz_ipcvtts_ph_epu8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_cvttph2iubs128_mask ((__v8hf) __A,
(__v8hi)
@@ -450,7 +450,7 @@ _mm_maskz_ipcvttph_epu8 (__mmask8 __U, __m128h __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvttps_epi8 (__m128 __A)
+_mm_ipcvtts_ps_epi8 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2ibs128_mask ((__v4sf) __A,
(__v4si)
@@ -460,7 +460,7 @@ _mm_ipcvttps_epi8 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvttps_epi8 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_ipcvtts_ps_epi8 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2ibs128_mask ((__v4sf) __A,
(__v4si) __W,
@@ -469,7 +469,7 @@ _mm_mask_ipcvttps_epi8 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvttps_epi8 (__mmask8 __U, __m128 __A)
+_mm_maskz_ipcvtts_ps_epi8 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2ibs128_mask ((__v4sf) __A,
(__v4si)
@@ -479,7 +479,7 @@ _mm_maskz_ipcvttps_epi8 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ipcvttps_epu8 (__m128 __A)
+_mm_ipcvtts_ps_epu8 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2iubs128_mask ((__v4sf) __A,
(__v4si)
@@ -489,7 +489,7 @@ _mm_ipcvttps_epu8 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_ipcvttps_epu8 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_ipcvtts_ps_epu8 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2iubs128_mask ((__v4sf) __A,
(__v4si) __W,
@@ -498,7 +498,7 @@ _mm_mask_ipcvttps_epu8 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_ipcvttps_epu8 (__mmask8 __U, __m128 __A)
+_mm_maskz_ipcvtts_ps_epu8 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2iubs128_mask ((__v4sf) __A,
(__v4si)
@@ -508,7 +508,7 @@ _mm_maskz_ipcvttps_epu8 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttspd_epi32 (__m128d __A)
+_mm_cvtts_pd_epi32 (__m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2dqs128_mask ((__v2df) __A,
(__v4si)
@@ -518,7 +518,7 @@ _mm_cvttspd_epi32 (__m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttspd_epi32 (__m128i __W, __mmask8 __U, __m128d __A)
+_mm_mask_cvtts_pd_epi32 (__m128i __W, __mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2dqs128_mask ((__v2df) __A,
(__v4si) __W,
@@ -527,7 +527,7 @@ _mm_mask_cvttspd_epi32 (__m128i __W, __mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttspd_epi32 (__mmask8 __U, __m128d __A)
+_mm_maskz_cvtts_pd_epi32 (__mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2dqs128_mask ((__v2df) __A,
(__v4si)
@@ -537,7 +537,7 @@ _mm_maskz_cvttspd_epi32 (__mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttspd_epi64 (__m128d __A)
+_mm_cvtts_pd_epi64 (__m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2qqs128_mask ((__v2df) __A,
(__v2di)
@@ -547,7 +547,7 @@ _mm_cvttspd_epi64 (__m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttspd_epi64 (__m128i __W, __mmask8 __U, __m128d __A)
+_mm_mask_cvtts_pd_epi64 (__m128i __W, __mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2qqs128_mask ((__v2df) __A,
(__v2di) __W,
@@ -556,7 +556,7 @@ _mm_mask_cvttspd_epi64 (__m128i __W, __mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttspd_epi64 (__mmask8 __U, __m128d __A)
+_mm_maskz_cvtts_pd_epi64 (__mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2qqs128_mask ((__v2df) __A,
(__v2di)
@@ -566,7 +566,7 @@ _mm_maskz_cvttspd_epi64 (__mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttspd_epu32 (__m128d __A)
+_mm_cvtts_pd_epu32 (__m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2udqs128_mask ((__v2df) __A,
(__v4si)
@@ -576,7 +576,7 @@ _mm_cvttspd_epu32 (__m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttspd_epu32 (__m128i __W, __mmask8 __U, __m128d __A)
+_mm_mask_cvtts_pd_epu32 (__m128i __W, __mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2udqs128_mask ((__v2df) __A,
(__v4si) __W,
@@ -585,7 +585,7 @@ _mm_mask_cvttspd_epu32 (__m128i __W, __mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttspd_epu32 (__mmask8 __U, __m128d __A)
+_mm_maskz_cvtts_pd_epu32 (__mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2udqs128_mask ((__v2df) __A,
(__v4si)
@@ -595,7 +595,7 @@ _mm_maskz_cvttspd_epu32 (__mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttspd_epu64 (__m128d __A)
+_mm_cvtts_pd_epu64 (__m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2uqqs128_mask ((__v2df) __A,
(__v2di)
@@ -605,7 +605,7 @@ _mm_cvttspd_epu64 (__m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttspd_epu64 (__m128i __W, __mmask8 __U, __m128d __A)
+_mm_mask_cvtts_pd_epu64 (__m128i __W, __mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2uqqs128_mask ((__v2df) __A,
(__v2di) __W,
@@ -614,7 +614,7 @@ _mm_mask_cvttspd_epu64 (__m128i __W, __mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttspd_epu64 (__mmask8 __U, __m128d __A)
+_mm_maskz_cvtts_pd_epu64 (__mmask8 __U, __m128d __A)
{
return (__m128i) __builtin_ia32_cvttpd2uqqs128_mask ((__v2df) __A,
(__v2di)
@@ -624,7 +624,7 @@ _mm_maskz_cvttspd_epu64 (__mmask8 __U, __m128d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsps_epi32 (__m128 __A)
+_mm_cvtts_ps_epi32 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2dqs128_mask ((__v4sf) __A,
(__v4si)
@@ -634,7 +634,7 @@ _mm_cvttsps_epi32 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttsps_epi32 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_cvtts_ps_epi32 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2dqs128_mask ((__v4sf) __A,
(__v4si) __W,
@@ -643,7 +643,7 @@ _mm_mask_cvttsps_epi32 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttsps_epi32 (__mmask8 __U, __m128 __A)
+_mm_maskz_cvtts_ps_epi32 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2dqs128_mask ((__v4sf) __A,
(__v4si)
@@ -653,7 +653,7 @@ _mm_maskz_cvttsps_epi32 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsps_epi64 (__m128 __A)
+_mm_cvtts_ps_epi64 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2qqs128_mask ((__v4sf) __A,
(__v2di)
@@ -663,7 +663,7 @@ _mm_cvttsps_epi64 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttsps_epi64 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_cvtts_ps_epi64 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2qqs128_mask ((__v4sf) __A,
(__v2di) __W,
@@ -672,7 +672,7 @@ _mm_mask_cvttsps_epi64 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttsps_epi64 (__mmask8 __U, __m128 __A)
+_mm_maskz_cvtts_ps_epi64 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2qqs128_mask ((__v4sf) __A,
(__v2di)
@@ -682,7 +682,7 @@ _mm_maskz_cvttsps_epi64 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsps_epu32 (__m128 __A)
+_mm_cvtts_ps_epu32 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2udqs128_mask ((__v4sf) __A,
(__v4si)
@@ -692,7 +692,7 @@ _mm_cvttsps_epu32 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttsps_epu32 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_cvtts_ps_epu32 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2udqs128_mask ((__v4sf) __A,
(__v4si) __W,
@@ -701,7 +701,7 @@ _mm_mask_cvttsps_epu32 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttsps_epu32 (__mmask8 __U, __m128 __A)
+_mm_maskz_cvtts_ps_epu32 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2udqs128_mask ((__v4sf) __A,
(__v4si)
@@ -711,7 +711,7 @@ _mm_maskz_cvttsps_epu32 (__mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsps_epu64 (__m128 __A)
+_mm_cvtts_ps_epu64 (__m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2uqqs128_mask ((__v4sf) __A,
(__v2di)
@@ -721,7 +721,7 @@ _mm_cvttsps_epu64 (__m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvttsps_epu64 (__m128i __W, __mmask8 __U, __m128 __A)
+_mm_mask_cvtts_ps_epu64 (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2uqqs128_mask ((__v4sf) __A,
(__v2di) __W,
@@ -730,7 +730,7 @@ _mm_mask_cvttsps_epu64 (__m128i __W, __mmask8 __U, __m128 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvttsps_epu64 (__mmask8 __U, __m128 __A)
+_mm_maskz_cvtts_ps_epu64 (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_cvttps2uqqs128_mask ((__v4sf) __A,
(__v2di)
@@ -740,7 +740,7 @@ _mm_maskz_cvttsps_epu64 (__mmask8 __U, __m128 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtph_epi8 (__m256h __A)
+_mm256_ipcvts_ph_epi8 (__m256h __A)
{
return
(__m256i) __builtin_ia32_cvtph2ibs256_mask ((__v16hf) __A,
@@ -751,7 +751,7 @@ _mm256_ipcvtph_epi8 (__m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtph_epi8 (__m256i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_ipcvts_ph_epi8 (__m256i __W, __mmask16 __U, __m256h __A)
{
return (__m256i) __builtin_ia32_cvtph2ibs256_mask ((__v16hf) __A,
(__v16hi) __W,
@@ -760,7 +760,7 @@ _mm256_mask_ipcvtph_epi8 (__m256i __W, __mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtph_epi8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_ipcvts_ph_epi8 (__mmask16 __U, __m256h __A)
{
return
(__m256i) __builtin_ia32_cvtph2ibs256_mask ((__v16hf) __A,
@@ -771,7 +771,7 @@ _mm256_maskz_ipcvtph_epi8 (__mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtph_epu8 (__m256h __A)
+_mm256_ipcvts_ph_epu8 (__m256h __A)
{
return (__m256i)
__builtin_ia32_cvtph2iubs256_mask ((__v16hf) __A,
@@ -782,7 +782,7 @@ _mm256_ipcvtph_epu8 (__m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtph_epu8 (__m256i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_ipcvts_ph_epu8 (__m256i __W, __mmask16 __U, __m256h __A)
{
return (__m256i) __builtin_ia32_cvtph2iubs256_mask ((__v16hf) __A,
(__v16hi) __W,
@@ -791,7 +791,7 @@ _mm256_mask_ipcvtph_epu8 (__m256i __W, __mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtph_epu8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_ipcvts_ph_epu8 (__mmask16 __U, __m256h __A)
{
return
(__m256i) __builtin_ia32_cvtph2iubs256_mask ((__v16hf) __A,
@@ -802,7 +802,7 @@ _mm256_maskz_ipcvtph_epu8 (__mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtps_epi8 (__m256 __A)
+_mm256_ipcvts_ps_epi8 (__m256 __A)
{
return
(__m256i) __builtin_ia32_cvtps2ibs256_mask ((__v8sf) __A,
@@ -813,7 +813,7 @@ _mm256_ipcvtps_epi8 (__m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtps_epi8 (__m256i __W, __mmask8 __U, __m256 __A)
+_mm256_mask_ipcvts_ps_epi8 (__m256i __W, __mmask8 __U, __m256 __A)
{
return (__m256i) __builtin_ia32_cvtps2ibs256_mask ((__v8sf) __A,
(__v8si) __W,
@@ -822,7 +822,7 @@ _mm256_mask_ipcvtps_epi8 (__m256i __W, __mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtps_epi8 (__mmask8 __U, __m256 __A)
+_mm256_maskz_ipcvts_ps_epi8 (__mmask8 __U, __m256 __A)
{
return
(__m256i) __builtin_ia32_cvtps2ibs256_mask ((__v8sf) __A,
@@ -833,7 +833,7 @@ _mm256_maskz_ipcvtps_epi8 (__mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtps_epu8 (__m256 __A)
+_mm256_ipcvts_ps_epu8 (__m256 __A)
{
return (__m256i)
__builtin_ia32_cvtps2iubs256_mask ((__v8sf) __A,
@@ -844,7 +844,7 @@ _mm256_ipcvtps_epu8 (__m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtps_epu8 (__m256i __W, __mmask8 __U, __m256 __A)
+_mm256_mask_ipcvts_ps_epu8 (__m256i __W, __mmask8 __U, __m256 __A)
{
return (__m256i) __builtin_ia32_cvtps2iubs256_mask ((__v8sf) __A,
(__v8si) __W,
@@ -853,7 +853,7 @@ _mm256_mask_ipcvtps_epu8 (__m256i __W, __mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtps_epu8 (__mmask8 __U, __m256 __A)
+_mm256_maskz_ipcvts_ps_epu8 (__mmask8 __U, __m256 __A)
{
return
(__m256i) __builtin_ia32_cvtps2iubs256_mask ((__v8sf) __A,
@@ -864,7 +864,7 @@ _mm256_maskz_ipcvtps_epu8 (__mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvttph_epi8 (__m256h __A)
+_mm256_ipcvtts_ph_epi8 (__m256h __A)
{
return (__m256i)
__builtin_ia32_cvttph2ibs256_mask ((__v16hf) __A,
@@ -875,7 +875,7 @@ _mm256_ipcvttph_epi8 (__m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvttph_epi8 (__m256i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_ipcvtts_ph_epi8 (__m256i __W, __mmask16 __U, __m256h __A)
{
return (__m256i) __builtin_ia32_cvttph2ibs256_mask ((__v16hf) __A,
(__v16hi) __W,
@@ -884,7 +884,7 @@ _mm256_mask_ipcvttph_epi8 (__m256i __W, __mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvttph_epi8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_ipcvtts_ph_epi8 (__mmask16 __U, __m256h __A)
{
return
(__m256i) __builtin_ia32_cvttph2ibs256_mask ((__v16hf) __A,
@@ -895,7 +895,7 @@ _mm256_maskz_ipcvttph_epi8 (__mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvttph_epu8 (__m256h __A)
+_mm256_ipcvtts_ph_epu8 (__m256h __A)
{
return (__m256i)
__builtin_ia32_cvttph2iubs256_mask ((__v16hf) __A,
@@ -906,7 +906,7 @@ _mm256_ipcvttph_epu8 (__m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvttph_epu8 (__m256i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_ipcvtts_ph_epu8 (__m256i __W, __mmask16 __U, __m256h __A)
{
return (__m256i) __builtin_ia32_cvttph2iubs256_mask ((__v16hf) __A,
(__v16hi) __W,
@@ -915,7 +915,7 @@ _mm256_mask_ipcvttph_epu8 (__m256i __W, __mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvttph_epu8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_ipcvtts_ph_epu8 (__mmask16 __U, __m256h __A)
{
return
(__m256i) __builtin_ia32_cvttph2iubs256_mask ((__v16hf) __A,
@@ -926,7 +926,7 @@ _mm256_maskz_ipcvttph_epu8 (__mmask16 __U, __m256h __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvttps_epi8 (__m256 __A)
+_mm256_ipcvtts_ps_epi8 (__m256 __A)
{
return (__m256i)
__builtin_ia32_cvttps2ibs256_mask ((__v8sf) __A,
@@ -937,7 +937,7 @@ _mm256_ipcvttps_epi8 (__m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvttps_epi8 (__m256i __W, __mmask8 __U, __m256 __A)
+_mm256_mask_ipcvtts_ps_epi8 (__m256i __W, __mmask8 __U, __m256 __A)
{
return (__m256i) __builtin_ia32_cvttps2ibs256_mask ((__v8sf) __A,
(__v8si) __W,
@@ -946,7 +946,7 @@ _mm256_mask_ipcvttps_epi8 (__m256i __W, __mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvttps_epi8 (__mmask8 __U, __m256 __A)
+_mm256_maskz_ipcvtts_ps_epi8 (__mmask8 __U, __m256 __A)
{
return
(__m256i) __builtin_ia32_cvttps2ibs256_mask ((__v8sf) __A,
@@ -957,7 +957,7 @@ _mm256_maskz_ipcvttps_epi8 (__mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvttps_epu8 (__m256 __A)
+_mm256_ipcvtts_ps_epu8 (__m256 __A)
{
return (__m256i)
__builtin_ia32_cvttps2iubs256_mask ((__v8sf) __A,
@@ -968,7 +968,7 @@ _mm256_ipcvttps_epu8 (__m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvttps_epu8 (__m256i __W, __mmask8 __U, __m256 __A)
+_mm256_mask_ipcvtts_ps_epu8 (__m256i __W, __mmask8 __U, __m256 __A)
{
return (__m256i) __builtin_ia32_cvttps2iubs256_mask ((__v8sf) __A,
(__v8si) __W,
@@ -977,7 +977,7 @@ _mm256_mask_ipcvttps_epu8 (__m256i __W, __mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvttps_epu8 (__mmask8 __U, __m256 __A)
+_mm256_maskz_ipcvtts_ps_epu8 (__mmask8 __U, __m256 __A)
{
return
(__m256i) __builtin_ia32_cvttps2iubs256_mask ((__v8sf) __A,
@@ -988,7 +988,7 @@ _mm256_maskz_ipcvttps_epu8 (__mmask8 __U, __m256 __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttspd_epi32 (__m256d __A)
+_mm256_cvtts_pd_epi32 (__m256d __A)
{
return
(__m128i) __builtin_ia32_cvttpd2dqs256_mask ((__v4df) __A,
@@ -999,7 +999,7 @@ _mm256_cvttspd_epi32 (__m256d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttspd_epi32 (__m128i __W, __mmask8 __U, __m256d __A)
+_mm256_mask_cvtts_pd_epi32 (__m128i __W, __mmask8 __U, __m256d __A)
{
return (__m128i) __builtin_ia32_cvttpd2dqs256_mask ((__v4df) __A,
(__v4si) __W,
@@ -1008,7 +1008,7 @@ _mm256_mask_cvttspd_epi32 (__m128i __W, __mmask8 __U, __m256d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttspd_epi32 (__mmask8 __U, __m256d __A)
+_mm256_maskz_cvtts_pd_epi32 (__mmask8 __U, __m256d __A)
{
return
(__m128i) __builtin_ia32_cvttpd2dqs256_mask ((__v4df) __A,
@@ -1019,7 +1019,7 @@ _mm256_maskz_cvttspd_epi32 (__mmask8 __U, __m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttspd_epi64 (__m256d __A)
+_mm256_cvtts_pd_epi64 (__m256d __A)
{
return (__m256i)
__builtin_ia32_cvttpd2qqs256_mask ((__v4df) __A,
@@ -1030,7 +1030,7 @@ _mm256_cvttspd_epi64 (__m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttspd_epi64 (__m256i __W, __mmask8 __U, __m256d __A)
+_mm256_mask_cvtts_pd_epi64 (__m256i __W, __mmask8 __U, __m256d __A)
{
return (__m256i) __builtin_ia32_cvttpd2qqs256_mask ((__v4df) __A,
(__v4di) __W,
@@ -1039,7 +1039,7 @@ _mm256_mask_cvttspd_epi64 (__m256i __W, __mmask8 __U, __m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttspd_epi64 (__mmask8 __U, __m256d __A)
+_mm256_maskz_cvtts_pd_epi64 (__mmask8 __U, __m256d __A)
{
return
(__m256i) __builtin_ia32_cvttpd2qqs256_mask ((__v4df) __A,
@@ -1050,7 +1050,7 @@ _mm256_maskz_cvttspd_epi64 (__mmask8 __U, __m256d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttspd_epu32 (__m256d __A)
+_mm256_cvtts_pd_epu32 (__m256d __A)
{
return
(__m128i) __builtin_ia32_cvttpd2udqs256_mask ((__v4df) __A,
@@ -1061,7 +1061,7 @@ _mm256_cvttspd_epu32 (__m256d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttspd_epu32 (__m128i __W, __mmask8 __U, __m256d __A)
+_mm256_mask_cvtts_pd_epu32 (__m128i __W, __mmask8 __U, __m256d __A)
{
return (__m128i) __builtin_ia32_cvttpd2udqs256_mask ((__v4df) __A,
(__v4si) __W,
@@ -1070,7 +1070,7 @@ _mm256_mask_cvttspd_epu32 (__m128i __W, __mmask8 __U, __m256d __A)
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttspd_epu32 (__mmask8 __U, __m256d __A)
+_mm256_maskz_cvtts_pd_epu32 (__mmask8 __U, __m256d __A)
{
return
(__m128i) __builtin_ia32_cvttpd2udqs256_mask ((__v4df) __A,
@@ -1081,7 +1081,7 @@ _mm256_maskz_cvttspd_epu32 (__mmask8 __U, __m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttspd_epu64 (__m256d __A)
+_mm256_cvtts_pd_epu64 (__m256d __A)
{
return (__m256i)
__builtin_ia32_cvttpd2uqqs256_mask ((__v4df) __A,
@@ -1092,7 +1092,7 @@ _mm256_cvttspd_epu64 (__m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttspd_epu64 (__m256i __W, __mmask8 __U, __m256d __A)
+_mm256_mask_cvtts_pd_epu64 (__m256i __W, __mmask8 __U, __m256d __A)
{
return (__m256i) __builtin_ia32_cvttpd2uqqs256_mask ((__v4df) __A,
(__v4di) __W,
@@ -1101,7 +1101,7 @@ _mm256_mask_cvttspd_epu64 (__m256i __W, __mmask8 __U, __m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttspd_epu64 (__mmask8 __U, __m256d __A)
+_mm256_maskz_cvtts_pd_epu64 (__mmask8 __U, __m256d __A)
{
return
(__m256i) __builtin_ia32_cvttpd2uqqs256_mask ((__v4df) __A,
@@ -1112,7 +1112,7 @@ _mm256_maskz_cvttspd_epu64 (__mmask8 __U, __m256d __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttsps_epi32 (__m256 __A)
+_mm256_cvtts_ps_epi32 (__m256 __A)
{
return (__m256i)
__builtin_ia32_cvttps2dqs256_mask ((__v8sf) __A,
@@ -1123,7 +1123,7 @@ _mm256_cvttsps_epi32 (__m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttsps_epi32 (__m256i __W, __mmask8 __U, __m256 __A)
+_mm256_mask_cvtts_ps_epi32 (__m256i __W, __mmask8 __U, __m256 __A)
{
return (__m256i) __builtin_ia32_cvttps2dqs256_mask ((__v8sf) __A,
(__v8si) __W,
@@ -1132,7 +1132,7 @@ _mm256_mask_cvttsps_epi32 (__m256i __W, __mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttsps_epi32 (__mmask8 __U, __m256 __A)
+_mm256_maskz_cvtts_ps_epi32 (__mmask8 __U, __m256 __A)
{
return
(__m256i) __builtin_ia32_cvttps2dqs256_mask ((__v8sf) __A,
@@ -1143,7 +1143,7 @@ _mm256_maskz_cvttsps_epi32 (__mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttsps_epi64 (__m128 __A)
+_mm256_cvtts_ps_epi64 (__m128 __A)
{
return (__m256i)
__builtin_ia32_cvttps2qqs256_mask ((__v4sf) __A,
@@ -1154,7 +1154,7 @@ _mm256_cvttsps_epi64 (__m128 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttsps_epi64 (__m256i __W, __mmask8 __U, __m128 __A)
+_mm256_mask_cvtts_ps_epi64 (__m256i __W, __mmask8 __U, __m128 __A)
{
return (__m256i) __builtin_ia32_cvttps2qqs256_mask ((__v4sf) __A,
(__v4di) __W,
@@ -1163,7 +1163,7 @@ _mm256_mask_cvttsps_epi64 (__m256i __W, __mmask8 __U, __m128 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttsps_epi64 (__mmask8 __U, __m128 __A)
+_mm256_maskz_cvtts_ps_epi64 (__mmask8 __U, __m128 __A)
{
return
(__m256i) __builtin_ia32_cvttps2qqs256_mask ((__v4sf) __A,
@@ -1174,7 +1174,7 @@ _mm256_maskz_cvttsps_epi64 (__mmask8 __U, __m128 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttsps_epu32 (__m256 __A)
+_mm256_cvtts_ps_epu32 (__m256 __A)
{
return (__m256i)
__builtin_ia32_cvttps2udqs256_mask ((__v8sf) __A,
@@ -1185,7 +1185,7 @@ _mm256_cvttsps_epu32 (__m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttsps_epu32 (__m256i __W, __mmask8 __U, __m256 __A)
+_mm256_mask_cvtts_ps_epu32 (__m256i __W, __mmask8 __U, __m256 __A)
{
return (__m256i) __builtin_ia32_cvttps2udqs256_mask ((__v8sf) __A,
(__v8si) __W,
@@ -1194,7 +1194,7 @@ _mm256_mask_cvttsps_epu32 (__m256i __W, __mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttsps_epu32 (__mmask8 __U, __m256 __A)
+_mm256_maskz_cvtts_ps_epu32 (__mmask8 __U, __m256 __A)
{
return
(__m256i) __builtin_ia32_cvttps2udqs256_mask ((__v8sf) __A,
@@ -1205,7 +1205,7 @@ _mm256_maskz_cvttsps_epu32 (__mmask8 __U, __m256 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvttsps_epu64 (__m128 __A)
+_mm256_cvtts_ps_epu64 (__m128 __A)
{
return (__m256i)
__builtin_ia32_cvttps2uqqs256_mask ((__v4sf) __A,
@@ -1216,7 +1216,7 @@ _mm256_cvttsps_epu64 (__m128 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvttsps_epu64 (__m256i __W, __mmask8 __U, __m128 __A)
+_mm256_mask_cvtts_ps_epu64 (__m256i __W, __mmask8 __U, __m128 __A)
{
return (__m256i) __builtin_ia32_cvttps2uqqs256_mask ((__v4sf) __A,
(__v4di) __W,
@@ -1225,7 +1225,7 @@ _mm256_mask_cvttsps_epu64 (__m256i __W, __mmask8 __U, __m128 __A)
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvttsps_epu64 (__mmask8 __U, __m128 __A)
+_mm256_maskz_cvtts_ps_epu64 (__mmask8 __U, __m128 __A)
{
return
(__m256i) __builtin_ia32_cvttps2uqqs256_mask ((__v4sf) __A,
@@ -1236,7 +1236,7 @@ _mm256_maskz_cvttsps_epu64 (__mmask8 __U, __m128 __A)
extern __inline int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttssd_epi32 (__m128d __A)
+_mm_cvtts_sd_epi32 (__m128d __A)
{
return (int) __builtin_ia32_cvttsd2sis32_round ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
@@ -1244,7 +1244,7 @@ _mm_cvttssd_epi32 (__m128d __A)
extern __inline unsigned int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttssd_epu32 (__m128d __A)
+_mm_cvtts_sd_epu32 (__m128d __A)
{
return (unsigned int) __builtin_ia32_cvttsd2usis32_round ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
@@ -1252,7 +1252,7 @@ _mm_cvttssd_epu32 (__m128d __A)
extern __inline int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsss_epi32 (__m128 __A)
+_mm_cvtts_ss_epi32 (__m128 __A)
{
return (int) __builtin_ia32_cvttss2sis32_round ((__v4sf) __A,
_MM_FROUND_CUR_DIRECTION);
@@ -1260,573 +1260,13 @@ _mm_cvttsss_epi32 (__m128 __A)
extern __inline unsigned int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsss_epu32 (__m128 __A)
+_mm_cvtts_ss_epu32 (__m128 __A)
{
return (unsigned int) __builtin_ia32_cvttss2usis32_round ((__v4sf) __A,
_MM_FROUND_CUR_DIRECTION);
}
#ifdef __OPTIMIZE__
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvt_roundph_epi8 (__m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_undefined_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvt_roundph_epi8 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvt_roundph_epi8 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvt_roundph_epu8 (__m256h __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_undefined_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvt_roundph_epu8 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvt_roundph_epu8 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvt_roundps_epi8 (__m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvt_roundps_epi8 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvt_roundps_epi8 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvt_roundps_epu8 (__m256 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvt_roundps_epu8 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvt_roundps_epu8 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtt_roundph_epi8 (__m256h __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_undefined_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtt_roundph_epi8 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtt_roundph_epi8 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtt_roundph_epu8 (__m256h __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_undefined_si256 (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtt_roundph_epu8 (__m256i __W, __mmask16 __U, __m256h __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) __A,
- (__v16hi) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtt_roundph_epu8 (__mmask16 __U, __m256h __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtt_roundps_epi8 (__m256 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtt_roundps_epi8 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtt_roundps_epi8 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_ipcvtt_roundps_epu8 (__m256 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_ipcvtt_roundps_epu8 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_ipcvtt_roundps_epu8 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundpd_epi32 (__m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_undefined_si128 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundpd_epi32 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundpd_epi64 (__m256d __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundpd_epi64 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundpd_epu32 (__m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_undefined_si128 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m128i) __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m128i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundpd_epu32 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m128i) __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundpd_epu64 (__m256d __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundps_epi32 (__m256 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundps_epi64 (__m128 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundps_epi64 (__mmask8 __U, __m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundps_epu32 (__m256 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtts_roundps_epu64 (__m128 __A, const int __R)
-{
- return (__m256i)
- __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_undefined_si256 (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtts_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A,
- const int __R)
-{
- return (__m256i) __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) __A,
- (__v4di) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m128 __A, const int __R)
-{
- return
- (__m256i) __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U,
- __R);
-}
-
extern __inline int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtts_roundsd_epi32 (__m128d __A, const int __R)
@@ -1859,546 +1299,6 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R)
__R);
}
#else
-
-#define _mm256_ipcvt_roundph_epi8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_undefined_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvt_roundph_epi8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvt_roundph_epi8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_ipcvt_roundph_epu8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_undefined_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvt_roundph_epu8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvt_roundph_epu8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_ipcvt_roundps_epi8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvt_roundps_epi8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvt_roundps_epi8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_ipcvt_roundps_epu8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvt_roundps_epu8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvt_roundps_epu8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_ipcvtt_roundph_epi8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_undefined_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvtt_roundph_epi8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvtt_roundph_epi8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_ipcvtt_roundph_epu8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_undefined_si256 ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvtt_roundph_epu8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) (A), \
- (__v16hi) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvtt_roundph_epu8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) (A), \
- (__v16hi) \
- (_mm256_setzero_si256 ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_ipcvtt_roundps_epi8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvtt_roundps_epi8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvtt_roundps_epi8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_ipcvtt_roundps_epu8(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_ipcvtt_roundps_epu8(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_ipcvtt_roundps_epu8(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epi32(A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epi32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epi32(U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epi64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epi64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epu32(A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epu32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epu32(U, A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) (A), \
- (__v4si) (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epi32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epi32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epi32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epi64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epi64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epu32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epu32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epu32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm_cvtts_roundsd_epi32(A, R) \
- ((int) __builtin_ia32_cvttsd2sis32_round ((__v2df) (A), \
- (R)))
-
-#define _mm_cvtts_roundsd_epu32(A, R) \
- ((unsigned int) __builtin_ia32_cvttsd2usis32_round ((__v2df) (A), \
- (R)))
-
-#define _mm_cvtts_roundss_epi32(A, R) \
- ((int) __builtin_ia32_cvttss2sis32_round ((__v4sf) (A), \
- (R)))
-
-#define _mm_cvtts_roundss_epu32(A, R) \
- ((unsigned int) __builtin_ia32_cvttss2usis32_round ((__v4sf) (A), \
- (R)))
-#define _mm256_cvtts_roundpd_epi32(A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epi32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epi32(U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epi64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epi64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epu32(A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) (A), \
- (__v4si) \
- (_mm_undefined_si128 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epu32(W, U, A, R) \
- ((__m128i) __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) (A), \
- (__v4si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epu32(U, A, R) \
- ((__m128i) \
- __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) (A), \
- (__v4si) (_mm_setzero_si128 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundpd_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundpd_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundpd_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epi32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epi32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epi32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epi64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epi64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epi64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epu32(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epu32(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) (A), \
- (__v8si) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epu32(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) (A), \
- (__v8si) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cvtts_roundps_epu64(A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_undefined_si256 ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cvtts_roundps_epu64(W, U, A, R) \
- ((__m256i) __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) (A), \
- (__v4di) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_cvtts_roundps_epu64(U, A, R) \
- ((__m256i) \
- __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) (A), \
- (__v4di) \
- (_mm256_setzero_si256 ()), \
- (__mmask8) (U), \
- (R)))
-
#define _mm_cvtts_roundsd_epi32(A, R) \
((int) __builtin_ia32_cvttsd2sis32_round ((__v2df) (A), \
(R)))
@@ -2419,7 +1319,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R)
#ifdef __x86_64__
extern __inline long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttssd_epi64 (__m128d __A)
+_mm_cvtts_sd_epi64 (__m128d __A)
{
return (long long) __builtin_ia32_cvttsd2sis64_round ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
@@ -2427,7 +1327,7 @@ _mm_cvttssd_epi64 (__m128d __A)
extern __inline unsigned long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttssd_epu64 (__m128d __A)
+_mm_cvtts_sd_epu64 (__m128d __A)
{
return (unsigned long long) __builtin_ia32_cvttsd2usis64_round ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
@@ -2435,7 +1335,7 @@ _mm_cvttssd_epu64 (__m128d __A)
extern __inline long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsss_epi64 (__m128 __A)
+_mm_cvtts_ss_epi64 (__m128 __A)
{
return (long long) __builtin_ia32_cvttss2sis64_round ((__v4sf) __A,
_MM_FROUND_CUR_DIRECTION);
@@ -2444,11 +1344,12 @@ _mm_cvttsss_epi64 (__m128 __A)
extern __inline unsigned long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsss_epu64 (__m128 __A)
+_mm_cvtts_ss_epu64 (__m128 __A)
{
return (unsigned long long) __builtin_ia32_cvttss2usis64_round ((__v4sf) __A,
_MM_FROUND_CUR_DIRECTION);
}
+
#ifdef __OPTIMIZE__
extern __inline long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -2474,7 +1375,6 @@ _mm_cvtts_roundss_epi64 (__m128 __A, const int __R)
__R);
}
-
extern __inline unsigned long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtts_roundss_epu64 (__m128 __A, const int __R)
@@ -2483,7 +1383,6 @@ _mm_cvtts_roundss_epu64 (__m128 __A, const int __R)
__R);
}
#else
-
#define _mm_cvtts_roundsd_epi64(A, R) \
((long long) __builtin_ia32_cvttsd2sis64_round ((__v2df) (A), \
(R)))
@@ -2502,9 +1401,9 @@ _mm_cvtts_roundss_epu64 (__m128 __A, const int __R)
#endif
#endif /* __x86_64__ */
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
+#ifdef __DISABLE_AVX10_2__
+#undef __DISABLE_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
+#endif /* __DISABLE_AVX10_2__ */
#endif /* _AVX10_2SATCVTINTRIN_H_INCLUDED */
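
A minimal usage sketch for the renamed saturating-conversion intrinsics above, assuming a translation unit compiled with AVX10.2 enabled (e.g. -mavx10.2); the helper function name and the input values are purely illustrative:

#include <immintrin.h>

int
saturating_truncate_example (void)
{
  /* Element 0 is 1e30f, which overflows int; the AVX10.2 saturating
     conversions clamp it to INT_MAX instead of producing the integer
     indefinite value.  */
  __m128 ps = _mm_set_ps (4.9f, -3.2f, 2.5f, 1e30f);
  __m128i dq = _mm_cvtts_ps_epi32 (ps);

  /* Scalar form, truncating toward zero: -7.8 becomes -7.  */
  __m128d sd = _mm_set_sd (-7.8);
  int low = _mm_cvtts_sd_epi32 (sd);

  /* Extract element 0 of the vector result (INT_MAX here) and combine
     with the scalar result.  */
  return _mm_cvtsi128_si32 (dq) + low;
}
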
diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
index 1a31ceb..afd4ef0 100644
--- a/gcc/config/i386/cpuid.h
+++ b/gcc/config/i386/cpuid.h
@@ -171,11 +171,6 @@
#define bit_AMX_AVX512 (1 << 7)
#define bit_AMX_MOVRS (1 << 8)
-/* AVX10 sub leaf (%eax == 0x24) */
-/* %ebx */
-#define bit_AVX10_256 (1 << 17)
-#define bit_AVX10_512 (1 << 18)
-
/* Signatures for different CPU implementations as returned in uses
of cpuid with level 0. */
#define signature_AMD_ebx 0x68747541
diff --git a/gcc/config/i386/driver-i386.cc b/gcc/config/i386/driver-i386.cc
index ba13138..1ff05e5 100644
--- a/gcc/config/i386/driver-i386.cc
+++ b/gcc/config/i386/driver-i386.cc
@@ -627,7 +627,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
if (has_feature (FEATURE_AVX512F))
{
/* Assume Diamond Rapids. */
- if (has_feature (FEATURE_AVX10_2_512))
+ if (has_feature (FEATURE_AMX_TRANSPOSE))
cpu = "diamondrapids";
/* Assume Granite Rapids D. */
else if (has_feature (FEATURE_AMX_COMPLEX))
@@ -910,12 +910,9 @@ const char *host_detect_local_cpu (int argc, const char **argv)
isa_names_table[i].option, NULL);
}
/* Never push -mno-avx10.1-{256,512} under -march=native to
- avoid unnecessary warnings when building libraries.
- Never push -mno-avx10.x-256 under -march=native since
- there are no such options. */
+ avoid unnecessary warnings when building libraries. */
else if (isa_names_table[i].feature != FEATURE_AVX10_1_256
- && isa_names_table[i].feature != FEATURE_AVX10_1_512
- && isa_names_table[i].feature != FEATURE_AVX10_2_256
+ && isa_names_table[i].feature != FEATURE_AVX10_1
&& check_avx512_features (cpu_model, cpu_features2,
isa_names_table[i].feature))
options = concat (options, neg_option,
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 1559c6a..64bde02 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1416,46 +1416,7 @@ DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
# AVX10.2 builtins
-DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT)
-DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT)
-DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT)
-DEF_FUNCTION_TYPE (V8HF, V8SI, V8HF, UQI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SI, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (V8HF, V4DF, V8HF, UQI, INT)
-DEF_FUNCTION_TYPE (V4SF, V4DF, V4SF, UQI, INT)
-DEF_FUNCTION_TYPE (V4SI, V4DF, V4SI, UQI, INT)
-DEF_FUNCTION_TYPE (V4DI, V4DF, V4DI, UQI, INT)
-DEF_FUNCTION_TYPE (V8SI, V8HF, V8SI, UQI, INT)
-DEF_FUNCTION_TYPE (V4DF, V8HF, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8HF, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (V4DI, V8HF, V4DI, UQI, INT)
-DEF_FUNCTION_TYPE (V16HI, V16HF, V16HI, UHI, INT)
-DEF_FUNCTION_TYPE (V4DF, V4SF, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V8HF, V8SF, V8HF, UQI, INT)
-DEF_FUNCTION_TYPE (V8SI, V8SF, V8SI, UQI, INT)
-DEF_FUNCTION_TYPE (V4DI, V4SF, V4DI, UQI, INT)
-DEF_FUNCTION_TYPE (V4DF, V4DI, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V8HF, V4DI, V8HF, UQI, INT)
-DEF_FUNCTION_TYPE (V4SF, V4DI, V4SF, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HI, V16HF, UHI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UQI, INT)
-DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DI, INT, UQI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SI, INT, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, INT)
-DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, UHI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (V4DF, V4DF, INT, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, INT, V16HF, UHI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, INT, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, INT, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, INT, V8SF, UQI, INT)
DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI, INT)
-DEF_FUNCTION_TYPE (V16HF, V8SF, V8SF, V16HF, UHI, INT)
DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI)
DEF_FUNCTION_TYPE (V16HF, V8SF, V8SF, V16HF, UHI)
DEF_FUNCTION_TYPE (V8HF, V4SF, V4SF, V8HF, UQI)
@@ -1507,7 +1468,7 @@ DEF_FUNCTION_TYPE (V32BF, V32BF, V32BF, INT, V32BF, USI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT, V8HF, UQI)
DEF_FUNCTION_TYPE (V8DF, V8DF, V8DF, INT, V8DF, UQI, INT)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, INT, V32HF, USI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, INT, V16HF, UHI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, INT, V16HF, UHI)
DEF_FUNCTION_TYPE (V16SF, V16SF, V16SF, INT, V16SF, UHI, INT)
DEF_FUNCTION_TYPE (V8DI, V8SF, V8DI, UQI)
DEF_FUNCTION_TYPE (V8DI, V8DF, V8DI, UQI)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index e6f5b12..a142711 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -510,18 +510,18 @@ BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS, CODE_FOR_movrsqi, "__built
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS, CODE_FOR_movrshi, "__builtin_ia32_movrshi", IX86_BUILTIN_MOVRSHI, UNKNOWN, (int) SHORT_FTYPE_PCSHORT)
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS, CODE_FOR_movrssi, "__builtin_ia32_movrssi", IX86_BUILTIN_MOVRSSI, UNKNOWN, (int) INT_FTYPE_PCINT)
BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS, CODE_FOR_movrsdi, "__builtin_ia32_movrsdi", IX86_BUILTIN_MOVRSDI, UNKNOWN, (int) INT64_FTYPE_PCINT64)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vmovrsbv64qi_mask, "__builtin_ia32_vmovrsb512_mask", IX86_BUILTIN_VMOVRSB_512, UNKNOWN, (int) V64QI_FTYPE_PCV64QI_V64QI_UDI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vmovrsdv16si_mask, "__builtin_ia32_vmovrsd512_mask", IX86_BUILTIN_VMOVRSD_512, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vmovrsqv8di_mask, "__builtin_ia32_vmovrsq512_mask", IX86_BUILTIN_VMOVRSQ_512, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vmovrswv32hi_mask, "__builtin_ia32_vmovrsw512_mask", IX86_BUILTIN_VMOVRSW_512, UNKNOWN, (int) V32HI_FTYPE_PCV32HI_V32HI_USI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrsbv32qi_mask, "__builtin_ia32_vmovrsb256_mask", IX86_BUILTIN_VMOVRSB_256, UNKNOWN, (int) V32QI_FTYPE_PCV32QI_V32QI_USI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrsdv8si_mask, "__builtin_ia32_vmovrsd256_mask", IX86_BUILTIN_VMOVRSD_256, UNKNOWN, (int) V8SI_FTYPE_PCV8SI_V8SI_UQI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrsqv4di_mask, "__builtin_ia32_vmovrsq256_mask", IX86_BUILTIN_VMOVRSQ_256, UNKNOWN, (int) V4DI_FTYPE_PCV4DI_V4DI_UQI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrswv16hi_mask, "__builtin_ia32_vmovrsw256_mask", IX86_BUILTIN_VMOVRSW_256, UNKNOWN, (int) V16HI_FTYPE_PCV16HI_V16HI_UHI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrsbv16qi_mask, "__builtin_ia32_vmovrsb128_mask", IX86_BUILTIN_VMOVRSB_128, UNKNOWN, (int) V16QI_FTYPE_PCV16QI_V16QI_UHI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrsdv4si_mask, "__builtin_ia32_vmovrsd128_mask", IX86_BUILTIN_VMOVRSD_128, UNKNOWN, (int) V4SI_FTYPE_PCV4SI_V4SI_UQI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrsqv2di_mask, "__builtin_ia32_vmovrsq128_mask", IX86_BUILTIN_VMOVRSQ_128, UNKNOWN, (int) V2DI_FTYPE_PCV2DI_V2DI_UQI)
-BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vmovrswv8hi_mask, "__builtin_ia32_vmovrsw128_mask", IX86_BUILTIN_VMOVRSW_128, UNKNOWN, (int) V8HI_FTYPE_PCV8HI_V8HI_UQI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsbv64qi_mask, "__builtin_ia32_vmovrsb512_mask", IX86_BUILTIN_VMOVRSB_512, UNKNOWN, (int) V64QI_FTYPE_PCV64QI_V64QI_UDI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsdv16si_mask, "__builtin_ia32_vmovrsd512_mask", IX86_BUILTIN_VMOVRSD_512, UNKNOWN, (int) V16SI_FTYPE_PCV16SI_V16SI_UHI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsqv8di_mask, "__builtin_ia32_vmovrsq512_mask", IX86_BUILTIN_VMOVRSQ_512, UNKNOWN, (int) V8DI_FTYPE_PCV8DI_V8DI_UQI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrswv32hi_mask, "__builtin_ia32_vmovrsw512_mask", IX86_BUILTIN_VMOVRSW_512, UNKNOWN, (int) V32HI_FTYPE_PCV32HI_V32HI_USI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsbv32qi_mask, "__builtin_ia32_vmovrsb256_mask", IX86_BUILTIN_VMOVRSB_256, UNKNOWN, (int) V32QI_FTYPE_PCV32QI_V32QI_USI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsdv8si_mask, "__builtin_ia32_vmovrsd256_mask", IX86_BUILTIN_VMOVRSD_256, UNKNOWN, (int) V8SI_FTYPE_PCV8SI_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsqv4di_mask, "__builtin_ia32_vmovrsq256_mask", IX86_BUILTIN_VMOVRSQ_256, UNKNOWN, (int) V4DI_FTYPE_PCV4DI_V4DI_UQI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrswv16hi_mask, "__builtin_ia32_vmovrsw256_mask", IX86_BUILTIN_VMOVRSW_256, UNKNOWN, (int) V16HI_FTYPE_PCV16HI_V16HI_UHI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsbv16qi_mask, "__builtin_ia32_vmovrsb128_mask", IX86_BUILTIN_VMOVRSB_128, UNKNOWN, (int) V16QI_FTYPE_PCV16QI_V16QI_UHI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsdv4si_mask, "__builtin_ia32_vmovrsd128_mask", IX86_BUILTIN_VMOVRSD_128, UNKNOWN, (int) V4SI_FTYPE_PCV4SI_V4SI_UQI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrsqv2di_mask, "__builtin_ia32_vmovrsq128_mask", IX86_BUILTIN_VMOVRSQ_128, UNKNOWN, (int) V2DI_FTYPE_PCV2DI_V2DI_UQI)
+BDESC (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_MOVRS | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vmovrswv8hi_mask, "__builtin_ia32_vmovrsw128_mask", IX86_BUILTIN_VMOVRSW_128, UNKNOWN, (int) V8HI_FTYPE_PCV8HI_V8HI_UQI)
BDESC_END (SPECIAL_ARGS, PURE_ARGS)
@@ -1686,10 +1686,10 @@ BDESC (OPTION_MASK_ISA_AVX, OPTION_MASK_ISA2_SM3, CODE_FOR_vsm3rnds2, "__builtin
/* SM4 */
BDESC (0, OPTION_MASK_ISA2_SM4, CODE_FOR_vsm4key4_v4si, "__builtin_ia32_vsm4key4128", IX86_BUILTIN_VSM4KEY4128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI)
BDESC (0, OPTION_MASK_ISA2_SM4, CODE_FOR_vsm4key4_v8si, "__builtin_ia32_vsm4key4256", IX86_BUILTIN_VSM4KEY4256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_SM4 | OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vsm4key4_v16si, "__builtin_ia32_vsm4key4512", IX86_BUILTIN_VSM4KEY4512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_SM4 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vsm4key4_v16si, "__builtin_ia32_vsm4key4512", IX86_BUILTIN_VSM4KEY4512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI)
BDESC (0, OPTION_MASK_ISA2_SM4, CODE_FOR_vsm4rnds4_v4si, "__builtin_ia32_vsm4rnds4128", IX86_BUILTIN_VSM4RNDS4128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI)
BDESC (0, OPTION_MASK_ISA2_SM4, CODE_FOR_vsm4rnds4_v8si, "__builtin_ia32_vsm4rnds4256", IX86_BUILTIN_VSM4RNDS4256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_SM4 | OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vsm4rnds4_v16si, "__builtin_ia32_vsm4rnds4512", IX86_BUILTIN_VSM4RNDS4512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_SM4 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vsm4rnds4_v16si, "__builtin_ia32_vsm4rnds4512", IX86_BUILTIN_VSM4RNDS4512, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI)
/* SHA512 */
BDESC (0, OPTION_MASK_ISA2_SHA512, CODE_FOR_vsha512msg1, "__builtin_ia32_vsha512msg1", IX86_BUILTIN_VSHA512MSG1, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI)
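The SM4 hunk above only swaps OPTION_MASK_ISA2_AVX10_2_512 for OPTION_MASK_ISA2_AVX10_2, so the 512-bit SM4 builtins become reachable from a plain avx10.2 target. A minimal usage sketch, assuming the _mm512_sm4key4_epi32 / _mm512_sm4rnds4_epi32 intrinsic names from sm4intrin.h and that "sm4,avx10.2" is the required target pair after this change:

/* Sketch: 512-bit SM4 key expansion plus one round pass, previously
   gated by sm4 + avx10.2-512, now by sm4 + avx10.2.  */
#include <immintrin.h>

__attribute__ ((target ("sm4,avx10.2")))
__m512i
sm4_step (__m512i state, __m512i key)
{
  __m512i k = _mm512_sm4key4_epi32 (state, key);
  return _mm512_sm4rnds4_epi32 (state, k);
}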
@@ -2768,32 +2768,32 @@ BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpws
BDESC (OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpdpwssds_v4si_maskz, "__builtin_ia32_vpdpwssds_v4si_maskz", IX86_BUILTIN_VPDPWSSDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
/* AVXVNNIINT8 */
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssd_v8si, "__builtin_ia32_vpdpbssd256", IX86_BUILTIN_VPDPBSSDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssds_v8si, "__builtin_ia32_vpdpbssds256", IX86_BUILTIN_VPDPBSSDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsud_v8si, "__builtin_ia32_vpdpbsud256", IX86_BUILTIN_VPDPBSUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsuds_v8si, "__builtin_ia32_vpdpbsuds256", IX86_BUILTIN_VPDPBSUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuud_v8si, "__builtin_ia32_vpdpbuud256", IX86_BUILTIN_VPDPBUUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuuds_v8si, "__builtin_ia32_vpdpbuuds256", IX86_BUILTIN_VPDPBUUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssd_v4si, "__builtin_ia32_vpdpbssd128", IX86_BUILTIN_VPDPBSSDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssds_v4si, "__builtin_ia32_vpdpbssds128", IX86_BUILTIN_VPDPBSSDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsud_v4si, "__builtin_ia32_vpdpbsud128", IX86_BUILTIN_VPDPBSUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsuds_v4si, "__builtin_ia32_vpdpbsuds128", IX86_BUILTIN_VPDPBSUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuud_v4si, "__builtin_ia32_vpdpbuud128", IX86_BUILTIN_VPDPBUUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuuds_v4si, "__builtin_ia32_vpdpbuuds128", IX86_BUILTIN_VPDPBUUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v8si, "__builtin_ia32_vpdpbssd256", IX86_BUILTIN_VPDPBSSDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v8si, "__builtin_ia32_vpdpbssds256", IX86_BUILTIN_VPDPBSSDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v8si, "__builtin_ia32_vpdpbsud256", IX86_BUILTIN_VPDPBSUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v8si, "__builtin_ia32_vpdpbsuds256", IX86_BUILTIN_VPDPBSUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v8si, "__builtin_ia32_vpdpbuud256", IX86_BUILTIN_VPDPBUUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v8si, "__builtin_ia32_vpdpbuuds256", IX86_BUILTIN_VPDPBUUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v4si, "__builtin_ia32_vpdpbssd128", IX86_BUILTIN_VPDPBSSDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v4si, "__builtin_ia32_vpdpbssds128", IX86_BUILTIN_VPDPBSSDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v4si, "__builtin_ia32_vpdpbsud128", IX86_BUILTIN_VPDPBSUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v4si, "__builtin_ia32_vpdpbsuds128", IX86_BUILTIN_VPDPBSUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v4si, "__builtin_ia32_vpdpbuud128", IX86_BUILTIN_VPDPBUUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT8 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v4si, "__builtin_ia32_vpdpbuuds128", IX86_BUILTIN_VPDPBUUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
/* AVXVNNIINT16 */
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusd_v8si, "__builtin_ia32_vpdpwusd256", IX86_BUILTIN_VPDPWUSDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusds_v8si, "__builtin_ia32_vpdpwusds256", IX86_BUILTIN_VPDPWUSDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsud_v8si, "__builtin_ia32_vpdpwsud256", IX86_BUILTIN_VPDPWSUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsuds_v8si, "__builtin_ia32_vpdpwsuds256", IX86_BUILTIN_VPDPWSUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuud_v8si, "__builtin_ia32_vpdpwuud256", IX86_BUILTIN_VPDPWUUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuuds_v8si, "__builtin_ia32_vpdpwuuds256", IX86_BUILTIN_VPDPWUUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusd_v4si, "__builtin_ia32_vpdpwusd128", IX86_BUILTIN_VPDPWUSDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusds_v4si, "__builtin_ia32_vpdpwusds128", IX86_BUILTIN_VPDPWUSDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsud_v4si, "__builtin_ia32_vpdpwsud128", IX86_BUILTIN_VPDPWSUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsuds_v4si, "__builtin_ia32_vpdpwsuds128", IX86_BUILTIN_VPDPWSUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuud_v4si, "__builtin_ia32_vpdpwuud128", IX86_BUILTIN_VPDPWUUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
-BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuuds_v4si, "__builtin_ia32_vpdpwuuds128", IX86_BUILTIN_VPDPWUUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v8si, "__builtin_ia32_vpdpwusd256", IX86_BUILTIN_VPDPWUSDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v8si, "__builtin_ia32_vpdpwusds256", IX86_BUILTIN_VPDPWUSDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v8si, "__builtin_ia32_vpdpwsud256", IX86_BUILTIN_VPDPWSUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v8si, "__builtin_ia32_vpdpwsuds256", IX86_BUILTIN_VPDPWSUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v8si, "__builtin_ia32_vpdpwuud256", IX86_BUILTIN_VPDPWUUDV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v8si, "__builtin_ia32_vpdpwuuds256", IX86_BUILTIN_VPDPWUUDSV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v4si, "__builtin_ia32_vpdpwusd128", IX86_BUILTIN_VPDPWUSDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v4si, "__builtin_ia32_vpdpwusds128", IX86_BUILTIN_VPDPWUSDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v4si, "__builtin_ia32_vpdpwsud128", IX86_BUILTIN_VPDPWSUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v4si, "__builtin_ia32_vpdpwsuds128", IX86_BUILTIN_VPDPWSUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v4si, "__builtin_ia32_vpdpwuud128", IX86_BUILTIN_VPDPWUUDV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
+BDESC (0, OPTION_MASK_ISA2_AVXVNNIINT16 | OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v4si, "__builtin_ia32_vpdpwuuds128", IX86_BUILTIN_VPDPWUUDSV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI)
/* VPCLMULQDQ */
BDESC (OPTION_MASK_ISA_VPCLMULQDQ | OPTION_MASK_ISA_AVX512VL, 0, CODE_FOR_vpclmulqdq_v2di, "__builtin_ia32_vpclmulqdq_v2di", IX86_BUILTIN_VPCLMULQDQ2, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT)
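In the AVXVNNIINT8/AVXVNNIINT16 hunk above only the gating mask changes: the unmasked 128/256-bit dot-product builtins are now enabled by either the dedicated VNNI-INT option or the unified AVX10.2. A short usage sketch, assuming the _mm256_dpbssd_epi32 name from avxvnniint8intrin.h (the other builtins in the hunk follow the same pattern):

/* Sketch: signed-by-signed 8-bit dot product accumulating into 32-bit
   lanes; after this change target("avx10.2") enables it in addition to
   target("avxvnniint8").  */
#include <immintrin.h>

__attribute__ ((target ("avxvnniint8")))
__m256i
dot_accumulate (__m256i acc, __m256i a, __m256i b)
{
  return _mm256_dpbssd_epi32 (acc, a, b);
}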
@@ -3041,321 +3041,325 @@ BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_fmulc_v16hf_mask, "__builtin_ia32_vfmulcph256_mask", IX86_BUILTIN_VFMULCPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI)
/* AVX10.2. */
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbssd_v16si, "__builtin_ia32_vpdpbssd512", IX86_BUILTIN_VPDPBSSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbssds_v16si, "__builtin_ia32_vpdpbssds512", IX86_BUILTIN_VPDPBSSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbsud_v16si, "__builtin_ia32_vpdpbsud512", IX86_BUILTIN_VPDPBSUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbsuds_v16si, "__builtin_ia32_vpdpbsuds512", IX86_BUILTIN_VPDPBSUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbuud_v16si, "__builtin_ia32_vpdpbuud512", IX86_BUILTIN_VPDPBUUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbuuds_v16si, "__builtin_ia32_vpdpbuuds512", IX86_BUILTIN_VPDPBUUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbssd_v16si_mask, "__builtin_ia32_vpdpbssd_v16si_mask", IX86_BUILTIN_VPDPBSSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbssd_v16si_maskz, "__builtin_ia32_vpdpbssd_v16si_maskz", IX86_BUILTIN_VPDPBSSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbssds_v16si_mask, "__builtin_ia32_vpdpbssds_v16si_mask", IX86_BUILTIN_VPDPBSSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbssds_v16si_maskz, "__builtin_ia32_vpdpbssds_v16si_maskz", IX86_BUILTIN_VPDPBSSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbsud_v16si_mask, "__builtin_ia32_vpdpbsud_v16si_mask", IX86_BUILTIN_VPDPBSUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbsud_v16si_maskz, "__builtin_ia32_vpdpbsud_v16si_maskz", IX86_BUILTIN_VPDPBSUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbsuds_v16si_mask, "__builtin_ia32_vpdpbsuds_v16si_mask", IX86_BUILTIN_VPDPBSUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbsuds_v16si_maskz, "__builtin_ia32_vpdpbsuds_v16si_maskz", IX86_BUILTIN_VPDPBSUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbuud_v16si_mask, "__builtin_ia32_vpdpbuud_v16si_mask", IX86_BUILTIN_VPDPBUUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbuud_v16si_maskz, "__builtin_ia32_vpdpbuud_v16si_maskz", IX86_BUILTIN_VPDPBUUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbuuds_v16si_mask, "__builtin_ia32_vpdpbuuds_v16si_mask", IX86_BUILTIN_VPDPBUUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpbuuds_v16si_maskz, "__builtin_ia32_vpdpbuuds_v16si_maskz", IX86_BUILTIN_VPDPBUUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssd_v8si_mask, "__builtin_ia32_vpdpbssd_v8si_mask", IX86_BUILTIN_VPDPBSSDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssd_v8si_maskz, "__builtin_ia32_vpdpbssd_v8si_maskz", IX86_BUILTIN_VPDPBSSDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssds_v8si_mask, "__builtin_ia32_vpdpbssds_v8si_mask", IX86_BUILTIN_VPDPBSSDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssds_v8si_maskz, "__builtin_ia32_vpdpbssds_v8si_maskz", IX86_BUILTIN_VPDPBSSDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsud_v8si_mask, "__builtin_ia32_vpdpbsud_v8si_mask", IX86_BUILTIN_VPDPBSUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsud_v8si_maskz, "__builtin_ia32_vpdpbsud_v8si_maskz", IX86_BUILTIN_VPDPBSUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsuds_v8si_mask, "__builtin_ia32_vpdpbsuds_v8si_mask", IX86_BUILTIN_VPDPBSUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsuds_v8si_maskz, "__builtin_ia32_vpdpbsuds_v8si_maskz", IX86_BUILTIN_VPDPBSUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuud_v8si_mask, "__builtin_ia32_vpdpbuud_v8si_mask", IX86_BUILTIN_VPDPBUUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuud_v8si_maskz, "__builtin_ia32_vpdpbuud_v8si_maskz", IX86_BUILTIN_VPDPBUUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuuds_v8si_mask, "__builtin_ia32_vpdpbuuds_v8si_mask", IX86_BUILTIN_VPDPBUUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuuds_v8si_maskz, "__builtin_ia32_vpdpbuuds_v8si_maskz", IX86_BUILTIN_VPDPBUUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssd_v4si_mask, "__builtin_ia32_vpdpbssd_v4si_mask", IX86_BUILTIN_VPDPBSSDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssd_v4si_maskz, "__builtin_ia32_vpdpbssd_v4si_maskz", IX86_BUILTIN_VPDPBSSDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssds_v4si_mask, "__builtin_ia32_vpdpbssds_v4si_mask", IX86_BUILTIN_VPDPBSSDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbssds_v4si_maskz, "__builtin_ia32_vpdpbssds_v4si_maskz", IX86_BUILTIN_VPDPBSSDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsud_v4si_mask, "__builtin_ia32_vpdpbsud_v4si_mask", IX86_BUILTIN_VPDPBSUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsud_v4si_maskz, "__builtin_ia32_vpdpbsud_v4si_maskz", IX86_BUILTIN_VPDPBSUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsuds_v4si_mask, "__builtin_ia32_vpdpbsuds_v4si_mask", IX86_BUILTIN_VPDPBSUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbsuds_v4si_maskz, "__builtin_ia32_vpdpbsuds_v4si_maskz", IX86_BUILTIN_VPDPBSUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuud_v4si_mask, "__builtin_ia32_vpdpbuud_v4si_mask", IX86_BUILTIN_VPDPBUUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuud_v4si_maskz, "__builtin_ia32_vpdpbuud_v4si_maskz", IX86_BUILTIN_VPDPBUUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuuds_v4si_mask, "__builtin_ia32_vpdpbuuds_v4si_mask", IX86_BUILTIN_VPDPBUUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpbuuds_v4si_maskz, "__builtin_ia32_vpdpbuuds_v4si_maskz", IX86_BUILTIN_VPDPBUUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwsud_v16si, "__builtin_ia32_vpdpwsud512", IX86_BUILTIN_VPDPWSUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwsuds_v16si, "__builtin_ia32_vpdpwsuds512", IX86_BUILTIN_VPDPWSUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwusd_v16si, "__builtin_ia32_vpdpwusd512", IX86_BUILTIN_VPDPWUSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwusds_v16si, "__builtin_ia32_vpdpwusds512", IX86_BUILTIN_VPDPWUSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwuud_v16si, "__builtin_ia32_vpdpwuud512", IX86_BUILTIN_VPDPWUUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwuuds_v16si, "__builtin_ia32_vpdpwuuds512", IX86_BUILTIN_VPDPWUUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwsud_v16si_mask, "__builtin_ia32_vpdpwsud_v16si_mask", IX86_BUILTIN_VPDPWSUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwsud_v16si_maskz, "__builtin_ia32_vpdpwsud_v16si_maskz", IX86_BUILTIN_VPDPWSUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwsuds_v16si_mask, "__builtin_ia32_vpdpwsuds_v16si_mask", IX86_BUILTIN_VPDPWSUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwsuds_v16si_maskz, "__builtin_ia32_vpdpwsuds_v16si_maskz", IX86_BUILTIN_VPDPWSUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwusd_v16si_mask, "__builtin_ia32_vpdpwusd_v16si_mask", IX86_BUILTIN_VPDPWUSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwusd_v16si_maskz, "__builtin_ia32_vpdpwusd_v16si_maskz", IX86_BUILTIN_VPDPWUSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwusds_v16si_mask, "__builtin_ia32_vpdpwusds_v16si_mask", IX86_BUILTIN_VPDPWUSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwusds_v16si_maskz, "__builtin_ia32_vpdpwusds_v16si_maskz", IX86_BUILTIN_VPDPWUSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwuud_v16si_mask, "__builtin_ia32_vpdpwuud_v16si_mask", IX86_BUILTIN_VPDPWUUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwuud_v16si_maskz, "__builtin_ia32_vpdpwuud_v16si_maskz", IX86_BUILTIN_VPDPWUUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwuuds_v16si_mask, "__builtin_ia32_vpdpwuuds_v16si_mask", IX86_BUILTIN_VPDPWUUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vpdpwuuds_v16si_maskz, "__builtin_ia32_vpdpwuuds_v16si_maskz", IX86_BUILTIN_VPDPWUUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsud_v8si_mask, "__builtin_ia32_vpdpwsud_v8si_mask", IX86_BUILTIN_VPDPWSUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsud_v8si_maskz, "__builtin_ia32_vpdpwsud_v8si_maskz", IX86_BUILTIN_VPDPWSUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsuds_v8si_mask, "__builtin_ia32_vpdpwsuds_v8si_mask", IX86_BUILTIN_VPDPWSUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsuds_v8si_maskz, "__builtin_ia32_vpdpwsuds_v8si_maskz", IX86_BUILTIN_VPDPWSUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusd_v8si_mask, "__builtin_ia32_vpdpwusd_v8si_mask", IX86_BUILTIN_VPDPWUSDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusd_v8si_maskz, "__builtin_ia32_vpdpwusd_v8si_maskz", IX86_BUILTIN_VPDPWUSDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusds_v8si_mask, "__builtin_ia32_vpdpwusds_v8si_mask", IX86_BUILTIN_VPDPWUSDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusds_v8si_maskz, "__builtin_ia32_vpdpwusds_v8si_maskz", IX86_BUILTIN_VPDPWUSDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuud_v8si_mask, "__builtin_ia32_vpdpwuud_v8si_mask", IX86_BUILTIN_VPDPWUUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuud_v8si_maskz, "__builtin_ia32_vpdpwuud_v8si_maskz", IX86_BUILTIN_VPDPWUUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuuds_v8si_mask, "__builtin_ia32_vpdpwuuds_v8si_mask", IX86_BUILTIN_VPDPWUUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuuds_v8si_maskz, "__builtin_ia32_vpdpwuuds_v8si_maskz", IX86_BUILTIN_VPDPWUUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsud_v4si_mask, "__builtin_ia32_vpdpwsud_v4si_mask", IX86_BUILTIN_VPDPWSUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsud_v4si_maskz, "__builtin_ia32_vpdpwsud_v4si_maskz", IX86_BUILTIN_VPDPWSUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsuds_v4si_mask, "__builtin_ia32_vpdpwsuds_v4si_mask", IX86_BUILTIN_VPDPWSUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwsuds_v4si_maskz, "__builtin_ia32_vpdpwsuds_v4si_maskz", IX86_BUILTIN_VPDPWSUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusd_v4si_mask, "__builtin_ia32_vpdpwusd_v4si_mask", IX86_BUILTIN_VPDPWUSDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusd_v4si_maskz, "__builtin_ia32_vpdpwusd_v4si_maskz", IX86_BUILTIN_VPDPWUSDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusds_v4si_mask, "__builtin_ia32_vpdpwusds_v4si_mask", IX86_BUILTIN_VPDPWUSDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwusds_v4si_maskz, "__builtin_ia32_vpdpwusds_v4si_maskz", IX86_BUILTIN_VPDPWUSDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuud_v4si_mask, "__builtin_ia32_vpdpwuud_v4si_mask", IX86_BUILTIN_VPDPWUUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuud_v4si_maskz, "__builtin_ia32_vpdpwuud_v4si_maskz", IX86_BUILTIN_VPDPWUUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuuds_v4si_mask, "__builtin_ia32_vpdpwuuds_v4si_mask", IX86_BUILTIN_VPDPWUUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vpdpwuuds_v4si_maskz, "__builtin_ia32_vpdpwuuds_v4si_maskz", IX86_BUILTIN_VPDPWUUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vdpphps_v16sf_mask, "__builtin_ia32_vdpphps512_mask", IX86_BUILTIN_VDPPHPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vdpphps_v16sf_maskz, "__builtin_ia32_vdpphps512_maskz", IX86_BUILTIN_VDPPHPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vdpphps_v8sf_mask, "__builtin_ia32_vdpphps256_mask", IX86_BUILTIN_VDPPHPS256_MASK, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vdpphps_v8sf_maskz, "__builtin_ia32_vdpphps256_maskz", IX86_BUILTIN_VDPPHPS256_MASKZ, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vdpphps_v4sf_mask, "__builtin_ia32_vdpphps128_mask", IX86_BUILTIN_VDPPHPS128_MASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vdpphps_v4sf_maskz, "__builtin_ia32_vdpphps128_maskz", IX86_BUILTIN_VDPPHPS128_MASKZ, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mpsadbw, "__builtin_ia32_mpsadbw512", IX86_BUILTIN_AVX10_2_MPSADBW, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mpsadbw_mask, "__builtin_ia32_mpsadbw512_mask", IX86_BUILTIN_VMPSADBW_V32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V64QI_V64QI_INT_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx2_mpsadbw_mask, "__builtin_ia32_mpsadbw256_mask", IX86_BUILTIN_VMPSADBW_V16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI_INT_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sse4_1_mpsadbw_mask, "__builtin_ia32_mpsadbw128_mask", IX86_BUILTIN_VMPSADBW_V8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI_INT_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvt2ps2phx_v8hf_mask, "__builtin_ia32_vcvt2ps2phx128_mask", IX86_BUILTIN_VCVT2PS2PHX_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V4SF_V4SF_V8HF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2bf8v8hf, "__builtin_ia32_vcvtbiasph2bf8128", IX86_BUILTIN_VCVTBIASPH2BF8128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2bf8v8hf_mask, "__builtin_ia32_vcvtbiasph2bf8128_mask", IX86_BUILTIN_VCVTBIASPH2BF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2bf8v16hf_mask, "__builtin_ia32_vcvtbiasph2bf8256_mask", IX86_BUILTIN_VCVTBIASPH2BF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtbiasph2bf8v32hf_mask, "__builtin_ia32_vcvtbiasph2bf8512_mask", IX86_BUILTIN_VCVTBIASPH2BF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2bf8sv8hf, "__builtin_ia32_vcvtbiasph2bf8s128", IX86_BUILTIN_VCVTBIASPH2BF8S128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2bf8sv8hf_mask, "__builtin_ia32_vcvtbiasph2bf8s128_mask", IX86_BUILTIN_VCVTBIASPH2BF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2bf8sv16hf_mask, "__builtin_ia32_vcvtbiasph2bf8s256_mask", IX86_BUILTIN_VCVTBIASPH2BF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtbiasph2bf8sv32hf_mask, "__builtin_ia32_vcvtbiasph2bf8s512_mask", IX86_BUILTIN_VCVTBIASPH2BF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2hf8v8hf, "__builtin_ia32_vcvtbiasph2hf8128", IX86_BUILTIN_VCVTBIASPH2HF8128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2hf8v8hf_mask, "__builtin_ia32_vcvtbiasph2hf8128_mask", IX86_BUILTIN_VCVTBIASPH2HF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2hf8v16hf_mask, "__builtin_ia32_vcvtbiasph2hf8256_mask", IX86_BUILTIN_VCVTBIASPH2HF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtbiasph2hf8v32hf_mask, "__builtin_ia32_vcvtbiasph2hf8512_mask", IX86_BUILTIN_VCVTBIASPH2HF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2hf8sv8hf, "__builtin_ia32_vcvtbiasph2hf8s128", IX86_BUILTIN_VCVTBIASPH2HF8S128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2hf8sv8hf_mask, "__builtin_ia32_vcvtbiasph2hf8s128_mask", IX86_BUILTIN_VCVTBIASPH2HF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtbiasph2hf8sv16hf_mask, "__builtin_ia32_vcvtbiasph2hf8s256_mask", IX86_BUILTIN_VCVTBIASPH2HF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtbiasph2hf8sv32hf_mask, "__builtin_ia32_vcvtbiasph2hf8s512_mask", IX86_BUILTIN_VCVTBIASPH2HF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2bf8v8hf_mask, "__builtin_ia32_vcvt2ph2bf8128_mask", IX86_BUILTIN_VCVT2PH2BF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2bf8v16hf_mask, "__builtin_ia32_vcvt2ph2bf8256_mask", IX86_BUILTIN_VCVT2PH2BF8256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvt2ph2bf8v32hf_mask, "__builtin_ia32_vcvt2ph2bf8512_mask", IX86_BUILTIN_VCVT2PH2BF8512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2bf8sv8hf_mask, "__builtin_ia32_vcvt2ph2bf8s128_mask", IX86_BUILTIN_VCVT2PH2BF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2bf8sv16hf_mask, "__builtin_ia32_vcvt2ph2bf8s256_mask", IX86_BUILTIN_VCVT2PH2BF8S256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvt2ph2bf8sv32hf_mask, "__builtin_ia32_vcvt2ph2bf8s512_mask", IX86_BUILTIN_VCVT2PH2BF8S512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2hf8v8hf_mask, "__builtin_ia32_vcvt2ph2hf8128_mask", IX86_BUILTIN_VCVT2PH2HF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2hf8v16hf_mask, "__builtin_ia32_vcvt2ph2hf8256_mask", IX86_BUILTIN_VCVT2PH2HF8256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvt2ph2hf8v32hf_mask, "__builtin_ia32_vcvt2ph2hf8512_mask", IX86_BUILTIN_VCVT2PH2HF8512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2hf8sv8hf_mask, "__builtin_ia32_vcvt2ph2hf8s128_mask", IX86_BUILTIN_VCVT2PH2HF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvt2ph2hf8sv16hf_mask, "__builtin_ia32_vcvt2ph2hf8s256_mask", IX86_BUILTIN_VCVT2PH2HF8S256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvt2ph2hf8sv32hf_mask, "__builtin_ia32_vcvt2ph2hf8s512_mask", IX86_BUILTIN_VCVT2PH2HF8S512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2bf8v8hf_mask, "__builtin_ia32_vcvtph2bf8128_mask", IX86_BUILTIN_VCVTPH2BF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2bf8v16hf_mask, "__builtin_ia32_vcvtph2bf8256_mask", IX86_BUILTIN_VCVTPH2BF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtph2bf8v32hf_mask, "__builtin_ia32_vcvtph2bf8512_mask", IX86_BUILTIN_VCVTPH2BF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2bf8sv8hf_mask, "__builtin_ia32_vcvtph2bf8s128_mask", IX86_BUILTIN_VCVTPH2BF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2bf8sv16hf_mask, "__builtin_ia32_vcvtph2bf8s256_mask", IX86_BUILTIN_VCVTPH2BF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtph2bf8sv32hf_mask, "__builtin_ia32_vcvtph2bf8s512_mask", IX86_BUILTIN_VCVTPH2BF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2hf8v8hf_mask, "__builtin_ia32_vcvtph2hf8128_mask", IX86_BUILTIN_VCVTPH2HF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2hf8v16hf_mask, "__builtin_ia32_vcvtph2hf8256_mask", IX86_BUILTIN_VCVTPH2HF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtph2hf8v32hf_mask, "__builtin_ia32_vcvtph2hf8512_mask", IX86_BUILTIN_VCVTPH2HF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2hf8sv8hf_mask, "__builtin_ia32_vcvtph2hf8s128_mask", IX86_BUILTIN_VCVTPH2HF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2hf8sv16hf_mask, "__builtin_ia32_vcvtph2hf8s256_mask", IX86_BUILTIN_VCVTPH2HF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtph2hf8sv32hf_mask, "__builtin_ia32_vcvtph2hf8s512_mask", IX86_BUILTIN_VCVTPH2HF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvthf82phv8hf_mask, "__builtin_ia32_vcvthf82ph128_mask", IX86_BUILTIN_VCVTHF82PH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V16QI_V8HF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvthf82phv16hf_mask, "__builtin_ia32_vcvthf82ph256_mask", IX86_BUILTIN_VCVTHF82PH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16QI_V16HF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvthf82phv32hf_mask, "__builtin_ia32_vcvthf82ph512_mask", IX86_BUILTIN_VCVTHF82PH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32QI_V32HF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addbf16_v32bf, "__builtin_ia32_addbf16512", IX86_BUILTIN_ADDBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addbf16_v32bf_mask, "__builtin_ia32_addbf16512_mask", IX86_BUILTIN_ADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v16bf, "__builtin_ia32_addbf16256", IX86_BUILTIN_ADDBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v16bf_mask, "__builtin_ia32_addbf16256_mask", IX86_BUILTIN_ADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v8bf, "__builtin_ia32_addbf16128", IX86_BUILTIN_ADDBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v8bf_mask, "__builtin_ia32_addbf16128_mask", IX86_BUILTIN_ADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subbf16_v32bf, "__builtin_ia32_subbf16512", IX86_BUILTIN_SUBBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subbf16_v32bf_mask, "__builtin_ia32_subbf16512_mask", IX86_BUILTIN_SUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v16bf, "__builtin_ia32_subbf16256", IX86_BUILTIN_SUBBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v16bf_mask, "__builtin_ia32_subbf16256_mask", IX86_BUILTIN_SUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v8bf, "__builtin_ia32_subbf16128", IX86_BUILTIN_SUBBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v8bf_mask, "__builtin_ia32_subbf16128_mask", IX86_BUILTIN_SUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulbf16_v32bf, "__builtin_ia32_mulbf16512", IX86_BUILTIN_MULBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulbf16_v32bf_mask, "__builtin_ia32_mulbf16512_mask", IX86_BUILTIN_MULBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v16bf, "__builtin_ia32_mulbf16256", IX86_BUILTIN_MULBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v16bf_mask, "__builtin_ia32_mulbf16256_mask", IX86_BUILTIN_MULBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v8bf, "__builtin_ia32_mulbf16128", IX86_BUILTIN_MULBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v8bf_mask, "__builtin_ia32_mulbf16128_mask", IX86_BUILTIN_MULBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divbf16_v32bf, "__builtin_ia32_divbf16512", IX86_BUILTIN_DIVBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divbf16_v32bf_mask, "__builtin_ia32_divbf16512_mask", IX86_BUILTIN_DIVBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v16bf, "__builtin_ia32_divbf16256", IX86_BUILTIN_DIVBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v16bf_mask, "__builtin_ia32_divbf16256_mask", IX86_BUILTIN_DIVBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v8bf, "__builtin_ia32_divbf16128", IX86_BUILTIN_DIVBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v8bf_mask, "__builtin_ia32_divbf16128_mask", IX86_BUILTIN_DIVBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxbf16_v32bf, "__builtin_ia32_maxbf16512", IX86_BUILTIN_MAXBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxbf16_v32bf_mask, "__builtin_ia32_maxbf16512_mask", IX86_BUILTIN_MAXBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxbf16_v16bf, "__builtin_ia32_maxbf16256", IX86_BUILTIN_MAXBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxbf16_v16bf_mask, "__builtin_ia32_maxbf16256_mask", IX86_BUILTIN_MAXBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxbf16_v8bf, "__builtin_ia32_maxbf16128", IX86_BUILTIN_MAXBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxbf16_v8bf_mask, "__builtin_ia32_maxbf16128_mask", IX86_BUILTIN_MAXBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_sminbf16_v32bf, "__builtin_ia32_minbf16512", IX86_BUILTIN_MINBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_sminbf16_v32bf_mask, "__builtin_ia32_minbf16512_mask", IX86_BUILTIN_MINBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sminbf16_v16bf, "__builtin_ia32_minbf16256", IX86_BUILTIN_MINBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sminbf16_v16bf_mask, "__builtin_ia32_minbf16256_mask", IX86_BUILTIN_MINBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sminbf16_v8bf, "__builtin_ia32_minbf16128", IX86_BUILTIN_MINBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sminbf16_v8bf_mask, "__builtin_ia32_minbf16128_mask", IX86_BUILTIN_MINBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_scalefbf16_v32bf, "__builtin_ia32_scalefbf16512", IX86_BUILTIN_SCALEFBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_scalefbf16_v32bf_mask, "__builtin_ia32_scalefbf16512_mask", IX86_BUILTIN_SCALEFBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_scalefbf16_v16bf, "__builtin_ia32_scalefbf16256", IX86_BUILTIN_SCALEFBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_scalefbf16_v16bf_mask, "__builtin_ia32_scalefbf16256_mask", IX86_BUILTIN_SCALEFBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_scalefbf16_v8bf, "__builtin_ia32_scalefbf16128", IX86_BUILTIN_SCALEFBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_scalefbf16_v8bf_mask, "__builtin_ia32_scalefbf16128_mask", IX86_BUILTIN_SCALEFBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fmaddbf16_v32bf_mask, "__builtin_ia32_fmaddbf16512_mask", IX86_BUILTIN_FMADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fmaddbf16_v32bf_mask3, "__builtin_ia32_fmaddbf16512_mask3", IX86_BUILTIN_FMADDBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fmaddbf16_v32bf_maskz, "__builtin_ia32_fmaddbf16512_maskz", IX86_BUILTIN_FMADDBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmaddbf16_v16bf_mask, "__builtin_ia32_fmaddbf16256_mask", IX86_BUILTIN_FMADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmaddbf16_v16bf_mask3, "__builtin_ia32_fmaddbf16256_mask3", IX86_BUILTIN_FMADDBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmaddbf16_v16bf_maskz, "__builtin_ia32_fmaddbf16256_maskz", IX86_BUILTIN_FMADDBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmaddbf16_v8bf_mask, "__builtin_ia32_fmaddbf16128_mask", IX86_BUILTIN_FMADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmaddbf16_v8bf_mask3, "__builtin_ia32_fmaddbf16128_mask3", IX86_BUILTIN_FMADDBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmaddbf16_v8bf_maskz, "__builtin_ia32_fmaddbf16128_maskz", IX86_BUILTIN_FMADDBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fmsubbf16_v32bf_mask, "__builtin_ia32_fmsubbf16512_mask", IX86_BUILTIN_FMSUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fmsubbf16_v32bf_mask3, "__builtin_ia32_fmsubbf16512_mask3", IX86_BUILTIN_FMSUBBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fmsubbf16_v32bf_maskz, "__builtin_ia32_fmsubbf16512_maskz", IX86_BUILTIN_FMSUBBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmsubbf16_v16bf_mask, "__builtin_ia32_fmsubbf16256_mask", IX86_BUILTIN_FMSUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmsubbf16_v16bf_mask3, "__builtin_ia32_fmsubbf16256_mask3", IX86_BUILTIN_FMSUBBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmsubbf16_v16bf_maskz, "__builtin_ia32_fmsubbf16256_maskz", IX86_BUILTIN_FMSUBBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmsubbf16_v8bf_mask, "__builtin_ia32_fmsubbf16128_mask", IX86_BUILTIN_FMSUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmsubbf16_v8bf_mask3, "__builtin_ia32_fmsubbf16128_mask3", IX86_BUILTIN_FMSUBBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fmsubbf16_v8bf_maskz, "__builtin_ia32_fmsubbf16128_maskz", IX86_BUILTIN_FMSUBBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fnmaddbf16_v32bf_mask, "__builtin_ia32_fnmaddbf16512_mask", IX86_BUILTIN_FNMADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fnmaddbf16_v32bf_mask3, "__builtin_ia32_fnmaddbf16512_mask3", IX86_BUILTIN_FNMADDBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fnmaddbf16_v32bf_maskz, "__builtin_ia32_fnmaddbf16512_maskz", IX86_BUILTIN_FNMADDBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmaddbf16_v16bf_mask, "__builtin_ia32_fnmaddbf16256_mask", IX86_BUILTIN_FNMADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmaddbf16_v16bf_mask3, "__builtin_ia32_fnmaddbf16256_mask3", IX86_BUILTIN_FNMADDBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmaddbf16_v16bf_maskz, "__builtin_ia32_fnmaddbf16256_maskz", IX86_BUILTIN_FNMADDBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmaddbf16_v8bf_mask, "__builtin_ia32_fnmaddbf16128_mask", IX86_BUILTIN_FNMADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmaddbf16_v8bf_mask3, "__builtin_ia32_fnmaddbf16128_mask3", IX86_BUILTIN_FNMADDBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmaddbf16_v8bf_maskz, "__builtin_ia32_fnmaddbf16128_maskz", IX86_BUILTIN_FNMADDBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fnmsubbf16_v32bf_mask, "__builtin_ia32_fnmsubbf16512_mask", IX86_BUILTIN_FNMSUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fnmsubbf16_v32bf_mask3, "__builtin_ia32_fnmsubbf16512_mask3", IX86_BUILTIN_FNMSUBBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fnmsubbf16_v32bf_maskz, "__builtin_ia32_fnmsubbf16512_maskz", IX86_BUILTIN_FNMSUBBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmsubbf16_v16bf_mask, "__builtin_ia32_fnmsubbf16256_mask", IX86_BUILTIN_FNMSUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmsubbf16_v16bf_mask3, "__builtin_ia32_fnmsubbf16256_mask3", IX86_BUILTIN_FNMSUBBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmsubbf16_v16bf_maskz, "__builtin_ia32_fnmsubbf16256_maskz", IX86_BUILTIN_FNMSUBBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmsubbf16_v8bf_mask, "__builtin_ia32_fnmsubbf16128_mask", IX86_BUILTIN_FNMSUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmsubbf16_v8bf_mask3, "__builtin_ia32_fnmsubbf16128_mask3", IX86_BUILTIN_FNMSUBBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fnmsubbf16_v8bf_maskz, "__builtin_ia32_fnmsubbf16128_maskz", IX86_BUILTIN_FNMSUBBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rsqrtbf16_v32bf_mask, "__builtin_ia32_rsqrtbf16512_mask", IX86_BUILTIN_RSQRTBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rsqrtbf16_v16bf_mask, "__builtin_ia32_rsqrtbf16256_mask", IX86_BUILTIN_RSQRTBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rsqrtbf16_v8bf_mask, "__builtin_ia32_rsqrtbf16128_mask", IX86_BUILTIN_RSQRTBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_sqrtbf16_v32bf_mask, "__builtin_ia32_sqrtbf16512_mask", IX86_BUILTIN_SQRTBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sqrtbf16_v16bf_mask, "__builtin_ia32_sqrtbf16256_mask", IX86_BUILTIN_SQRTBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sqrtbf16_v8bf_mask, "__builtin_ia32_sqrtbf16128_mask", IX86_BUILTIN_SQRTBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rcpbf16_v32bf_mask, "__builtin_ia32_rcpbf16512_mask", IX86_BUILTIN_RCPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcpbf16_v16bf_mask, "__builtin_ia32_rcpbf16256_mask", IX86_BUILTIN_RCPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcpbf16_v8bf_mask, "__builtin_ia32_rcpbf16128_mask", IX86_BUILTIN_RCPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_getexpbf16_v32bf_mask, "__builtin_ia32_getexpbf16512_mask", IX86_BUILTIN_GETEXPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getexpbf16_v16bf_mask, "__builtin_ia32_getexpbf16256_mask", IX86_BUILTIN_GETEXPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getexpbf16_v8bf_mask, "__builtin_ia32_getexpbf16128_mask", IX86_BUILTIN_GETEXPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rndscalebf16_v32bf_mask, "__builtin_ia32_rndscalebf16512_mask", IX86_BUILTIN_RNDSCALEBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_INT_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rndscalebf16_v16bf_mask, "__builtin_ia32_rndscalebf16256_mask", IX86_BUILTIN_RNDSCALEBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_INT_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rndscalebf16_v8bf_mask, "__builtin_ia32_rndscalebf16128_mask", IX86_BUILTIN_RNDSCALEBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_INT_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_reducebf16_v32bf_mask, "__builtin_ia32_reducebf16512_mask", IX86_BUILTIN_REDUCEBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_INT_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_reducebf16_v16bf_mask, "__builtin_ia32_reducebf16256_mask", IX86_BUILTIN_REDUCEBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_INT_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_reducebf16_v8bf_mask, "__builtin_ia32_reducebf16128_mask", IX86_BUILTIN_REDUCEBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_INT_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_getmantbf16_v32bf_mask, "__builtin_ia32_getmantbf16512_mask", IX86_BUILTIN_GETMANTBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_INT_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getmantbf16_v16bf_mask, "__builtin_ia32_getmantbf16256_mask", IX86_BUILTIN_GETMANTBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_INT_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getmantbf16_v8bf_mask, "__builtin_ia32_getmantbf16128_mask", IX86_BUILTIN_GETMANTBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_INT_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_fpclassbf16_v32bf_mask, "__builtin_ia32_fpclassbf16512_mask", IX86_BUILTIN_FPCLASSBF16512_MASK, UNKNOWN, (int) SI_FTYPE_V32BF_INT_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fpclassbf16_v16bf_mask, "__builtin_ia32_fpclassbf16256_mask", IX86_BUILTIN_FPCLASSBF16256_MASK, UNKNOWN, (int) HI_FTYPE_V16BF_INT_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_fpclassbf16_v8bf_mask, "__builtin_ia32_fpclassbf16128_mask", IX86_BUILTIN_FPCLASSBF16128_MASK, UNKNOWN, (int) QI_FTYPE_V8BF_INT_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cmpbf16_v32bf_mask, "__builtin_ia32_cmpbf16512_mask", IX86_BUILTIN_CMPBF16512_MASK, UNKNOWN, (int) USI_FTYPE_V32BF_V32BF_INT_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cmpbf16_v16bf_mask, "__builtin_ia32_cmpbf16256_mask", IX86_BUILTIN_CMPBF16256_MASK, UNKNOWN, (int) UHI_FTYPE_V16BF_V16BF_INT_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cmpbf16_v8bf_mask, "__builtin_ia32_cmpbf16128_mask", IX86_BUILTIN_CMPBF16128_MASK, UNKNOWN, (int) UQI_FTYPE_V8BF_V8BF_INT_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16eq", IX86_BUILTIN_VCOMISBF16EQ, EQ, (int) INT_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16gt", IX86_BUILTIN_VCOMISBF16GT, GT, (int) INT_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16ge", IX86_BUILTIN_VCOMISBF16GE, GE, (int) INT_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16le", IX86_BUILTIN_VCOMISBF16LE, LE, (int) INT_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16lt", IX86_BUILTIN_VCOMISBF16LT, LT, (int) INT_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16neq", IX86_BUILTIN_VCOMISBF16NE, NE, (int) INT_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtbf162ibsv8bf_mask, "__builtin_ia32_cvtbf162ibs128_mask", IX86_BUILTIN_CVTBF162IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtbf162ibsv16bf_mask, "__builtin_ia32_cvtbf162ibs256_mask", IX86_BUILTIN_CVTBF162IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtbf162ibsv32bf_mask, "__builtin_ia32_cvtbf162ibs512_mask", IX86_BUILTIN_CVTBF162IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtbf162iubsv8bf_mask, "__builtin_ia32_cvtbf162iubs128_mask", IX86_BUILTIN_CVTBF162IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtbf162iubsv16bf_mask, "__builtin_ia32_cvtbf162iubs256_mask", IX86_BUILTIN_CVTBF162IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtbf162iubsv32bf_mask, "__builtin_ia32_cvtbf162iubs512_mask", IX86_BUILTIN_CVTBF162IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtph2ibsv8hf_mask, "__builtin_ia32_cvtph2ibs128_mask", IX86_BUILTIN_CVTPH2IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtph2ibsv16hf_mask, "__builtin_ia32_cvtph2ibs256_mask", IX86_BUILTIN_CVTPH2IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask, "__builtin_ia32_cvtph2ibs512_mask", IX86_BUILTIN_CVTPH2IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtph2iubsv8hf_mask, "__builtin_ia32_cvtph2iubs128_mask", IX86_BUILTIN_CVTPH2IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtph2iubsv16hf_mask, "__builtin_ia32_cvtph2iubs256_mask", IX86_BUILTIN_CVTPH2IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask, "__builtin_ia32_cvtph2iubs512_mask", IX86_BUILTIN_CVTPH2IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtps2ibsv4sf_mask, "__builtin_ia32_cvtps2ibs128_mask", IX86_BUILTIN_CVTPS2IBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtps2ibsv8sf_mask, "__builtin_ia32_cvtps2ibs256_mask", IX86_BUILTIN_CVTPS2IBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtps2ibsv16sf_mask, "__builtin_ia32_cvtps2ibs512_mask", IX86_BUILTIN_CVTPS2IBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtps2iubsv4sf_mask, "__builtin_ia32_cvtps2iubs128_mask", IX86_BUILTIN_CVTPS2IUBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtps2iubsv8sf_mask, "__builtin_ia32_cvtps2iubs256_mask", IX86_BUILTIN_CVTPS2IUBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtps2iubsv16sf_mask, "__builtin_ia32_cvtps2iubs512_mask", IX86_BUILTIN_CVTPS2IUBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttbf162ibsv8bf_mask, "__builtin_ia32_cvttbf162ibs128_mask", IX86_BUILTIN_CVTTBF162IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttbf162ibsv16bf_mask, "__builtin_ia32_cvttbf162ibs256_mask", IX86_BUILTIN_CVTTBF162IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttbf162ibsv32bf_mask, "__builtin_ia32_cvttbf162ibs512_mask", IX86_BUILTIN_CVTTBF162IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttbf162iubsv8bf_mask, "__builtin_ia32_cvttbf162iubs128_mask", IX86_BUILTIN_CVTTBF162IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttbf162iubsv16bf_mask, "__builtin_ia32_cvttbf162iubs256_mask", IX86_BUILTIN_CVTTBF162IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttbf162iubsv32bf_mask, "__builtin_ia32_cvttbf162iubs512_mask", IX86_BUILTIN_CVTTBF162IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttph2ibsv8hf_mask, "__builtin_ia32_cvttph2ibs128_mask", IX86_BUILTIN_CVTTPH2IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttph2ibsv16hf_mask, "__builtin_ia32_cvttph2ibs256_mask", IX86_BUILTIN_CVTTPH2IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttph2ibsv32hf_mask, "__builtin_ia32_cvttph2ibs512_mask", IX86_BUILTIN_CVTTPH2IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttph2iubsv8hf_mask, "__builtin_ia32_cvttph2iubs128_mask", IX86_BUILTIN_CVTTPH2IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttph2iubsv16hf_mask, "__builtin_ia32_cvttph2iubs256_mask", IX86_BUILTIN_CVTTPH2IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttph2iubsv32hf_mask, "__builtin_ia32_cvttph2iubs512_mask", IX86_BUILTIN_CVTTPH2IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttps2ibsv4sf_mask, "__builtin_ia32_cvttps2ibs128_mask", IX86_BUILTIN_CVTTPS2IBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttps2ibsv8sf_mask, "__builtin_ia32_cvttps2ibs256_mask", IX86_BUILTIN_CVTTPS2IBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttps2ibsv16sf_mask, "__builtin_ia32_cvttps2ibs512_mask", IX86_BUILTIN_CVTTPS2IBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttps2iubsv4sf_mask, "__builtin_ia32_cvttps2iubs128_mask", IX86_BUILTIN_CVTTPS2IUBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttps2iubsv8sf_mask, "__builtin_ia32_cvttps2iubs256_mask", IX86_BUILTIN_CVTTPS2IUBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttps2iubsv16sf_mask, "__builtin_ia32_cvttps2iubs512_mask", IX86_BUILTIN_CVTTPS2IUBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2dqsv2df_mask, "__builtin_ia32_cvttpd2dqs128_mask", IX86_BUILTIN_VCVTTPD2DQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V2DF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2dqsv4df_mask, "__builtin_ia32_cvttpd2dqs256_mask", IX86_BUILTIN_VCVTTPD2DQS256_MASK, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2dqsv8df_mask, "__builtin_ia32_cvttpd2dqs512_mask", IX86_BUILTIN_VCVTTPD2DQS512_MASK, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2qqsv2df_mask, "__builtin_ia32_cvttpd2qqs128_mask", IX86_BUILTIN_VCVTTPD2QQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DF_V2DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2qqsv4df_mask, "__builtin_ia32_cvttpd2qqs256_mask", IX86_BUILTIN_VCVTTPD2QQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2qqsv8df_mask, "__builtin_ia32_cvttpd2qqs512_mask", IX86_BUILTIN_VCVTTPD2QQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2udqsv2df_mask, "__builtin_ia32_cvttpd2udqs128_mask", IX86_BUILTIN_VCVTTPD2UDQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V2DF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2udqsv4df_mask, "__builtin_ia32_cvttpd2udqs256_mask", IX86_BUILTIN_VCVTTPD2UDQS256_MASK, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2udqsv8df_mask, "__builtin_ia32_cvttpd2udqs512_mask", IX86_BUILTIN_VCVTTPD2UDQS512_MASK, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2uqqsv2df_mask, "__builtin_ia32_cvttpd2uqqs128_mask", IX86_BUILTIN_VCVTTPD2UQQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DF_V2DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2uqqsv4df_mask, "__builtin_ia32_cvttpd2uqqs256_mask", IX86_BUILTIN_VCVTTPD2UQQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2uqqsv8df_mask, "__builtin_ia32_cvttpd2uqqs512_mask", IX86_BUILTIN_VCVTTPD2UQQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2dqsv4sf_mask, "__builtin_ia32_cvttps2dqs128_mask", IX86_BUILTIN_VCVTTPS2DQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2dqsv8sf_mask, "__builtin_ia32_cvttps2dqs256_mask", IX86_BUILTIN_VCVTTPS2DQS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2dqsv16sf_mask, "__builtin_ia32_cvttps2dqs512_mask", IX86_BUILTIN_VCVTTPS2DQS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2qqsv2di_mask, "__builtin_ia32_cvttps2qqs128_mask", IX86_BUILTIN_VCVTTPS2QQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V4SF_V2DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2qqsv4di_mask, "__builtin_ia32_cvttps2qqs256_mask", IX86_BUILTIN_VCVTTPS2QQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2qqsv8di_mask, "__builtin_ia32_cvttps2qqs512_mask", IX86_BUILTIN_VCVTTPS2QQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2udqsv4sf_mask, "__builtin_ia32_cvttps2udqs128_mask", IX86_BUILTIN_VCVTTPS2UDQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2udqsv8sf_mask, "__builtin_ia32_cvttps2udqs256_mask", IX86_BUILTIN_VCVTTPS2UDQS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2udqsv16sf_mask, "__builtin_ia32_cvttps2udqs512_mask", IX86_BUILTIN_VCVTTPS2UDQS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2uqqsv2di_mask, "__builtin_ia32_cvttps2uqqs128_mask", IX86_BUILTIN_VCVTTPS2UQQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V4SF_V2DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2uqqsv4di_mask, "__builtin_ia32_cvttps2uqqs256_mask", IX86_BUILTIN_VCVTTPS2UQQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2uqqsv8di_mask, "__builtin_ia32_cvttps2uqqs512_mask", IX86_BUILTIN_VCVTTPS2UQQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxbf16_v8bf_mask, "__builtin_ia32_minmaxbf16128_mask", IX86_BUILTIN_MINMAXBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_INT_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxbf16_v16bf_mask, "__builtin_ia32_minmaxbf16256_mask", IX86_BUILTIN_MINMAXBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_INT_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_minmaxbf16_v32bf_mask, "__builtin_ia32_minmaxbf16512_mask", IX86_BUILTIN_MINMAXBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_INT_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxpv2df_mask, "__builtin_ia32_minmaxpd128_mask", IX86_BUILTIN_MINMAXPD128_MASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxpv8hf_mask, "__builtin_ia32_minmaxph128_mask", IX86_BUILTIN_MINMAXPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxpv4sf_mask, "__builtin_ia32_minmaxps128_mask", IX86_BUILTIN_MINMAXPS128_MASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v16si, "__builtin_ia32_vpdpbssd512", IX86_BUILTIN_VPDPBSSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v16si, "__builtin_ia32_vpdpbssds512", IX86_BUILTIN_VPDPBSSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v16si, "__builtin_ia32_vpdpbsud512", IX86_BUILTIN_VPDPBSUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v16si, "__builtin_ia32_vpdpbsuds512", IX86_BUILTIN_VPDPBSUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v16si, "__builtin_ia32_vpdpbuud512", IX86_BUILTIN_VPDPBUUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v16si, "__builtin_ia32_vpdpbuuds512", IX86_BUILTIN_VPDPBUUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v16si_mask, "__builtin_ia32_vpdpbssd_v16si_mask", IX86_BUILTIN_VPDPBSSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v16si_maskz, "__builtin_ia32_vpdpbssd_v16si_maskz", IX86_BUILTIN_VPDPBSSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v16si_mask, "__builtin_ia32_vpdpbssds_v16si_mask", IX86_BUILTIN_VPDPBSSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v16si_maskz, "__builtin_ia32_vpdpbssds_v16si_maskz", IX86_BUILTIN_VPDPBSSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v16si_mask, "__builtin_ia32_vpdpbsud_v16si_mask", IX86_BUILTIN_VPDPBSUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v16si_maskz, "__builtin_ia32_vpdpbsud_v16si_maskz", IX86_BUILTIN_VPDPBSUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v16si_mask, "__builtin_ia32_vpdpbsuds_v16si_mask", IX86_BUILTIN_VPDPBSUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v16si_maskz, "__builtin_ia32_vpdpbsuds_v16si_maskz", IX86_BUILTIN_VPDPBSUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v16si_mask, "__builtin_ia32_vpdpbuud_v16si_mask", IX86_BUILTIN_VPDPBUUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v16si_maskz, "__builtin_ia32_vpdpbuud_v16si_maskz", IX86_BUILTIN_VPDPBUUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v16si_mask, "__builtin_ia32_vpdpbuuds_v16si_mask", IX86_BUILTIN_VPDPBUUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v16si_maskz, "__builtin_ia32_vpdpbuuds_v16si_maskz", IX86_BUILTIN_VPDPBUUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v8si_mask, "__builtin_ia32_vpdpbssd_v8si_mask", IX86_BUILTIN_VPDPBSSDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v8si_maskz, "__builtin_ia32_vpdpbssd_v8si_maskz", IX86_BUILTIN_VPDPBSSDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v8si_mask, "__builtin_ia32_vpdpbssds_v8si_mask", IX86_BUILTIN_VPDPBSSDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v8si_maskz, "__builtin_ia32_vpdpbssds_v8si_maskz", IX86_BUILTIN_VPDPBSSDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v8si_mask, "__builtin_ia32_vpdpbsud_v8si_mask", IX86_BUILTIN_VPDPBSUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v8si_maskz, "__builtin_ia32_vpdpbsud_v8si_maskz", IX86_BUILTIN_VPDPBSUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v8si_mask, "__builtin_ia32_vpdpbsuds_v8si_mask", IX86_BUILTIN_VPDPBSUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v8si_maskz, "__builtin_ia32_vpdpbsuds_v8si_maskz", IX86_BUILTIN_VPDPBSUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v8si_mask, "__builtin_ia32_vpdpbuud_v8si_mask", IX86_BUILTIN_VPDPBUUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v8si_maskz, "__builtin_ia32_vpdpbuud_v8si_maskz", IX86_BUILTIN_VPDPBUUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v8si_mask, "__builtin_ia32_vpdpbuuds_v8si_mask", IX86_BUILTIN_VPDPBUUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v8si_maskz, "__builtin_ia32_vpdpbuuds_v8si_maskz", IX86_BUILTIN_VPDPBUUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v4si_mask, "__builtin_ia32_vpdpbssd_v4si_mask", IX86_BUILTIN_VPDPBSSDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssd_v4si_maskz, "__builtin_ia32_vpdpbssd_v4si_maskz", IX86_BUILTIN_VPDPBSSDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v4si_mask, "__builtin_ia32_vpdpbssds_v4si_mask", IX86_BUILTIN_VPDPBSSDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbssds_v4si_maskz, "__builtin_ia32_vpdpbssds_v4si_maskz", IX86_BUILTIN_VPDPBSSDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v4si_mask, "__builtin_ia32_vpdpbsud_v4si_mask", IX86_BUILTIN_VPDPBSUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsud_v4si_maskz, "__builtin_ia32_vpdpbsud_v4si_maskz", IX86_BUILTIN_VPDPBSUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v4si_mask, "__builtin_ia32_vpdpbsuds_v4si_mask", IX86_BUILTIN_VPDPBSUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbsuds_v4si_maskz, "__builtin_ia32_vpdpbsuds_v4si_maskz", IX86_BUILTIN_VPDPBSUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v4si_mask, "__builtin_ia32_vpdpbuud_v4si_mask", IX86_BUILTIN_VPDPBUUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuud_v4si_maskz, "__builtin_ia32_vpdpbuud_v4si_maskz", IX86_BUILTIN_VPDPBUUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v4si_mask, "__builtin_ia32_vpdpbuuds_v4si_mask", IX86_BUILTIN_VPDPBUUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpbuuds_v4si_maskz, "__builtin_ia32_vpdpbuuds_v4si_maskz", IX86_BUILTIN_VPDPBUUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v16si, "__builtin_ia32_vpdpwsud512", IX86_BUILTIN_VPDPWSUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v16si, "__builtin_ia32_vpdpwsuds512", IX86_BUILTIN_VPDPWSUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v16si, "__builtin_ia32_vpdpwusd512", IX86_BUILTIN_VPDPWUSDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v16si, "__builtin_ia32_vpdpwusds512", IX86_BUILTIN_VPDPWUSDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v16si, "__builtin_ia32_vpdpwuud512", IX86_BUILTIN_VPDPWUUDV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v16si, "__builtin_ia32_vpdpwuuds512", IX86_BUILTIN_VPDPWUUDSV16SI, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v16si_mask, "__builtin_ia32_vpdpwsud_v16si_mask", IX86_BUILTIN_VPDPWSUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v16si_maskz, "__builtin_ia32_vpdpwsud_v16si_maskz", IX86_BUILTIN_VPDPWSUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v16si_mask, "__builtin_ia32_vpdpwsuds_v16si_mask", IX86_BUILTIN_VPDPWSUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v16si_maskz, "__builtin_ia32_vpdpwsuds_v16si_maskz", IX86_BUILTIN_VPDPWSUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v16si_mask, "__builtin_ia32_vpdpwusd_v16si_mask", IX86_BUILTIN_VPDPWUSDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v16si_maskz, "__builtin_ia32_vpdpwusd_v16si_maskz", IX86_BUILTIN_VPDPWUSDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v16si_mask, "__builtin_ia32_vpdpwusds_v16si_mask", IX86_BUILTIN_VPDPWUSDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v16si_maskz, "__builtin_ia32_vpdpwusds_v16si_maskz", IX86_BUILTIN_VPDPWUSDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v16si_mask, "__builtin_ia32_vpdpwuud_v16si_mask", IX86_BUILTIN_VPDPWUUDV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v16si_maskz, "__builtin_ia32_vpdpwuud_v16si_maskz", IX86_BUILTIN_VPDPWUUDV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v16si_mask, "__builtin_ia32_vpdpwuuds_v16si_mask", IX86_BUILTIN_VPDPWUUDSV16SI_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v16si_maskz, "__builtin_ia32_vpdpwuuds_v16si_maskz", IX86_BUILTIN_VPDPWUUDSV16SI_MASKZ, UNKNOWN, (int) V16SI_FTYPE_V16SI_V16SI_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v8si_mask, "__builtin_ia32_vpdpwsud_v8si_mask", IX86_BUILTIN_VPDPWSUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v8si_maskz, "__builtin_ia32_vpdpwsud_v8si_maskz", IX86_BUILTIN_VPDPWSUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v8si_mask, "__builtin_ia32_vpdpwsuds_v8si_mask", IX86_BUILTIN_VPDPWSUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v8si_maskz, "__builtin_ia32_vpdpwsuds_v8si_maskz", IX86_BUILTIN_VPDPWSUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v8si_mask, "__builtin_ia32_vpdpwusd_v8si_mask", IX86_BUILTIN_VPDPWUSDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v8si_maskz, "__builtin_ia32_vpdpwusd_v8si_maskz", IX86_BUILTIN_VPDPWUSDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v8si_mask, "__builtin_ia32_vpdpwusds_v8si_mask", IX86_BUILTIN_VPDPWUSDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v8si_maskz, "__builtin_ia32_vpdpwusds_v8si_maskz", IX86_BUILTIN_VPDPWUSDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v8si_mask, "__builtin_ia32_vpdpwuud_v8si_mask", IX86_BUILTIN_VPDPWUUDV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v8si_maskz, "__builtin_ia32_vpdpwuud_v8si_maskz", IX86_BUILTIN_VPDPWUUDV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v8si_mask, "__builtin_ia32_vpdpwuuds_v8si_mask", IX86_BUILTIN_VPDPWUUDSV8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v8si_maskz, "__builtin_ia32_vpdpwuuds_v8si_maskz", IX86_BUILTIN_VPDPWUUDSV8SI_MASKZ, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v4si_mask, "__builtin_ia32_vpdpwsud_v4si_mask", IX86_BUILTIN_VPDPWSUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsud_v4si_maskz, "__builtin_ia32_vpdpwsud_v4si_maskz", IX86_BUILTIN_VPDPWSUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v4si_mask, "__builtin_ia32_vpdpwsuds_v4si_mask", IX86_BUILTIN_VPDPWSUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwsuds_v4si_maskz, "__builtin_ia32_vpdpwsuds_v4si_maskz", IX86_BUILTIN_VPDPWSUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v4si_mask, "__builtin_ia32_vpdpwusd_v4si_mask", IX86_BUILTIN_VPDPWUSDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusd_v4si_maskz, "__builtin_ia32_vpdpwusd_v4si_maskz", IX86_BUILTIN_VPDPWUSDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v4si_mask, "__builtin_ia32_vpdpwusds_v4si_mask", IX86_BUILTIN_VPDPWUSDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwusds_v4si_maskz, "__builtin_ia32_vpdpwusds_v4si_maskz", IX86_BUILTIN_VPDPWUSDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v4si_mask, "__builtin_ia32_vpdpwuud_v4si_mask", IX86_BUILTIN_VPDPWUUDV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuud_v4si_maskz, "__builtin_ia32_vpdpwuud_v4si_maskz", IX86_BUILTIN_VPDPWUUDV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v4si_mask, "__builtin_ia32_vpdpwuuds_v4si_mask", IX86_BUILTIN_VPDPWUUDSV4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vpdpwuuds_v4si_maskz, "__builtin_ia32_vpdpwuuds_v4si_maskz", IX86_BUILTIN_VPDPWUUDSV4SI_MASKZ, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vdpphps_v16sf_mask, "__builtin_ia32_vdpphps512_mask", IX86_BUILTIN_VDPPHPS512_MASK, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vdpphps_v16sf_maskz, "__builtin_ia32_vdpphps512_maskz", IX86_BUILTIN_VDPPHPS512_MASKZ, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_V16SF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vdpphps_v8sf_mask, "__builtin_ia32_vdpphps256_mask", IX86_BUILTIN_VDPPHPS256_MASK, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vdpphps_v8sf_maskz, "__builtin_ia32_vdpphps256_maskz", IX86_BUILTIN_VDPPHPS256_MASKZ, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vdpphps_v4sf_mask, "__builtin_ia32_vdpphps128_mask", IX86_BUILTIN_VDPPHPS128_MASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vdpphps_v4sf_maskz, "__builtin_ia32_vdpphps128_maskz", IX86_BUILTIN_VDPPHPS128_MASKZ, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mpsadbw, "__builtin_ia32_mpsadbw512", IX86_BUILTIN_AVX10_2_MPSADBW, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mpsadbw_mask, "__builtin_ia32_mpsadbw512_mask", IX86_BUILTIN_VMPSADBW_V32HI_MASK, UNKNOWN, (int) V32HI_FTYPE_V64QI_V64QI_INT_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx2_mpsadbw_mask, "__builtin_ia32_mpsadbw256_mask", IX86_BUILTIN_VMPSADBW_V16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI_INT_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_sse4_1_mpsadbw_mask, "__builtin_ia32_mpsadbw128_mask", IX86_BUILTIN_VMPSADBW_V8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI_INT_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvt2ps2phx_v16hf_mask, "__builtin_ia32_vcvt2ps2phx256_mask", IX86_BUILTIN_VCVT2PS2PHX_V16HF_MASK, UNKNOWN, (int) V16HF_FTYPE_V8SF_V8SF_V16HF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvt2ps2phx_v8hf_mask, "__builtin_ia32_vcvt2ps2phx128_mask", IX86_BUILTIN_VCVT2PS2PHX_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V4SF_V4SF_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8v8hf, "__builtin_ia32_vcvtbiasph2bf8128", IX86_BUILTIN_VCVTBIASPH2BF8128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8v8hf_mask, "__builtin_ia32_vcvtbiasph2bf8128_mask", IX86_BUILTIN_VCVTBIASPH2BF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8v16hf_mask, "__builtin_ia32_vcvtbiasph2bf8256_mask", IX86_BUILTIN_VCVTBIASPH2BF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8v32hf_mask, "__builtin_ia32_vcvtbiasph2bf8512_mask", IX86_BUILTIN_VCVTBIASPH2BF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8sv8hf, "__builtin_ia32_vcvtbiasph2bf8s128", IX86_BUILTIN_VCVTBIASPH2BF8S128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8sv8hf_mask, "__builtin_ia32_vcvtbiasph2bf8s128_mask", IX86_BUILTIN_VCVTBIASPH2BF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8sv16hf_mask, "__builtin_ia32_vcvtbiasph2bf8s256_mask", IX86_BUILTIN_VCVTBIASPH2BF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2bf8sv32hf_mask, "__builtin_ia32_vcvtbiasph2bf8s512_mask", IX86_BUILTIN_VCVTBIASPH2BF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8v8hf, "__builtin_ia32_vcvtbiasph2hf8128", IX86_BUILTIN_VCVTBIASPH2HF8128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8v8hf_mask, "__builtin_ia32_vcvtbiasph2hf8128_mask", IX86_BUILTIN_VCVTBIASPH2HF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8v16hf_mask, "__builtin_ia32_vcvtbiasph2hf8256_mask", IX86_BUILTIN_VCVTBIASPH2HF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8v32hf_mask, "__builtin_ia32_vcvtbiasph2hf8512_mask", IX86_BUILTIN_VCVTBIASPH2HF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8sv8hf, "__builtin_ia32_vcvtbiasph2hf8s128", IX86_BUILTIN_VCVTBIASPH2HF8S128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8sv8hf_mask, "__builtin_ia32_vcvtbiasph2hf8s128_mask", IX86_BUILTIN_VCVTBIASPH2HF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V16QI_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8sv16hf_mask, "__builtin_ia32_vcvtbiasph2hf8s256_mask", IX86_BUILTIN_VCVTBIASPH2HF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V32QI_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtbiasph2hf8sv32hf_mask, "__builtin_ia32_vcvtbiasph2hf8s512_mask", IX86_BUILTIN_VCVTBIASPH2HF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V64QI_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2bf8v8hf_mask, "__builtin_ia32_vcvt2ph2bf8128_mask", IX86_BUILTIN_VCVT2PH2BF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2bf8v16hf_mask, "__builtin_ia32_vcvt2ph2bf8256_mask", IX86_BUILTIN_VCVT2PH2BF8256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2bf8v32hf_mask, "__builtin_ia32_vcvt2ph2bf8512_mask", IX86_BUILTIN_VCVT2PH2BF8512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2bf8sv8hf_mask, "__builtin_ia32_vcvt2ph2bf8s128_mask", IX86_BUILTIN_VCVT2PH2BF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2bf8sv16hf_mask, "__builtin_ia32_vcvt2ph2bf8s256_mask", IX86_BUILTIN_VCVT2PH2BF8S256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2bf8sv32hf_mask, "__builtin_ia32_vcvt2ph2bf8s512_mask", IX86_BUILTIN_VCVT2PH2BF8S512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2hf8v8hf_mask, "__builtin_ia32_vcvt2ph2hf8128_mask", IX86_BUILTIN_VCVT2PH2HF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2hf8v16hf_mask, "__builtin_ia32_vcvt2ph2hf8256_mask", IX86_BUILTIN_VCVT2PH2HF8256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2hf8v32hf_mask, "__builtin_ia32_vcvt2ph2hf8512_mask", IX86_BUILTIN_VCVT2PH2HF8512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2hf8sv8hf_mask, "__builtin_ia32_vcvt2ph2hf8s128_mask", IX86_BUILTIN_VCVT2PH2HF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V8HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2hf8sv16hf_mask, "__builtin_ia32_vcvt2ph2hf8s256_mask", IX86_BUILTIN_VCVT2PH2HF8S256_MASK, UNKNOWN, (int) V32QI_FTYPE_V16HF_V16HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvt2ph2hf8sv32hf_mask, "__builtin_ia32_vcvt2ph2hf8s512_mask", IX86_BUILTIN_VCVT2PH2HF8S512_MASK, UNKNOWN, (int) V64QI_FTYPE_V32HF_V32HF_V64QI_UDI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2bf8v8hf_mask, "__builtin_ia32_vcvtph2bf8128_mask", IX86_BUILTIN_VCVTPH2BF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2bf8v16hf_mask, "__builtin_ia32_vcvtph2bf8256_mask", IX86_BUILTIN_VCVTPH2BF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2bf8v32hf_mask, "__builtin_ia32_vcvtph2bf8512_mask", IX86_BUILTIN_VCVTPH2BF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2bf8sv8hf_mask, "__builtin_ia32_vcvtph2bf8s128_mask", IX86_BUILTIN_VCVTPH2BF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2bf8sv16hf_mask, "__builtin_ia32_vcvtph2bf8s256_mask", IX86_BUILTIN_VCVTPH2BF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2bf8sv32hf_mask, "__builtin_ia32_vcvtph2bf8s512_mask", IX86_BUILTIN_VCVTPH2BF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2hf8v8hf_mask, "__builtin_ia32_vcvtph2hf8128_mask", IX86_BUILTIN_VCVTPH2HF8128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2hf8v16hf_mask, "__builtin_ia32_vcvtph2hf8256_mask", IX86_BUILTIN_VCVTPH2HF8256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2hf8v32hf_mask, "__builtin_ia32_vcvtph2hf8512_mask", IX86_BUILTIN_VCVTPH2HF8512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2hf8sv8hf_mask, "__builtin_ia32_vcvtph2hf8s128_mask", IX86_BUILTIN_VCVTPH2HF8S128_MASK, UNKNOWN, (int) V16QI_FTYPE_V8HF_V16QI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2hf8sv16hf_mask, "__builtin_ia32_vcvtph2hf8s256_mask", IX86_BUILTIN_VCVTPH2HF8S256_MASK, UNKNOWN, (int) V16QI_FTYPE_V16HF_V16QI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvtph2hf8sv32hf_mask, "__builtin_ia32_vcvtph2hf8s512_mask", IX86_BUILTIN_VCVTPH2HF8S512_MASK, UNKNOWN, (int) V32QI_FTYPE_V32HF_V32QI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvthf82phv8hf_mask, "__builtin_ia32_vcvthf82ph128_mask", IX86_BUILTIN_VCVTHF82PH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V16QI_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvthf82phv16hf_mask, "__builtin_ia32_vcvthf82ph256_mask", IX86_BUILTIN_VCVTHF82PH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16QI_V16HF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_vcvthf82phv32hf_mask, "__builtin_ia32_vcvthf82ph512_mask", IX86_BUILTIN_VCVTHF82PH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32QI_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_addbf16_v32bf, "__builtin_ia32_addbf16512", IX86_BUILTIN_ADDBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_addbf16_v32bf_mask, "__builtin_ia32_addbf16512_mask", IX86_BUILTIN_ADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_addbf16_v16bf, "__builtin_ia32_addbf16256", IX86_BUILTIN_ADDBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_addbf16_v16bf_mask, "__builtin_ia32_addbf16256_mask", IX86_BUILTIN_ADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_addbf16_v8bf, "__builtin_ia32_addbf16128", IX86_BUILTIN_ADDBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_addbf16_v8bf_mask, "__builtin_ia32_addbf16128_mask", IX86_BUILTIN_ADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_subbf16_v32bf, "__builtin_ia32_subbf16512", IX86_BUILTIN_SUBBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_subbf16_v32bf_mask, "__builtin_ia32_subbf16512_mask", IX86_BUILTIN_SUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_subbf16_v16bf, "__builtin_ia32_subbf16256", IX86_BUILTIN_SUBBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_subbf16_v16bf_mask, "__builtin_ia32_subbf16256_mask", IX86_BUILTIN_SUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_subbf16_v8bf, "__builtin_ia32_subbf16128", IX86_BUILTIN_SUBBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_subbf16_v8bf_mask, "__builtin_ia32_subbf16128_mask", IX86_BUILTIN_SUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mulbf16_v32bf, "__builtin_ia32_mulbf16512", IX86_BUILTIN_MULBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mulbf16_v32bf_mask, "__builtin_ia32_mulbf16512_mask", IX86_BUILTIN_MULBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mulbf16_v16bf, "__builtin_ia32_mulbf16256", IX86_BUILTIN_MULBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mulbf16_v16bf_mask, "__builtin_ia32_mulbf16256_mask", IX86_BUILTIN_MULBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mulbf16_v8bf, "__builtin_ia32_mulbf16128", IX86_BUILTIN_MULBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_mulbf16_v8bf_mask, "__builtin_ia32_mulbf16128_mask", IX86_BUILTIN_MULBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_divbf16_v32bf, "__builtin_ia32_divbf16512", IX86_BUILTIN_DIVBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_divbf16_v32bf_mask, "__builtin_ia32_divbf16512_mask", IX86_BUILTIN_DIVBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_divbf16_v16bf, "__builtin_ia32_divbf16256", IX86_BUILTIN_DIVBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_divbf16_v16bf_mask, "__builtin_ia32_divbf16256_mask", IX86_BUILTIN_DIVBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_divbf16_v8bf, "__builtin_ia32_divbf16128", IX86_BUILTIN_DIVBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_divbf16_v8bf_mask, "__builtin_ia32_divbf16128_mask", IX86_BUILTIN_DIVBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_smaxbf16_v32bf, "__builtin_ia32_maxbf16512", IX86_BUILTIN_MAXBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_smaxbf16_v32bf_mask, "__builtin_ia32_maxbf16512_mask", IX86_BUILTIN_MAXBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_smaxbf16_v16bf, "__builtin_ia32_maxbf16256", IX86_BUILTIN_MAXBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_smaxbf16_v16bf_mask, "__builtin_ia32_maxbf16256_mask", IX86_BUILTIN_MAXBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_smaxbf16_v8bf, "__builtin_ia32_maxbf16128", IX86_BUILTIN_MAXBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_smaxbf16_v8bf_mask, "__builtin_ia32_maxbf16128_mask", IX86_BUILTIN_MAXBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sminbf16_v32bf, "__builtin_ia32_minbf16512", IX86_BUILTIN_MINBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sminbf16_v32bf_mask, "__builtin_ia32_minbf16512_mask", IX86_BUILTIN_MINBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sminbf16_v16bf, "__builtin_ia32_minbf16256", IX86_BUILTIN_MINBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sminbf16_v16bf_mask, "__builtin_ia32_minbf16256_mask", IX86_BUILTIN_MINBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sminbf16_v8bf, "__builtin_ia32_minbf16128", IX86_BUILTIN_MINBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sminbf16_v8bf_mask, "__builtin_ia32_minbf16128_mask", IX86_BUILTIN_MINBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_scalefbf16_v32bf, "__builtin_ia32_scalefbf16512", IX86_BUILTIN_SCALEFBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_scalefbf16_v32bf_mask, "__builtin_ia32_scalefbf16512_mask", IX86_BUILTIN_SCALEFBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_scalefbf16_v16bf, "__builtin_ia32_scalefbf16256", IX86_BUILTIN_SCALEFBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_scalefbf16_v16bf_mask, "__builtin_ia32_scalefbf16256_mask", IX86_BUILTIN_SCALEFBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_scalefbf16_v8bf, "__builtin_ia32_scalefbf16128", IX86_BUILTIN_SCALEFBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_scalefbf16_v8bf_mask, "__builtin_ia32_scalefbf16128_mask", IX86_BUILTIN_SCALEFBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v32bf_mask, "__builtin_ia32_fmaddbf16512_mask", IX86_BUILTIN_FMADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v32bf_mask3, "__builtin_ia32_fmaddbf16512_mask3", IX86_BUILTIN_FMADDBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v32bf_maskz, "__builtin_ia32_fmaddbf16512_maskz", IX86_BUILTIN_FMADDBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v16bf_mask, "__builtin_ia32_fmaddbf16256_mask", IX86_BUILTIN_FMADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v16bf_mask3, "__builtin_ia32_fmaddbf16256_mask3", IX86_BUILTIN_FMADDBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v16bf_maskz, "__builtin_ia32_fmaddbf16256_maskz", IX86_BUILTIN_FMADDBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v8bf_mask, "__builtin_ia32_fmaddbf16128_mask", IX86_BUILTIN_FMADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v8bf_mask3, "__builtin_ia32_fmaddbf16128_mask3", IX86_BUILTIN_FMADDBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmaddbf16_v8bf_maskz, "__builtin_ia32_fmaddbf16128_maskz", IX86_BUILTIN_FMADDBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v32bf_mask, "__builtin_ia32_fmsubbf16512_mask", IX86_BUILTIN_FMSUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v32bf_mask3, "__builtin_ia32_fmsubbf16512_mask3", IX86_BUILTIN_FMSUBBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v32bf_maskz, "__builtin_ia32_fmsubbf16512_maskz", IX86_BUILTIN_FMSUBBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v16bf_mask, "__builtin_ia32_fmsubbf16256_mask", IX86_BUILTIN_FMSUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v16bf_mask3, "__builtin_ia32_fmsubbf16256_mask3", IX86_BUILTIN_FMSUBBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v16bf_maskz, "__builtin_ia32_fmsubbf16256_maskz", IX86_BUILTIN_FMSUBBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v8bf_mask, "__builtin_ia32_fmsubbf16128_mask", IX86_BUILTIN_FMSUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v8bf_mask3, "__builtin_ia32_fmsubbf16128_mask3", IX86_BUILTIN_FMSUBBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fmsubbf16_v8bf_maskz, "__builtin_ia32_fmsubbf16128_maskz", IX86_BUILTIN_FMSUBBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v32bf_mask, "__builtin_ia32_fnmaddbf16512_mask", IX86_BUILTIN_FNMADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v32bf_mask3, "__builtin_ia32_fnmaddbf16512_mask3", IX86_BUILTIN_FNMADDBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v32bf_maskz, "__builtin_ia32_fnmaddbf16512_maskz", IX86_BUILTIN_FNMADDBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v16bf_mask, "__builtin_ia32_fnmaddbf16256_mask", IX86_BUILTIN_FNMADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v16bf_mask3, "__builtin_ia32_fnmaddbf16256_mask3", IX86_BUILTIN_FNMADDBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v16bf_maskz, "__builtin_ia32_fnmaddbf16256_maskz", IX86_BUILTIN_FNMADDBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v8bf_mask, "__builtin_ia32_fnmaddbf16128_mask", IX86_BUILTIN_FNMADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v8bf_mask3, "__builtin_ia32_fnmaddbf16128_mask3", IX86_BUILTIN_FNMADDBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmaddbf16_v8bf_maskz, "__builtin_ia32_fnmaddbf16128_maskz", IX86_BUILTIN_FNMADDBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v32bf_mask, "__builtin_ia32_fnmsubbf16512_mask", IX86_BUILTIN_FNMSUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v32bf_mask3, "__builtin_ia32_fnmsubbf16512_mask3", IX86_BUILTIN_FNMSUBBF16512_MASK3, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v32bf_maskz, "__builtin_ia32_fnmsubbf16512_maskz", IX86_BUILTIN_FNMSUBBF16512_MASKZ, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v16bf_mask, "__builtin_ia32_fnmsubbf16256_mask", IX86_BUILTIN_FNMSUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v16bf_mask3, "__builtin_ia32_fnmsubbf16256_mask3", IX86_BUILTIN_FNMSUBBF16256_MASK3, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v16bf_maskz, "__builtin_ia32_fnmsubbf16256_maskz", IX86_BUILTIN_FNMSUBBF16256_MASKZ, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v8bf_mask, "__builtin_ia32_fnmsubbf16128_mask", IX86_BUILTIN_FNMSUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v8bf_mask3, "__builtin_ia32_fnmsubbf16128_mask3", IX86_BUILTIN_FNMSUBBF16128_MASK3, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fnmsubbf16_v8bf_maskz, "__builtin_ia32_fnmsubbf16128_maskz", IX86_BUILTIN_FNMSUBBF16128_MASKZ, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rsqrtbf16_v32bf_mask, "__builtin_ia32_rsqrtbf16512_mask", IX86_BUILTIN_RSQRTBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rsqrtbf16_v16bf_mask, "__builtin_ia32_rsqrtbf16256_mask", IX86_BUILTIN_RSQRTBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rsqrtbf16_v8bf_mask, "__builtin_ia32_rsqrtbf16128_mask", IX86_BUILTIN_RSQRTBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sqrtbf16_v32bf_mask, "__builtin_ia32_sqrtbf16512_mask", IX86_BUILTIN_SQRTBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sqrtbf16_v16bf_mask, "__builtin_ia32_sqrtbf16256_mask", IX86_BUILTIN_SQRTBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_sqrtbf16_v8bf_mask, "__builtin_ia32_sqrtbf16128_mask", IX86_BUILTIN_SQRTBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rcpbf16_v32bf_mask, "__builtin_ia32_rcpbf16512_mask", IX86_BUILTIN_RCPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rcpbf16_v16bf_mask, "__builtin_ia32_rcpbf16256_mask", IX86_BUILTIN_RCPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rcpbf16_v8bf_mask, "__builtin_ia32_rcpbf16128_mask", IX86_BUILTIN_RCPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_getexpbf16_v32bf_mask, "__builtin_ia32_getexpbf16512_mask", IX86_BUILTIN_GETEXPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_getexpbf16_v16bf_mask, "__builtin_ia32_getexpbf16256_mask", IX86_BUILTIN_GETEXPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_getexpbf16_v8bf_mask, "__builtin_ia32_getexpbf16128_mask", IX86_BUILTIN_GETEXPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rndscalebf16_v32bf_mask, "__builtin_ia32_rndscalebf16512_mask", IX86_BUILTIN_RNDSCALEBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_INT_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rndscalebf16_v16bf_mask, "__builtin_ia32_rndscalebf16256_mask", IX86_BUILTIN_RNDSCALEBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_INT_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_rndscalebf16_v8bf_mask, "__builtin_ia32_rndscalebf16128_mask", IX86_BUILTIN_RNDSCALEBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_INT_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_reducebf16_v32bf_mask, "__builtin_ia32_reducebf16512_mask", IX86_BUILTIN_REDUCEBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_INT_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_reducebf16_v16bf_mask, "__builtin_ia32_reducebf16256_mask", IX86_BUILTIN_REDUCEBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_INT_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_reducebf16_v8bf_mask, "__builtin_ia32_reducebf16128_mask", IX86_BUILTIN_REDUCEBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_INT_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_getmantbf16_v32bf_mask, "__builtin_ia32_getmantbf16512_mask", IX86_BUILTIN_GETMANTBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_INT_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_getmantbf16_v16bf_mask, "__builtin_ia32_getmantbf16256_mask", IX86_BUILTIN_GETMANTBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_INT_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_getmantbf16_v8bf_mask, "__builtin_ia32_getmantbf16128_mask", IX86_BUILTIN_GETMANTBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_INT_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fpclassbf16_v32bf_mask, "__builtin_ia32_fpclassbf16512_mask", IX86_BUILTIN_FPCLASSBF16512_MASK, UNKNOWN, (int) SI_FTYPE_V32BF_INT_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fpclassbf16_v16bf_mask, "__builtin_ia32_fpclassbf16256_mask", IX86_BUILTIN_FPCLASSBF16256_MASK, UNKNOWN, (int) HI_FTYPE_V16BF_INT_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_fpclassbf16_v8bf_mask, "__builtin_ia32_fpclassbf16128_mask", IX86_BUILTIN_FPCLASSBF16128_MASK, UNKNOWN, (int) QI_FTYPE_V8BF_INT_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cmpbf16_v32bf_mask, "__builtin_ia32_cmpbf16512_mask", IX86_BUILTIN_CMPBF16512_MASK, UNKNOWN, (int) USI_FTYPE_V32BF_V32BF_INT_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cmpbf16_v16bf_mask, "__builtin_ia32_cmpbf16256_mask", IX86_BUILTIN_CMPBF16256_MASK, UNKNOWN, (int) UHI_FTYPE_V16BF_V16BF_INT_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cmpbf16_v8bf_mask, "__builtin_ia32_cmpbf16128_mask", IX86_BUILTIN_CMPBF16128_MASK, UNKNOWN, (int) UQI_FTYPE_V8BF_V8BF_INT_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16eq", IX86_BUILTIN_VCOMISBF16EQ, EQ, (int) INT_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16gt", IX86_BUILTIN_VCOMISBF16GT, GT, (int) INT_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16ge", IX86_BUILTIN_VCOMISBF16GE, GE, (int) INT_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16le", IX86_BUILTIN_VCOMISBF16LE, LE, (int) INT_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16lt", IX86_BUILTIN_VCOMISBF16LT, LT, (int) INT_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_comisbf16_v8bf, "__builtin_ia32_vcomisbf16neq", IX86_BUILTIN_VCOMISBF16NE, NE, (int) INT_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtbf162ibsv8bf_mask, "__builtin_ia32_cvtbf162ibs128_mask", IX86_BUILTIN_CVTBF162IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtbf162ibsv16bf_mask, "__builtin_ia32_cvtbf162ibs256_mask", IX86_BUILTIN_CVTBF162IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtbf162ibsv32bf_mask, "__builtin_ia32_cvtbf162ibs512_mask", IX86_BUILTIN_CVTBF162IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtbf162iubsv8bf_mask, "__builtin_ia32_cvtbf162iubs128_mask", IX86_BUILTIN_CVTBF162IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtbf162iubsv16bf_mask, "__builtin_ia32_cvtbf162iubs256_mask", IX86_BUILTIN_CVTBF162IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtbf162iubsv32bf_mask, "__builtin_ia32_cvtbf162iubs512_mask", IX86_BUILTIN_CVTBF162IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2ibsv8hf_mask, "__builtin_ia32_cvtph2ibs128_mask", IX86_BUILTIN_CVTPH2IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2ibsv16hf_mask, "__builtin_ia32_cvtph2ibs256_mask", IX86_BUILTIN_CVTPH2IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask, "__builtin_ia32_cvtph2ibs512_mask", IX86_BUILTIN_CVTPH2IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2iubsv8hf_mask, "__builtin_ia32_cvtph2iubs128_mask", IX86_BUILTIN_CVTPH2IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2iubsv16hf_mask, "__builtin_ia32_cvtph2iubs256_mask", IX86_BUILTIN_CVTPH2IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask, "__builtin_ia32_cvtph2iubs512_mask", IX86_BUILTIN_CVTPH2IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2ibsv4sf_mask, "__builtin_ia32_cvtps2ibs128_mask", IX86_BUILTIN_CVTPS2IBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2ibsv8sf_mask, "__builtin_ia32_cvtps2ibs256_mask", IX86_BUILTIN_CVTPS2IBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2ibsv16sf_mask, "__builtin_ia32_cvtps2ibs512_mask", IX86_BUILTIN_CVTPS2IBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2iubsv4sf_mask, "__builtin_ia32_cvtps2iubs128_mask", IX86_BUILTIN_CVTPS2IUBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2iubsv8sf_mask, "__builtin_ia32_cvtps2iubs256_mask", IX86_BUILTIN_CVTPS2IUBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2iubsv16sf_mask, "__builtin_ia32_cvtps2iubs512_mask", IX86_BUILTIN_CVTPS2IUBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttbf162ibsv8bf_mask, "__builtin_ia32_cvttbf162ibs128_mask", IX86_BUILTIN_CVTTBF162IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttbf162ibsv16bf_mask, "__builtin_ia32_cvttbf162ibs256_mask", IX86_BUILTIN_CVTTBF162IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttbf162ibsv32bf_mask, "__builtin_ia32_cvttbf162ibs512_mask", IX86_BUILTIN_CVTTBF162IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttbf162iubsv8bf_mask, "__builtin_ia32_cvttbf162iubs128_mask", IX86_BUILTIN_CVTTBF162IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8BF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttbf162iubsv16bf_mask, "__builtin_ia32_cvttbf162iubs256_mask", IX86_BUILTIN_CVTTBF162IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16BF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttbf162iubsv32bf_mask, "__builtin_ia32_cvttbf162iubs512_mask", IX86_BUILTIN_CVTTBF162IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32BF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2ibsv8hf_mask, "__builtin_ia32_cvttph2ibs128_mask", IX86_BUILTIN_CVTTPH2IBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2ibsv16hf_mask, "__builtin_ia32_cvttph2ibs256_mask", IX86_BUILTIN_CVTTPH2IBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2ibsv32hf_mask, "__builtin_ia32_cvttph2ibs512_mask", IX86_BUILTIN_CVTTPH2IBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2iubsv8hf_mask, "__builtin_ia32_cvttph2iubs128_mask", IX86_BUILTIN_CVTTPH2IUBS128_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2iubsv16hf_mask, "__builtin_ia32_cvttph2iubs256_mask", IX86_BUILTIN_CVTTPH2IUBS256_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2iubsv32hf_mask, "__builtin_ia32_cvttph2iubs512_mask", IX86_BUILTIN_CVTTPH2IUBS512_MASK, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2ibsv4sf_mask, "__builtin_ia32_cvttps2ibs128_mask", IX86_BUILTIN_CVTTPS2IBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2ibsv8sf_mask, "__builtin_ia32_cvttps2ibs256_mask", IX86_BUILTIN_CVTTPS2IBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2ibsv16sf_mask, "__builtin_ia32_cvttps2ibs512_mask", IX86_BUILTIN_CVTTPS2IBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2iubsv4sf_mask, "__builtin_ia32_cvttps2iubs128_mask", IX86_BUILTIN_CVTTPS2IUBS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2iubsv8sf_mask, "__builtin_ia32_cvttps2iubs256_mask", IX86_BUILTIN_CVTTPS2IUBS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2iubsv16sf_mask, "__builtin_ia32_cvttps2iubs512_mask", IX86_BUILTIN_CVTTPS2IUBS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2dqsv2df_mask, "__builtin_ia32_cvttpd2dqs128_mask", IX86_BUILTIN_VCVTTPD2DQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V2DF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2dqsv4df_mask, "__builtin_ia32_cvttpd2dqs256_mask", IX86_BUILTIN_VCVTTPD2DQS256_MASK, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2dqsv8df_mask, "__builtin_ia32_cvttpd2dqs512_mask", IX86_BUILTIN_VCVTTPD2DQS512_MASK, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2qqsv2df_mask, "__builtin_ia32_cvttpd2qqs128_mask", IX86_BUILTIN_VCVTTPD2QQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DF_V2DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2qqsv4df_mask, "__builtin_ia32_cvttpd2qqs256_mask", IX86_BUILTIN_VCVTTPD2QQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2qqsv8df_mask, "__builtin_ia32_cvttpd2qqs512_mask", IX86_BUILTIN_VCVTTPD2QQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2udqsv2df_mask, "__builtin_ia32_cvttpd2udqs128_mask", IX86_BUILTIN_VCVTTPD2UDQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V2DF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2udqsv4df_mask, "__builtin_ia32_cvttpd2udqs256_mask", IX86_BUILTIN_VCVTTPD2UDQS256_MASK, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2udqsv8df_mask, "__builtin_ia32_cvttpd2udqs512_mask", IX86_BUILTIN_VCVTTPD2UDQS512_MASK, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2uqqsv2df_mask, "__builtin_ia32_cvttpd2uqqs128_mask", IX86_BUILTIN_VCVTTPD2UQQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V2DF_V2DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2uqqsv4df_mask, "__builtin_ia32_cvttpd2uqqs256_mask", IX86_BUILTIN_VCVTTPD2UQQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2uqqsv8df_mask, "__builtin_ia32_cvttpd2uqqs512_mask", IX86_BUILTIN_VCVTTPD2UQQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2dqsv4sf_mask, "__builtin_ia32_cvttps2dqs128_mask", IX86_BUILTIN_VCVTTPS2DQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2dqsv8sf_mask, "__builtin_ia32_cvttps2dqs256_mask", IX86_BUILTIN_VCVTTPS2DQS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2dqsv16sf_mask, "__builtin_ia32_cvttps2dqs512_mask", IX86_BUILTIN_VCVTTPS2DQS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2qqsv2di_mask, "__builtin_ia32_cvttps2qqs128_mask", IX86_BUILTIN_VCVTTPS2QQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V4SF_V2DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2qqsv4di_mask, "__builtin_ia32_cvttps2qqs256_mask", IX86_BUILTIN_VCVTTPS2QQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2qqsv8di_mask, "__builtin_ia32_cvttps2qqs512_mask", IX86_BUILTIN_VCVTTPS2QQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2udqsv4sf_mask, "__builtin_ia32_cvttps2udqs128_mask", IX86_BUILTIN_VCVTTPS2UDQS128_MASK, UNKNOWN, (int) V4SI_FTYPE_V4SF_V4SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2udqsv8sf_mask, "__builtin_ia32_cvttps2udqs256_mask", IX86_BUILTIN_VCVTTPS2UDQS256_MASK, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2udqsv16sf_mask, "__builtin_ia32_cvttps2udqs512_mask", IX86_BUILTIN_VCVTTPS2UDQS512_MASK, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2uqqsv2di_mask, "__builtin_ia32_cvttps2uqqs128_mask", IX86_BUILTIN_VCVTTPS2UQQS128_MASK, UNKNOWN, (int) V2DI_FTYPE_V4SF_V2DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2uqqsv4di_mask, "__builtin_ia32_cvttps2uqqs256_mask", IX86_BUILTIN_VCVTTPS2UQQS256_MASK, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2uqqsv8di_mask, "__builtin_ia32_cvttps2uqqs512_mask", IX86_BUILTIN_VCVTTPS2UQQS512_MASK, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxbf16_v8bf_mask, "__builtin_ia32_minmaxbf16128_mask", IX86_BUILTIN_MINMAXBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_INT_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxbf16_v16bf_mask, "__builtin_ia32_minmaxbf16256_mask", IX86_BUILTIN_MINMAXBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_INT_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxbf16_v32bf_mask, "__builtin_ia32_minmaxbf16512_mask", IX86_BUILTIN_MINMAXBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_INT_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv4df_mask, "__builtin_ia32_minmaxpd256_mask", IX86_BUILTIN_MINMAXPD256_MASK, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT_V4DF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv16hf_mask, "__builtin_ia32_minmaxph256_mask", IX86_BUILTIN_MINMAXPH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_INT_V16HF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv8sf_mask, "__builtin_ia32_minmaxps256_mask", IX86_BUILTIN_MINMAXPS256_MASK, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT_V8SF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv2df_mask, "__builtin_ia32_minmaxpd128_mask", IX86_BUILTIN_MINMAXPD128_MASK, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv8hf_mask, "__builtin_ia32_minmaxph128_mask", IX86_BUILTIN_MINMAXPH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv4sf_mask, "__builtin_ia32_minmaxps128_mask", IX86_BUILTIN_MINMAXPS128_MASK, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI)
/* Builtins with rounding support. */
BDESC_END (ARGS, ROUND_ARGS)
@@ -3656,215 +3660,37 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
/* AVX10.2. */
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtdq2ph_v8si_mask_round, "__builtin_ia32_vcvtdq2ph256_mask_round", IX86_BUILTIN_VCVTDQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SI_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatv8siv8sf2_mask_round, "__builtin_ia32_cvtdq2ps256_mask_round", IX86_BUILTIN_VCVTDQ2PS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SI_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtpd2ph_v4df_mask_round, "__builtin_ia32_vcvtpd2ph256_mask_round", IX86_BUILTIN_VCVTPD2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4DF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtpd2ps256_mask_round, "__builtin_ia32_cvtpd2ps256_mask_round", IX86_BUILTIN_CVTPD2PS256_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4DF_V4SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtpd2dq256_mask_round, "__builtin_ia32_cvtpd2dq256_mask_round", IX86_BUILTIN_CVTPD2DQ256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fix_notruncv4dfv4di2_mask_round, "__builtin_ia32_cvtpd2qq256_mask_round", IX86_BUILTIN_CVTPD2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fixuns_notruncv4dfv4si2_mask_round, "__builtin_ia32_cvtpd2udq256_mask_round", IX86_BUILTIN_CVTPD2UDQ256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fixuns_notruncv4dfv4di2_mask_round, "__builtin_ia32_cvtpd2uqq256_mask_round", IX86_BUILTIN_CVTPD2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2dq_v8si_mask_round, "__builtin_ia32_vcvtph2dq256_mask_round", IX86_BUILTIN_VCVTPH2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_float_extend_phv4df2_mask_round, "__builtin_ia32_vcvtph2pd256_mask_round", IX86_BUILTIN_VCVTPH2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V8HF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2ps256_mask_round, "__builtin_ia32_vcvtph2ps256_mask_round", IX86_BUILTIN_VCVTPH2PS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8HF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_float_extend_phv8sf2_mask_round, "__builtin_ia32_vcvtph2psx256_mask_round", IX86_BUILTIN_VCVTPH2PSX256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8HF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2qq_v4di_mask_round, "__builtin_ia32_vcvtph2qq256_mask_round", IX86_BUILTIN_VCVTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2udq_v8si_mask_round, "__builtin_ia32_vcvtph2udq256_mask_round", IX86_BUILTIN_VCVTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uqq_v4di_mask_round, "__builtin_ia32_vcvtph2uqq256_mask_round", IX86_BUILTIN_VCVTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uw_v16hi_mask_round, "__builtin_ia32_vcvtph2uw256_mask_round", IX86_BUILTIN_VCVTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2w_v16hi_mask_round, "__builtin_ia32_vcvtph2w256_mask_round", IX86_BUILTIN_VCVTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtps2pd256_mask_round, "__builtin_ia32_vcvtps2pd256_mask_round", IX86_BUILTIN_VCVTPS2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4SF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtps2ph_v8sf_mask_round, "__builtin_ia32_vcvtps2phx256_mask_round", IX86_BUILTIN_VCVTPS2PHX256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SF_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_fix_notruncv8sfv8si_mask_round, "__builtin_ia32_vcvtps2dq256_mask_round", IX86_BUILTIN_VCVTPS2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_cvtps2qqv4di_mask_round, "__builtin_ia32_cvtps2qq256_mask_round", IX86_BUILTIN_VCVTPS2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixuns_notruncv8sfv8si_mask_round, "__builtin_ia32_cvtps2udq256_mask_round", IX86_BUILTIN_VCVTPS2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_cvtps2uqqv4di_mask_round, "__builtin_ia32_cvtps2uqq256_mask_round", IX86_BUILTIN_VCVTPS2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatv4div4df2_mask_round, "__builtin_ia32_cvtqq2pd256_mask_round", IX86_BUILTIN_VCVTQQ2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DI_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtqq2ph_v4di_mask_round, "__builtin_ia32_vcvtqq2ph256_mask_round", IX86_BUILTIN_VCVTQQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4DI_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatv4div4sf2_mask_round, "__builtin_ia32_cvtqq2ps256_mask_round", IX86_BUILTIN_VCVTQQ2PS256_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4DI_V4SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4dfv4si2_mask_round, "__builtin_ia32_cvttpd2dq256_mask_round", IX86_BUILTIN_VCVTTPD2DQ256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4dfv4di2_mask_round, "__builtin_ia32_cvttpd2qq256_mask_round", IX86_BUILTIN_VCVTTPD2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4dfv4si2_mask_round, "__builtin_ia32_cvttpd2udq256_mask_round", IX86_BUILTIN_VCVTTPD2UDQ256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4dfv4di2_mask_round, "__builtin_ia32_cvttpd2uqq256_mask_round", IX86_BUILTIN_VCVTTPD2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv8si2_mask_round, "__builtin_ia32_vcvttph2dq256_mask_round", IX86_BUILTIN_VCVTTPH2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv4di2_mask_round, "__builtin_ia32_vcvttph2qq256_mask_round", IX86_BUILTIN_VCVTTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv8si2_mask_round, "__builtin_ia32_vcvttph2udq256_mask_round", IX86_BUILTIN_VCVTTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv4di2_mask_round, "__builtin_ia32_vcvttph2uqq256_mask_round", IX86_BUILTIN_VCVTTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2uw256_mask_round", IX86_BUILTIN_VCVTTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2w256_mask_round", IX86_BUILTIN_VCVTTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv8sfv8si2_mask_round, "__builtin_ia32_cvttps2dq256_mask_round", IX86_BUILTIN_VCVTTPS2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4sfv4di2_mask_round, "__builtin_ia32_cvttps2qq256_mask_round", IX86_BUILTIN_VCVTTPS2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv8sfv8si2_mask_round, "__builtin_ia32_cvttps2udq256_mask_round", IX86_BUILTIN_VCVTTPS2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4sfv4di2_mask_round, "__builtin_ia32_cvttps2uqq256_mask_round", IX86_BUILTIN_VCVTTPS2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtudq2ph_v8si_mask_round, "__builtin_ia32_vcvtudq2ph256_mask_round", IX86_BUILTIN_VCVTUDQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SI_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv8siv8sf2_mask_round, "__builtin_ia32_cvtudq2ps256_mask_round", IX86_BUILTIN_VCVTUDQ2PS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SI_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4df2_mask_round, "__builtin_ia32_cvtuqq2pd256_mask_round", IX86_BUILTIN_VCVTUQQ2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DI_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtuqq2ph_v4di_mask_round, "__builtin_ia32_vcvtuqq2ph256_mask_round", IX86_BUILTIN_VCVTUQQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4DI_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4sf2_mask_round, "__builtin_ia32_cvtuqq2ps256_mask_round", IX86_BUILTIN_VCVTUQQ2PS256_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4DI_V4SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtuw2ph_v16hi_mask_round, "__builtin_ia32_vcvtuw2ph256_mask_round", IX86_BUILTIN_VCVTUW2PH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HI_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtw2ph_v16hi_mask_round, "__builtin_ia32_vcvtw2ph256_mask_round", IX86_BUILTIN_VCVTW2PH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HI_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_divv4df3_mask_round, "__builtin_ia32_divpd256_mask_round", IX86_BUILTIN_VDIVPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_divv16hf3_mask_round, "__builtin_ia32_divph256_mask_round", IX86_BUILTIN_VDIVPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_divv8sf3_mask_round, "__builtin_ia32_divps256_mask_round", IX86_BUILTIN_VDIVPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fma_fcmaddc_v16hf_round, "__builtin_ia32_vfcmaddcph256_round", IX86_BUILTIN_VFCMADDCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmaddc_v16hf_mask1_round, "__builtin_ia32_vfcmaddcph256_mask_round", IX86_BUILTIN_VFCMADDCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmaddc_v16hf_mask_round, "__builtin_ia32_vfcmaddcph256_mask3_round", IX86_BUILTIN_VFCMADDCPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmaddc_v16hf_maskz_round, "__builtin_ia32_vfcmaddcph256_maskz_round", IX86_BUILTIN_VFCMADDCPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmulc_v16hf_round, "__builtin_ia32_vfcmulcph256_round", IX86_BUILTIN_VFCMULCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmulc_v16hf_mask_round, "__builtin_ia32_vfcmulcph256_mask_round", IX86_BUILTIN_VFCMULCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv4df_mask_round, "__builtin_ia32_fixupimmpd256_mask_round", IX86_BUILTIN_VFIXUPIMMPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv4df_maskz_round, "__builtin_ia32_fixupimmpd256_maskz_round", IX86_BUILTIN_VFIXUPIMMPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv8sf_mask_round, "__builtin_ia32_fixupimmps256_mask_round", IX86_BUILTIN_VFIXUPIMMPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv8sf_maskz_round, "__builtin_ia32_fixupimmps256_maskz_round", IX86_BUILTIN_VFIXUPIMMPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v4df_mask_round, "__builtin_ia32_vfmaddpd256_mask_round", IX86_BUILTIN_VFMADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v4df_mask3_round, "__builtin_ia32_vfmaddpd256_mask3_round", IX86_BUILTIN_VFMADDPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v4df_maskz_round, "__builtin_ia32_vfmaddpd256_maskz_round", IX86_BUILTIN_VFMADDPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v16hf_mask_round, "__builtin_ia32_vfmaddph256_mask_round", IX86_BUILTIN_VFMADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v16hf_mask3_round, "__builtin_ia32_vfmaddph256_mask3_round", IX86_BUILTIN_VFMADDPH512_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v16hf_maskz_round, "__builtin_ia32_vfmaddph256_maskz_round", IX86_BUILTIN_VFMADDPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v8sf_mask_round, "__builtin_ia32_vfmaddps256_mask_round", IX86_BUILTIN_VFMADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v8sf_mask3_round, "__builtin_ia32_vfmaddps256_mask3_round", IX86_BUILTIN_VFMADDPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v8sf_maskz_round, "__builtin_ia32_vfmaddps256_maskz_round", IX86_BUILTIN_VFMADDPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fma_fmaddc_v16hf_round, "__builtin_ia32_vfmaddcph256_round", IX86_BUILTIN_VFMADDCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddc_v16hf_mask1_round, "__builtin_ia32_vfmaddcph256_mask_round", IX86_BUILTIN_VFMADDCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddc_v16hf_mask_round, "__builtin_ia32_vfmaddcph256_mask3_round", IX86_BUILTIN_VFMADDCPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddc_v16hf_maskz_round, "__builtin_ia32_vfmaddcph256_maskz_round", IX86_BUILTIN_VFMADDCPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v4df_mask_round, "__builtin_ia32_vfmaddsubpd256_mask_round", IX86_BUILTIN_VFMADDSUBPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v4df_mask3_round, "__builtin_ia32_vfmaddsubpd256_mask3_round", IX86_BUILTIN_VFMADDSUBPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v4df_maskz_round, "__builtin_ia32_vfmaddsubpd256_maskz_round", IX86_BUILTIN_VFMADDSUBPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v16hf_mask_round, "__builtin_ia32_vfmaddsubph256_mask_round", IX86_BUILTIN_VFMADDSUBPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v16hf_mask3_round, "__builtin_ia32_vfmaddsubph256_mask3_round", IX86_BUILTIN_VFMADDSUBPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v16hf_maskz_round, "__builtin_ia32_vfmaddsubph256_maskz_round", IX86_BUILTIN_VFMADDSUBPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v8sf_mask_round, "__builtin_ia32_vfmaddsubps256_mask_round", IX86_BUILTIN_VFMADDSUBPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v8sf_mask3_round, "__builtin_ia32_vfmaddsubps256_mask3_round", IX86_BUILTIN_VFMADDSUBPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v8sf_maskz_round, "__builtin_ia32_vfmaddsubps256_maskz_round", IX86_BUILTIN_VFMADDSUBPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v4df_mask_round, "__builtin_ia32_vfmsubpd256_mask_round", IX86_BUILTIN_VFMSUBPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v4df_mask3_round, "__builtin_ia32_vfmsubpd256_mask3_round", IX86_BUILTIN_VFMSUBPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v4df_maskz_round, "__builtin_ia32_vfmsubpd256_maskz_round", IX86_BUILTIN_VFMSUBPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v16hf_mask_round, "__builtin_ia32_vfmsubph256_mask_round", IX86_BUILTIN_VFMSUBPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v16hf_mask3_round, "__builtin_ia32_vfmsubph256_mask3_round", IX86_BUILTIN_VFMSUBPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v16hf_maskz_round, "__builtin_ia32_vfmsubph256_maskz_round", IX86_BUILTIN_VFMSUBPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v8sf_mask_round, "__builtin_ia32_vfmsubps256_mask_round", IX86_BUILTIN_VFMSUBPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v8sf_mask3_round, "__builtin_ia32_vfmsubps256_mask3_round", IX86_BUILTIN_VFMSUBPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsub_v8sf_maskz_round, "__builtin_ia32_vfmsubps256_maskz_round", IX86_BUILTIN_VFMSUBPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v4df_mask_round, "__builtin_ia32_vfmsubaddpd256_mask_round", IX86_BUILTIN_VFMSUBADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v4df_mask3_round, "__builtin_ia32_vfmsubaddpd256_mask3_round", IX86_BUILTIN_VFMSUBADDPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v4df_maskz_round, "__builtin_ia32_vfmsubaddpd256_maskz_round", IX86_BUILTIN_VFMSUBADDPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v16hf_mask_round, "__builtin_ia32_vfmsubaddph256_mask_round", IX86_BUILTIN_VFMSUBADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v16hf_mask3_round, "__builtin_ia32_vfmsubaddph256_mask3_round", IX86_BUILTIN_VFMSUBADDPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v16hf_maskz_round, "__builtin_ia32_vfmsubaddph256_maskz_round", IX86_BUILTIN_VFMSUBADDPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v8sf_mask_round, "__builtin_ia32_vfmsubaddps256_mask_round", IX86_BUILTIN_VFMSUBADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v8sf_mask3_round, "__builtin_ia32_vfmsubaddps256_mask3_round", IX86_BUILTIN_VFMSUBADDPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmsubadd_v8sf_maskz_round, "__builtin_ia32_vfmsubaddps256_maskz_round", IX86_BUILTIN_VFMSUBADDPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmulc_v16hf_round, "__builtin_ia32_vfmulcph256_round", IX86_BUILTIN_VFMULCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmulc_v16hf_mask_round, "__builtin_ia32_vfmulcph256_mask_round", IX86_BUILTIN_VFMULCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v4df_mask_round, "__builtin_ia32_vfnmaddpd256_mask_round", IX86_BUILTIN_VFNMADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v4df_mask3_round, "__builtin_ia32_vfnmaddpd256_mask3_round", IX86_BUILTIN_VFNMADDPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v4df_maskz_round, "__builtin_ia32_vfnmaddpd256_maskz_round", IX86_BUILTIN_VFNMADDPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v16hf_mask_round, "__builtin_ia32_vfnmaddph256_mask_round", IX86_BUILTIN_VFNMADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v16hf_mask3_round, "__builtin_ia32_vfnmaddph256_mask3_round", IX86_BUILTIN_VFNMADDPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v16hf_maskz_round, "__builtin_ia32_vfnmaddph256_maskz_round", IX86_BUILTIN_VFNMADDPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v8sf_mask_round, "__builtin_ia32_vfnmaddps256_mask_round", IX86_BUILTIN_VFNMADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v8sf_mask3_round, "__builtin_ia32_vfnmaddps256_mask3_round", IX86_BUILTIN_VFNMADDPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v8sf_maskz_round, "__builtin_ia32_vfnmaddps256_maskz_round", IX86_BUILTIN_VFNMADDPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v4df_mask_round, "__builtin_ia32_vfnmsubpd256_mask_round", IX86_BUILTIN_VFNMSUBPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v4df_mask3_round, "__builtin_ia32_vfnmsubpd256_mask3_round", IX86_BUILTIN_VFNMSUBPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v4df_maskz_round, "__builtin_ia32_vfnmsubpd256_maskz_round", IX86_BUILTIN_VFNMSUBPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v16hf_mask_round, "__builtin_ia32_vfnmsubph256_mask_round", IX86_BUILTIN_VFNMSUBPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v16hf_mask3_round, "__builtin_ia32_vfnmsubph256_mask3_round", IX86_BUILTIN_VFNMSUBPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v16hf_maskz_round, "__builtin_ia32_vfnmsubph256_maskz_round", IX86_BUILTIN_VFNMSUBPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v8sf_mask_round, "__builtin_ia32_vfnmsubps256_mask_round", IX86_BUILTIN_VFNMSUBPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v8sf_mask3_round, "__builtin_ia32_vfnmsubps256_mask3_round", IX86_BUILTIN_VFNMSUBPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v8sf_maskz_round, "__builtin_ia32_vfnmsubps256_maskz_round", IX86_BUILTIN_VFNMSUBPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getexpv4df_mask_round, "__builtin_ia32_getexppd256_mask_round", IX86_BUILTIN_VGETEXPPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getexpv16hf_mask_round, "__builtin_ia32_getexpph256_mask_round", IX86_BUILTIN_VGETEXPPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getexpv8sf_mask_round, "__builtin_ia32_getexpps256_mask_round", IX86_BUILTIN_VGETEXPPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getmantv4df_mask_round, "__builtin_ia32_getmantpd256_mask_round", IX86_BUILTIN_VGETMANTPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getmantv16hf_mask_round, "__builtin_ia32_getmantph256_mask_round", IX86_BUILTIN_VGETMANTPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getmantv8sf_mask_round, "__builtin_ia32_getmantps256_mask_round", IX86_BUILTIN_VGETMANTPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_smaxv4df3_mask_round, "__builtin_ia32_maxpd256_mask_round", IX86_BUILTIN_VMAXPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_smaxv16hf3_mask_round, "__builtin_ia32_maxph256_mask_round", IX86_BUILTIN_VMAXPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_smaxv8sf3_mask_round, "__builtin_ia32_maxps256_mask_round", IX86_BUILTIN_VMAXPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sminv4df3_mask_round, "__builtin_ia32_minpd256_mask_round", IX86_BUILTIN_VMINPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sminv16hf3_mask_round, "__builtin_ia32_minph256_mask_round", IX86_BUILTIN_VMINPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sminv8sf3_mask_round, "__builtin_ia32_minps256_mask_round", IX86_BUILTIN_VMINPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_mulv4df3_mask_round, "__builtin_ia32_mulpd256_mask_round", IX86_BUILTIN_VMULPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_mulv16hf3_mask_round, "__builtin_ia32_mulph256_mask_round", IX86_BUILTIN_VMULPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_mulv8sf3_mask_round, "__builtin_ia32_mulps256_mask_round", IX86_BUILTIN_VMULPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_rangepv4df_mask_round, "__builtin_ia32_rangepd256_mask_round", IX86_BUILTIN_VRANGEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_rangepv8sf_mask_round, "__builtin_ia32_rangeps256_mask_round", IX86_BUILTIN_VRANGEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv4df_mask_round, "__builtin_ia32_reducepd256_mask_round", IX86_BUILTIN_VREDUCEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv16hf_mask_round, "__builtin_ia32_reduceph256_mask_round", IX86_BUILTIN_VREDUCEPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_reducepv8sf_mask_round, "__builtin_ia32_reduceps256_mask_round", IX86_BUILTIN_VREDUCEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev4df_mask_round, "__builtin_ia32_rndscalepd256_mask_round", IX86_BUILTIN_VRNDSCALEPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev16hf_mask_round, "__builtin_ia32_rndscaleph256_mask_round", IX86_BUILTIN_VRNDSCALEPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_rndscalev8sf_mask_round, "__builtin_ia32_rndscaleps256_mask_round", IX86_BUILTIN_VRNDSCALEPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_scalefv4df_mask_round, "__builtin_ia32_scalefpd256_mask_round", IX86_BUILTIN_VSCALEFPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_scalefv16hf_mask_round, "__builtin_ia32_scalefph256_mask_round", IX86_BUILTIN_VSCALEFPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_scalefv8sf_mask_round, "__builtin_ia32_scalefps256_mask_round", IX86_BUILTIN_VSCALEFPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_sqrtv4df2_mask_round, "__builtin_ia32_sqrtpd256_mask_round", IX86_BUILTIN_VSQRTPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_sqrtv16hf2_mask_round, "__builtin_ia32_sqrtph256_mask_round", IX86_BUILTIN_VSQRTPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_sqrtv8sf2_mask_round, "__builtin_ia32_sqrtps256_mask_round", IX86_BUILTIN_VSQRTPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_subv4df3_mask_round, "__builtin_ia32_subpd256_mask_round", IX86_BUILTIN_VSUBPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_subv16hf3_mask_round, "__builtin_ia32_subph256_mask_round", IX86_BUILTIN_VSUBPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_subv8sf3_mask_round, "__builtin_ia32_subps256_mask_round", IX86_BUILTIN_VSUBPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvt2ps2phx_v32hf_mask_round, "__builtin_ia32_vcvt2ps2phx512_mask_round", IX86_BUILTIN_VCVT2PS2PHX_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V16SF_V16SF_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvt2ps2phx_v16hf_mask_round, "__builtin_ia32_vcvt2ps2phx256_mask_round", IX86_BUILTIN_VCVT2PS2PHX_V16HF_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V8SF_V8SF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtph2ibsv16hf_mask_round, "__builtin_ia32_cvtph2ibs256_mask_round", IX86_BUILTIN_CVTPH2IBS256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask_round, "__builtin_ia32_cvtph2ibs512_mask_round", IX86_BUILTIN_CVTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtph2iubsv16hf_mask_round, "__builtin_ia32_cvtph2iubs256_mask_round", IX86_BUILTIN_CVTPH2IUBS256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask_round, "__builtin_ia32_cvtph2iubs512_mask_round", IX86_BUILTIN_CVTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtps2ibsv8sf_mask_round, "__builtin_ia32_cvtps2ibs256_mask_round", IX86_BUILTIN_CVTPS2IBS256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtps2ibsv16sf_mask_round, "__builtin_ia32_cvtps2ibs512_mask_round", IX86_BUILTIN_CVTPS2IBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvtps2iubsv8sf_mask_round, "__builtin_ia32_cvtps2iubs256_mask_round", IX86_BUILTIN_CVTPS2IUBS256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtps2iubsv16sf_mask_round, "__builtin_ia32_cvtps2iubs512_mask_round", IX86_BUILTIN_CVTPS2IUBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttph2ibsv16hf_mask_round, "__builtin_ia32_cvttph2ibs256_mask_round", IX86_BUILTIN_CVTTPH2IBS256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttph2ibsv32hf_mask_round, "__builtin_ia32_cvttph2ibs512_mask_round", IX86_BUILTIN_CVTTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttph2iubsv16hf_mask_round, "__builtin_ia32_cvttph2iubs256_mask_round", IX86_BUILTIN_CVTTPH2IUBS256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttph2iubsv32hf_mask_round, "__builtin_ia32_cvttph2iubs512_mask_round", IX86_BUILTIN_CVTTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttps2ibsv8sf_mask_round, "__builtin_ia32_cvttps2ibs256_mask_round", IX86_BUILTIN_CVTTPS2IBS256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttps2ibsv16sf_mask_round, "__builtin_ia32_cvttps2ibs512_mask_round", IX86_BUILTIN_CVTTPS2IBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_cvttps2iubsv8sf_mask_round, "__builtin_ia32_cvttps2iubs256_mask_round", IX86_BUILTIN_CVTTPS2IUBS256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvttps2iubsv16sf_mask_round, "__builtin_ia32_cvttps2iubs512_mask_round", IX86_BUILTIN_CVTTPS2IUBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2dqsv4df_mask_round, "__builtin_ia32_cvttpd2dqs256_mask_round", IX86_BUILTIN_VCVTTPD2DQS256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2dqsv8df_mask_round, "__builtin_ia32_cvttpd2dqs512_mask_round", IX86_BUILTIN_VCVTTPD2DQS512_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2qqsv4df_mask_round, "__builtin_ia32_cvttpd2qqs256_mask_round", IX86_BUILTIN_VCVTTPD2QQS256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2qqsv8df_mask_round, "__builtin_ia32_cvttpd2qqs512_mask_round", IX86_BUILTIN_VCVTTPD2QQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2udqsv4df_mask_round, "__builtin_ia32_cvttpd2udqs256_mask_round", IX86_BUILTIN_VCVTTPD2UDQS256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2udqsv8df_mask_round, "__builtin_ia32_cvttpd2udqs512_mask_round", IX86_BUILTIN_VCVTTPD2UDQS512_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttpd2uqqsv4df_mask_round, "__builtin_ia32_cvttpd2uqqs256_mask_round", IX86_BUILTIN_VCVTTPD2UQQS256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttpd2uqqsv8df_mask_round, "__builtin_ia32_cvttpd2uqqs512_mask_round", IX86_BUILTIN_VCVTTPD2UQQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2dqsv8sf_mask_round, "__builtin_ia32_cvttps2dqs256_mask_round", IX86_BUILTIN_VCVTTPS2DQS256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2dqsv16sf_mask_round, "__builtin_ia32_cvttps2dqs512_mask_round", IX86_BUILTIN_VCVTTPS2DQS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2qqsv4di_mask_round, "__builtin_ia32_cvttps2qqs256_mask_round", IX86_BUILTIN_VCVTTPS2QQS256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2qqsv8di_mask_round, "__builtin_ia32_cvttps2qqs512_mask_round", IX86_BUILTIN_VCVTTPS2QQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2udqsv8sf_mask_round, "__builtin_ia32_cvttps2udqs256_mask_round", IX86_BUILTIN_VCVTTPS2UDQS256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2udqsv16sf_mask_round, "__builtin_ia32_cvttps2udqs512_mask_round", IX86_BUILTIN_VCVTTPS2UDQS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttps2uqqsv4di_mask_round, "__builtin_ia32_cvttps2uqqs256_mask_round", IX86_BUILTIN_VCVTTPS2UQQS256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_vcvttps2uqqsv8di_mask_round, "__builtin_ia32_cvttps2uqqs512_mask_round", IX86_BUILTIN_VCVTTPS2UQQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttsd2sissi_round, "__builtin_ia32_cvttsd2sis32_round", IX86_BUILTIN_VCVTTSD2SIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V2DF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttsd2sisdi_round, "__builtin_ia32_cvttsd2sis64_round", IX86_BUILTIN_VCVTTSD2SIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V2DF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttsd2usissi_round, "__builtin_ia32_cvttsd2usis32_round", IX86_BUILTIN_VCVTTSD2USIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V2DF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttsd2usisdi_round, "__builtin_ia32_cvttsd2usis64_round", IX86_BUILTIN_VCVTTSD2USIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V2DF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttss2sissi_round, "__builtin_ia32_cvttss2sis32_round", IX86_BUILTIN_VCVTTSS2SIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V4SF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttss2sisdi_round, "__builtin_ia32_cvttss2sis64_round", IX86_BUILTIN_VCVTTSS2SIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V4SF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttss2usissi_round, "__builtin_ia32_cvttss2usis32_round", IX86_BUILTIN_VCVTTSS2USIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V4SF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_vcvttss2usisdi_round, "__builtin_ia32_cvttss2usis64_round", IX86_BUILTIN_VCVTTSS2USIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V4SF_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_minmaxpv8df_mask_round, "__builtin_ia32_minmaxpd512_mask_round", IX86_BUILTIN_MINMAXPD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_minmaxpv32hf_mask_round, "__builtin_ia32_minmaxph512_mask_round", IX86_BUILTIN_MINMAXPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT_V32HF_USI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_minmaxpv16sf_mask_round, "__builtin_ia32_minmaxps512_mask_round", IX86_BUILTIN_MINMAXPS512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxpv4df_mask_round, "__builtin_ia32_minmaxpd256_mask_round", IX86_BUILTIN_MINMAXPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxpv16hf_mask_round, "__builtin_ia32_minmaxph256_mask_round", IX86_BUILTIN_MINMAXPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_INT_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxpv8sf_mask_round, "__builtin_ia32_minmaxps256_mask_round", IX86_BUILTIN_MINMAXPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxsv2df_mask_round, "__builtin_ia32_minmaxsd_mask_round", IX86_BUILTIN_MINMAXSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxsv8hf_mask_round, "__builtin_ia32_minmaxsh_mask_round", IX86_BUILTIN_MINMAXSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_minmaxsv4sf_mask_round, "__builtin_ia32_minmaxss_mask_round", IX86_BUILTIN_MINMAXSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvt2ps2phx_v32hf_mask_round, "__builtin_ia32_vcvt2ps2phx512_mask_round", IX86_BUILTIN_VCVT2PS2PHX_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V16SF_V16SF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask_round, "__builtin_ia32_cvtph2ibs512_mask_round", IX86_BUILTIN_CVTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask_round, "__builtin_ia32_cvtph2iubs512_mask_round", IX86_BUILTIN_CVTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2ibsv16sf_mask_round, "__builtin_ia32_cvtps2ibs512_mask_round", IX86_BUILTIN_CVTPS2IBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvtps2iubsv16sf_mask_round, "__builtin_ia32_cvtps2iubs512_mask_round", IX86_BUILTIN_CVTPS2IUBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2ibsv32hf_mask_round, "__builtin_ia32_cvttph2ibs512_mask_round", IX86_BUILTIN_CVTTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttph2iubsv32hf_mask_round, "__builtin_ia32_cvttph2iubs512_mask_round", IX86_BUILTIN_CVTTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2ibsv16sf_mask_round, "__builtin_ia32_cvttps2ibs512_mask_round", IX86_BUILTIN_CVTTPS2IBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_cvttps2iubsv16sf_mask_round, "__builtin_ia32_cvttps2iubs512_mask_round", IX86_BUILTIN_CVTTPS2IUBS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2dqsv8df_mask_round, "__builtin_ia32_cvttpd2dqs512_mask_round", IX86_BUILTIN_VCVTTPD2DQS512_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2qqsv8df_mask_round, "__builtin_ia32_cvttpd2qqs512_mask_round", IX86_BUILTIN_VCVTTPD2QQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2udqsv8df_mask_round, "__builtin_ia32_cvttpd2udqs512_mask_round", IX86_BUILTIN_VCVTTPD2UDQS512_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttpd2uqqsv8df_mask_round, "__builtin_ia32_cvttpd2uqqs512_mask_round", IX86_BUILTIN_VCVTTPD2UQQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2dqsv16sf_mask_round, "__builtin_ia32_cvttps2dqs512_mask_round", IX86_BUILTIN_VCVTTPS2DQS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2qqsv8di_mask_round, "__builtin_ia32_cvttps2qqs512_mask_round", IX86_BUILTIN_VCVTTPS2QQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2udqsv16sf_mask_round, "__builtin_ia32_cvttps2udqs512_mask_round", IX86_BUILTIN_VCVTTPS2UDQS512_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttps2uqqsv8di_mask_round, "__builtin_ia32_cvttps2uqqs512_mask_round", IX86_BUILTIN_VCVTTPS2UQQS512_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8SF_V8DI_QI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttsd2sissi_round, "__builtin_ia32_cvttsd2sis32_round", IX86_BUILTIN_VCVTTSD2SIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V2DF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttsd2sisdi_round, "__builtin_ia32_cvttsd2sis64_round", IX86_BUILTIN_VCVTTSD2SIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V2DF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttsd2usissi_round, "__builtin_ia32_cvttsd2usis32_round", IX86_BUILTIN_VCVTTSD2USIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V2DF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttsd2usisdi_round, "__builtin_ia32_cvttsd2usis64_round", IX86_BUILTIN_VCVTTSD2USIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V2DF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttss2sissi_round, "__builtin_ia32_cvttss2sis32_round", IX86_BUILTIN_VCVTTSS2SIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V4SF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttss2sisdi_round, "__builtin_ia32_cvttss2sis64_round", IX86_BUILTIN_VCVTTSS2SIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V4SF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttss2usissi_round, "__builtin_ia32_cvttss2usis32_round", IX86_BUILTIN_VCVTTSS2USIS32_ROUND, UNKNOWN, (int) INT_FTYPE_V4SF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_vcvttss2usisdi_round, "__builtin_ia32_cvttss2usis64_round", IX86_BUILTIN_VCVTTSS2USIS64_ROUND, UNKNOWN, (int) INT64_FTYPE_V4SF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv8df_mask_round, "__builtin_ia32_minmaxpd512_mask_round", IX86_BUILTIN_MINMAXPD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv32hf_mask_round, "__builtin_ia32_minmaxph512_mask_round", IX86_BUILTIN_MINMAXPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxpv16sf_mask_round, "__builtin_ia32_minmaxps512_mask_round", IX86_BUILTIN_MINMAXPS512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxsv2df_mask_round, "__builtin_ia32_minmaxsd_mask_round", IX86_BUILTIN_MINMAXSD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxsv8hf_mask_round, "__builtin_ia32_minmaxsh_mask_round", IX86_BUILTIN_MINMAXSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2, CODE_FOR_avx10_2_minmaxsv4sf_mask_round, "__builtin_ia32_minmaxss_mask_round", IX86_BUILTIN_MINMAXSS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
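
Note: in this table the 256-bit rounding entries are dropped outright and the surviving 512-bit ones move from the AVX10_2_512 mask to the unified AVX10_2 mask. For orientation, a rough annotation of how one entry decomposes (the field names below are descriptive, not the macro's formal parameters):

  /* BDESC (isa_mask, isa2_mask, icode, name, code, comparison, ftype)
       isa_mask / isa2_mask : OPTION_MASK_ISA_* / OPTION_MASK_ISA2_* bits
                              that must be enabled for the builtin
       icode                : named insn pattern the builtin expands to
       name                 : the __builtin_ia32_* spelling visible to C
       code                 : the IX86_BUILTIN_* enumerator
       comparison           : RTX comparison code, UNKNOWN when unused
       ftype                : prototype enum, e.g.
                              V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI_INT  */
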
diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
index 7e310c3..0a320ca 100644
--- a/gcc/config/i386/i386-c.cc
+++ b/gcc/config/i386/i386-c.cc
@@ -734,20 +734,15 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
if (isa_flag2 & OPTION_MASK_ISA2_USER_MSR)
def_or_undef (parse_in, "__USER_MSR__");
if (isa_flag2 & OPTION_MASK_ISA2_AVX10_1_256)
- {
- def_or_undef (parse_in, "__AVX10_1_256__");
- def_or_undef (parse_in, "__AVX10_1__");
- }
- if (isa_flag2 & OPTION_MASK_ISA2_AVX10_1_512)
- def_or_undef (parse_in, "__AVX10_1_512__");
+ def_or_undef (parse_in, "__AVX10_1_256__");
+ if (isa_flag2 & OPTION_MASK_ISA2_AVX10_1)
+ def_or_undef (parse_in, "__AVX10_1__");
if (isa_flag2 & OPTION_MASK_ISA2_APX_F)
def_or_undef (parse_in, "__APX_F__");
if (ix86_apx_inline_asm_use_gpr32)
def_or_undef (parse_in, "__APX_INLINE_ASM_USE_GPR32__");
- if (isa_flag2 & OPTION_MASK_ISA2_AVX10_2_256)
- def_or_undef (parse_in, "__AVX10_2_256__");
- if (isa_flag2 & OPTION_MASK_ISA2_AVX10_2_512)
- def_or_undef (parse_in, "__AVX10_2_512__");
+ if (isa_flag2 & OPTION_MASK_ISA2_AVX10_2)
+ def_or_undef (parse_in, "__AVX10_2__");
if (isa_flag2 & OPTION_MASK_ISA2_AMX_AVX512)
def_or_undef (parse_in, "__AMX_AVX512__");
if (isa_flag2 & OPTION_MASK_ISA2_AMX_TF32)
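
With this change the per-width macros __AVX10_2_256__ and __AVX10_2_512__ collapse into a single __AVX10_2__, which is what the intrinsics headers elsewhere in this patch now test before pushing the target. A minimal sketch of that guard idiom, assuming a compiler with this patch; __LOCAL_AVX10_2__ is just an illustrative name for the sentinel macro:

  #if !defined (__AVX10_2__)
  #pragma GCC push_options
  #pragma GCC target("avx10.2")
  #define __LOCAL_AVX10_2__
  #endif

  /* ...code that uses AVX10.2 intrinsics...  */

  #ifdef __LOCAL_AVX10_2__
  #undef __LOCAL_AVX10_2__
  #pragma GCC pop_options
  #endif
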
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index b4d8bd9..cdfd94d 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -2531,7 +2531,7 @@ ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
return;
case E_BFmode:
- gcc_assert (TARGET_AVX10_2_256 && !flag_trapping_math);
+ gcc_assert (TARGET_AVX10_2 && !flag_trapping_math);
goto simple;
case E_DImode:
@@ -2802,7 +2802,7 @@ ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
machine_mode op_mode = GET_MODE (op0);
bool is_sse = SSE_FLOAT_MODE_SSEMATH_OR_HFBF_P (op_mode);
- if (op_mode == BFmode && (!TARGET_AVX10_2_256 || flag_trapping_math))
+ if (op_mode == BFmode && (!TARGET_AVX10_2 || flag_trapping_math))
{
rtx op = gen_lowpart (HImode, op0);
if (CONST_INT_P (op))
@@ -2924,7 +2924,7 @@ ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1)
/* We only have vcomisbf16, No vcomubf16 nor vcomxbf16 */
if (GET_MODE (op0) != E_BFmode)
{
- if (TARGET_AVX10_2_256 && (code == EQ || code == NE))
+ if (TARGET_AVX10_2 && (code == EQ || code == NE))
tmp = gen_rtx_UNSPEC (CCFPmode, gen_rtvec (1, tmp), UNSPEC_OPTCOMX);
if (unordered_compare)
tmp = gen_rtx_UNSPEC (CCFPmode, gen_rtvec (1, tmp), UNSPEC_NOTRAP);
@@ -10779,7 +10779,7 @@ ix86_ssecom_setcc (const enum rtx_code comparison,
/* NB: For ordered EQ or unordered NE, check ZF alone isn't sufficient
with NAN operands.
- Under TARGET_AVX10_2_256, VCOMX/VUCOMX are generated instead of
+ Under TARGET_AVX10_2, VCOMX/VUCOMX are generated instead of
COMI/UCOMI. VCOMX/VUCOMX will not set ZF for NAN operands. */
if (check_unordered)
{
@@ -10852,12 +10852,12 @@ ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
case GE:
break;
case EQ:
- if (!TARGET_AVX10_2_256 || !comx_ok)
+ if (!TARGET_AVX10_2 || !comx_ok)
check_unordered = true;
mode = CCZmode;
break;
case NE:
- if (!TARGET_AVX10_2_256 || !comx_ok)
+ if (!TARGET_AVX10_2 || !comx_ok)
check_unordered = true;
mode = CCZmode;
const_val = const1_rtx;
@@ -10878,7 +10878,7 @@ ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
op1 = copy_to_mode_reg (mode1, op1);
if ((comparison == EQ || comparison == NE)
- && TARGET_AVX10_2_256 && comx_ok)
+ && TARGET_AVX10_2 && comx_ok)
{
switch (icode)
{
@@ -12103,6 +12103,7 @@ ix86_expand_args_builtin (const struct builtin_description *d,
case V8BF_FTYPE_V8BF_V8BF_INT_V8BF_UQI:
case V16BF_FTYPE_V16BF_V16BF_INT_V16BF_UHI:
case V32BF_FTYPE_V32BF_V32BF_INT_V32BF_USI:
+ case V16HF_FTYPE_V16HF_V16HF_INT_V16HF_UHI:
case V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI:
nargs = 5;
mask_pos = 1;
@@ -12473,7 +12474,7 @@ ix86_expand_sse_comi_round (const struct builtin_description *d,
case ORDERED:
if (!ordered)
{
- if (TARGET_AVX10_2_256 && comx_ok)
+ if (TARGET_AVX10_2 && comx_ok)
{
/* Unlike VCOMI{SH,SS,SD}, VCOMX{SH,SS,SD} will set SF
differently. So directly return true here. */
@@ -12501,7 +12502,7 @@ ix86_expand_sse_comi_round (const struct builtin_description *d,
case UNORDERED:
if (ordered)
{
- if (TARGET_AVX10_2_256 && comx_ok)
+ if (TARGET_AVX10_2 && comx_ok)
{
/* Unlike VCOMI{SH,SS,SD}, VCOMX{SH,SS,SD} will set SF
differently. So directly return false here. */
@@ -12548,20 +12549,20 @@ ix86_expand_sse_comi_round (const struct builtin_description *d,
break;
/* NB: COMI/UCOMI will set ZF with NAN operands. Use CCZmode for
_CMP_EQ_OQ/_CMP_EQ_OS.
- Under TARGET_AVX10_2_256, VCOMX/VUCOMX are always generated instead
+ Under TARGET_AVX10_2, VCOMX/VUCOMX are always generated instead
of COMI/UCOMI, VCOMX/VUCOMX will not set ZF with NAN. */
case EQ:
- if (!TARGET_AVX10_2_256 || !comx_ok)
+ if (!TARGET_AVX10_2 || !comx_ok)
check_unordered = true;
mode = CCZmode;
break;
case NE:
/* NB: COMI/UCOMI will set ZF with NAN operands. Use CCZmode for
_CMP_NEQ_UQ/_CMP_NEQ_US.
- Under TARGET_AVX10_2_256, VCOMX/VUCOMX are always generated instead
+ Under TARGET_AVX10_2, VCOMX/VUCOMX are always generated instead
of COMI/UCOMI, VCOMX/VUCOMX will not set ZF with NAN. */
gcc_assert (!ordered);
- if (!TARGET_AVX10_2_256 || !comx_ok)
+ if (!TARGET_AVX10_2 || !comx_ok)
check_unordered = true;
mode = CCZmode;
const_val = const1_rtx;
@@ -12584,7 +12585,7 @@ ix86_expand_sse_comi_round (const struct builtin_description *d,
/* Generate comx instead of comi when EQ/NE to avoid NAN checks.
Use orig_comp to exclude ORDERED/UNORDERED cases. */
if ((orig_comp == EQ || orig_comp == NE)
- && TARGET_AVX10_2_256 && comx_ok)
+ && TARGET_AVX10_2 && comx_ok)
{
switch (icode)
{
@@ -12605,7 +12606,7 @@ ix86_expand_sse_comi_round (const struct builtin_description *d,
/* Generate comi instead of comx when UNEQ/LTGT to avoid NAN checks. */
if ((comparison == UNEQ || comparison == LTGT)
- && TARGET_AVX10_2_256 && comx_ok)
+ && TARGET_AVX10_2 && comx_ok)
{
switch (icode)
{
@@ -12708,7 +12709,6 @@ ix86_expand_round_builtin (const struct builtin_description *d,
nargs = 2;
break;
case V32HF_FTYPE_V32HF_V32HF_INT:
- case V16HF_FTYPE_V16HF_V16HF_INT:
case V8HF_FTYPE_V8HF_V8HF_INT:
case V8HF_FTYPE_V8HF_INT_INT:
case V8HF_FTYPE_V8HF_UINT_INT:
@@ -12746,37 +12746,14 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V16SI_FTYPE_V16SF_V16SI_HI_INT:
case V16SI_FTYPE_V16SF_V16SI_UHI_INT:
case V16SI_FTYPE_V16HF_V16SI_UHI_INT:
- case V16HF_FTYPE_V16HF_V16HF_V16HF_INT:
case V16HF_FTYPE_V16SI_V16HF_UHI_INT:
- case V16HI_FTYPE_V16HF_V16HI_UHI_INT:
case V8DF_FTYPE_V8SF_V8DF_QI_INT:
case V16SF_FTYPE_V16HI_V16SF_HI_INT:
- case V8SF_FTYPE_V8SF_V8SF_UQI_INT:
- case V8SF_FTYPE_V8SI_V8SF_UQI_INT:
- case V8SF_FTYPE_V8HF_V8SF_UQI_INT:
- case V8SI_FTYPE_V8SF_V8SI_UQI_INT:
- case V8SI_FTYPE_V8HF_V8SI_UQI_INT:
- case V4DF_FTYPE_V4DF_V4DF_UQI_INT:
- case V4DF_FTYPE_V4DI_V4DF_UQI_INT:
- case V4DF_FTYPE_V4SF_V4DF_UQI_INT:
- case V4DF_FTYPE_V8HF_V4DF_UQI_INT:
- case V4DI_FTYPE_V8HF_V4DI_UQI_INT:
- case V4DI_FTYPE_V4DF_V4DI_UQI_INT:
- case V4DI_FTYPE_V4SF_V4DI_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_INT:
- case V4SI_FTYPE_V4DF_V4SI_UQI_INT:
- case V4SF_FTYPE_V4DF_V4SF_UQI_INT:
- case V4SF_FTYPE_V4DI_V4SF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_INT:
case V8HF_FTYPE_V8DI_V8HF_UQI_INT:
case V8HF_FTYPE_V8DF_V8HF_UQI_INT:
- case V8HF_FTYPE_V8SF_V8HF_UQI_INT:
- case V8HF_FTYPE_V8SI_V8HF_UQI_INT:
- case V8HF_FTYPE_V4DF_V8HF_UQI_INT:
- case V8HF_FTYPE_V4DI_V8HF_UQI_INT:
case V16HF_FTYPE_V16SF_V16HF_UHI_INT:
- case V16HF_FTYPE_V16HF_V16HF_UHI_INT:
- case V16HF_FTYPE_V16HI_V16HF_UHI_INT:
case V16HI_FTYPE_V16BF_V16HI_UHI_INT:
case V8HF_FTYPE_V8HF_V8HF_V8HF_INT:
nargs = 4;
@@ -12789,15 +12766,11 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case INT_FTYPE_V4SF_V4SF_INT_INT:
case INT_FTYPE_V2DF_V2DF_INT_INT:
return ix86_expand_sse_comi_round (d, exp, target, true);
- case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT:
case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT:
case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT:
- case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
- case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
- case V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
@@ -12810,7 +12783,6 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT:
case V8HF_FTYPE_V2DF_V8HF_V8HF_UQI_INT:
case V8HF_FTYPE_V4SF_V8HF_V8HF_UQI_INT:
- case V16HF_FTYPE_V8SF_V8SF_V16HF_UHI_INT:
case V32HF_FTYPE_V16SF_V16SF_V32HF_USI_INT:
nargs = 5;
break;
@@ -12819,18 +12791,12 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V8DF_FTYPE_V8DF_INT_V8DF_QI_INT:
case V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT:
case V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT:
- case V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT:
- case V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT:
- case V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT:
nargs_constant = 4;
nargs = 5;
break;
case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT:
- case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT:
case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
- case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT:
- case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT:
case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
@@ -12839,8 +12805,6 @@ ix86_expand_round_builtin (const struct builtin_description *d,
break;
case V16SF_FTYPE_V16SF_V16SF_INT_V16SF_HI_INT:
case V8DF_FTYPE_V8DF_V8DF_INT_V8DF_QI_INT:
- case V8SF_FTYPE_V8SF_V8SF_INT_V8SF_UQI_INT:
- case V4DF_FTYPE_V4DF_V4DF_INT_V4DF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_INT_V4SF_QI_INT:
case V2DF_FTYPE_V2DF_V2DF_INT_V2DF_QI_INT:
case V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT:
@@ -12848,15 +12812,12 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT:
case V8DF_FTYPE_V8DF_V8DF_INT_V8DF_UQI_INT:
case V32HF_FTYPE_V32HF_V32HF_INT_V32HF_USI_INT:
- case V16HF_FTYPE_V16HF_V16HF_INT_V16HF_UHI_INT:
case V16SF_FTYPE_V16SF_V16SF_INT_V16SF_UHI_INT:
nargs = 6;
nargs_constant = 4;
break;
case V8DF_FTYPE_V8DF_V8DF_V8DI_INT_QI_INT:
- case V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SI_INT_HI_INT:
- case V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DI_INT_QI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SI_INT_QI_INT:
nargs = 6;
@@ -13621,9 +13582,9 @@ ix86_check_builtin_isa_match (unsigned int fcode,
SHARE_BUILTIN (OPTION_MASK_ISA_AES, 0, OPTION_MASK_ISA_AVX512VL,
OPTION_MASK_ISA2_VAES);
SHARE_BUILTIN (0, OPTION_MASK_ISA2_AVXVNNIINT8, 0,
- OPTION_MASK_ISA2_AVX10_2_256);
+ OPTION_MASK_ISA2_AVX10_2);
SHARE_BUILTIN (0, OPTION_MASK_ISA2_AVXVNNIINT16, 0,
- OPTION_MASK_ISA2_AVX10_2_256);
+ OPTION_MASK_ISA2_AVX10_2);
isa = tmp_isa;
isa2 = tmp_isa2;
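
The repeated TARGET_AVX10_2 tests above all stem from the flag-setting difference spelled out in the comments: COMI/UCOMI set ZF for unordered (NaN) operands, so a bare ZF test would make an ordered EQ read true against a NaN, whereas VCOMX/VUCOMX do not, which is why the extra check_unordered step can be skipped when they are available. As a tiny standalone illustration, the IEEE behaviour the expansion has to preserve either way:

  #include <math.h>
  #include <stdio.h>

  int
  main (void)
  {
    double a = 1.0, n = NAN;
    /* Any ordered comparison involving a NaN is false; != is true.  */
    printf ("a == n -> %d\n", a == n);  /* 0 */
    printf ("a != n -> %d\n", a != n);  /* 1 */
    printf ("n == n -> %d\n", n == n);  /* 0 */
    return 0;
  }
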
diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def
index 5d8db36..19d78d7 100644
--- a/gcc/config/i386/i386-isa.def
+++ b/gcc/config/i386/i386-isa.def
@@ -120,9 +120,8 @@ DEF_PTA(APX_F)
DEF_PTA(USER_MSR)
DEF_PTA(EVEX512)
DEF_PTA(AVX10_1_256)
-DEF_PTA(AVX10_1_512)
-DEF_PTA(AVX10_2_256)
-DEF_PTA(AVX10_2_512)
+DEF_PTA(AVX10_1)
+DEF_PTA(AVX10_2)
DEF_PTA(AMX_AVX512)
DEF_PTA(AMX_TF32)
DEF_PTA(AMX_TRANSPOSE)
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index fc4d2d2..a9fac01 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -262,9 +262,8 @@ static struct ix86_target_opts isa2_opts[] =
{ "-mevex512", OPTION_MASK_ISA2_EVEX512 },
{ "-musermsr", OPTION_MASK_ISA2_USER_MSR },
{ "-mavx10.1-256", OPTION_MASK_ISA2_AVX10_1_256 },
- { "-mavx10.1-512", OPTION_MASK_ISA2_AVX10_1_512 },
- { "-mavx10.2-256", OPTION_MASK_ISA2_AVX10_2_256 },
- { "-mavx10.2-512", OPTION_MASK_ISA2_AVX10_2_512 },
+ { "-mavx10.1", OPTION_MASK_ISA2_AVX10_1 },
+ { "-mavx10.2", OPTION_MASK_ISA2_AVX10_2 },
{ "-mamx-avx512", OPTION_MASK_ISA2_AMX_AVX512 },
{ "-mamx-tf32", OPTION_MASK_ISA2_AMX_TF32 },
{ "-mamx-transpose", OPTION_MASK_ISA2_AMX_TRANSPOSE },
@@ -1135,10 +1134,9 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[],
IX86_ATTR_ISA ("evex512", OPT_mevex512),
IX86_ATTR_ISA ("usermsr", OPT_musermsr),
IX86_ATTR_ISA ("avx10.1-256", OPT_mavx10_1_256),
- IX86_ATTR_ISA ("avx10.1-512", OPT_mavx10_1_512),
- IX86_ATTR_ISA ("avx10.2-256", OPT_mavx10_2_256),
+ IX86_ATTR_ISA ("avx10.1", OPT_mavx10_1),
+ IX86_ATTR_ISA ("avx10.1-512", OPT_mavx10_1),
IX86_ATTR_ISA ("avx10.2", OPT_mavx10_2),
- IX86_ATTR_ISA ("avx10.2-512", OPT_mavx10_2),
IX86_ATTR_ISA ("amx-avx512", OPT_mamx_avx512),
IX86_ATTR_ISA ("amx-tf32", OPT_mamx_tf32),
IX86_ATTR_ISA ("amx-transpose", OPT_mamx_transpose),
@@ -1273,6 +1271,13 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[],
}
}
+ /* Fixup -msse4 which is RejectNegative to -mno-sse4 when negated. */
+ if (opt == OPT_msse4 && !opt_set_p)
+ {
+ opt = OPT_mno_sse4;
+ opt_set_p = true;
+ }
+
/* Process the option. */
if (opt == N_OPTS)
{
@@ -2676,7 +2681,7 @@ ix86_option_override_internal (bool main_args_p,
2. Both AVX10.1-256 and AVX512 w/o 512 bit vector width are enabled with no
explicit disable on other AVX512 features.
3. Both AVX10.1 and AVX512 are disabled. */
- if (TARGET_AVX10_1_512_P (opts->x_ix86_isa_flags2))
+ if (TARGET_AVX10_1_P (opts->x_ix86_isa_flags2))
{
if (opts->x_ix86_no_avx512_explicit
&& (((~(avx512_isa_flags & opts->x_ix86_isa_flags)
@@ -2686,7 +2691,9 @@ ix86_option_override_internal (bool main_args_p,
& ((avx512_isa_flags2 | OPTION_MASK_ISA2_EVEX512)
& opts->x_ix86_isa_flags2_explicit)))))
warning (0, "%<-mno-evex512%> or %<-mno-avx512XXX%> cannot disable "
- "AVX10 instructions when AVX10.1-512 is available");
+		 "AVX10 instructions when AVX10.1-512 is available in GCC 15; "
+		 "from GCC 16 onwards it will instead disable that part of "
+		 "the AVX512 instructions");
}
else if (TARGET_AVX10_1_256_P (opts->x_ix86_isa_flags2))
{
@@ -2722,18 +2729,21 @@ ix86_option_override_internal (bool main_args_p,
& (avx512_isa_flags2
& opts->x_ix86_isa_flags2_explicit)))))
warning (0, "%<-mno-avx512XXX%> cannot disable AVX10 instructions "
- "when AVX10 is available");
+	     "when AVX10 is available in GCC 15; from GCC 16 onwards "
+	     "it will instead disable that part of the AVX512 "
+	     "instructions");
}
else if (TARGET_AVX512F_P (opts->x_ix86_isa_flags)
&& (OPTION_MASK_ISA_AVX512F & opts->x_ix86_isa_flags_explicit))
{
if (opts->x_ix86_no_avx10_1_explicit
- && ((OPTION_MASK_ISA2_AVX10_1_256 | OPTION_MASK_ISA2_AVX10_1_512)
+ && ((OPTION_MASK_ISA2_AVX10_1_256 | OPTION_MASK_ISA2_AVX10_1)
& opts->x_ix86_isa_flags2_explicit))
{
- warning (0, "%<-mno-avx10.1-256, -mno-avx10.1-512%> "
- "cannot disable AVX512 instructions when "
- "%<-mavx512XXX%>");
+	  warning (0, "%<-mno-avx10.1-256, -mno-avx10.1-512%> cannot disable "
+		      "AVX512 instructions when %<-mavx512XXX%> is used in "
+		      "GCC 15; from GCC 16 onwards it will instead disable "
+		      "all of these instructions");
/* Reset those unset AVX512 flags set by AVX10 options when AVX10 is
disabled. */
if (OPTION_MASK_ISA2_AVX10_1_256 & opts->x_ix86_isa_flags2_explicit)
@@ -2753,7 +2763,7 @@ ix86_option_override_internal (bool main_args_p,
/* Set EVEX512 if one of the following conditions meets:
1. AVX512 is enabled while EVEX512 is not explicitly set/unset.
2. AVX10.1-512 is enabled. */
- if (TARGET_AVX10_1_512_P (opts->x_ix86_isa_flags2)
+ if (TARGET_AVX10_1_P (opts->x_ix86_isa_flags2)
|| (TARGET_AVX512F_P (opts->x_ix86_isa_flags)
&& !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA2_EVEX512)))
opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_EVEX512;
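
Besides the AVX10 renaming and the reworded warnings, this file gains a small fixup so that a negated "sse4" inside a target attribute or pragma is rerouted to -mno-sse4, since -msse4 itself is RejectNegative. A hedged sketch of the user-level form this is meant to accept (function name and body are illustrative):

  /* Assuming a compiler with this patch: "no-sse4" in the target
     attribute is mapped onto OPT_mno_sse4 instead of being rejected.  */
  __attribute__ ((target ("no-sse4")))
  int
  no_sse4_variant (int x)
  {
    return x + 1;
  }
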
diff --git a/gcc/config/i386/i386-rust-and-jit.inc b/gcc/config/i386/i386-rust-and-jit.inc
new file mode 100644
index 0000000..998f44c
--- /dev/null
+++ b/gcc/config/i386/i386-rust-and-jit.inc
@@ -0,0 +1,93 @@
+if (TARGET_64BIT)
+ ADD_TARGET_INFO ("target_arch", "x86_64");
+else
+ ADD_TARGET_INFO ("target_arch", "x86");
+
+// features officially "stabilised" in rustc
+if (TARGET_MMX)
+ ADD_TARGET_INFO ("target_feature", "mmx");
+if (TARGET_SSE)
+ ADD_TARGET_INFO ("target_feature", "sse");
+if (TARGET_SSE2)
+ ADD_TARGET_INFO ("target_feature", "sse2");
+if (TARGET_SSE3)
+ ADD_TARGET_INFO ("target_feature", "sse3");
+if (TARGET_SSSE3)
+ ADD_TARGET_INFO ("target_feature", "ssse3");
+if (TARGET_SSE4_1)
+ ADD_TARGET_INFO ("target_feature", "sse4.1");
+if (TARGET_SSE4_2)
+ ADD_TARGET_INFO ("target_feature", "sse4.2");
+if (TARGET_AES)
+ ADD_TARGET_INFO ("target_feature", "aes");
+if (TARGET_SHA)
+ ADD_TARGET_INFO ("target_feature", "sha");
+if (TARGET_AVX)
+ ADD_TARGET_INFO ("target_feature", "avx");
+if (TARGET_AVX2)
+ ADD_TARGET_INFO ("target_feature", "avx2");
+if (TARGET_AVX512F)
+ ADD_TARGET_INFO ("target_feature", "avx512f");
+if (TARGET_AVX512CD)
+ ADD_TARGET_INFO ("target_feature", "avx512cd");
+if (TARGET_AVX512DQ)
+ ADD_TARGET_INFO ("target_feature", "avx512dq");
+if (TARGET_AVX512BW)
+ ADD_TARGET_INFO ("target_feature", "avx512bw");
+if (TARGET_AVX512VL)
+ ADD_TARGET_INFO ("target_feature", "avx512vl");
+if (TARGET_AVX512VBMI)
+ ADD_TARGET_INFO ("target_feature", "avx512vbmi");
+if (TARGET_AVX512IFMA)
+ ADD_TARGET_INFO ("target_feature", "avx512ifma");
+if (TARGET_AVX512VPOPCNTDQ)
+ ADD_TARGET_INFO ("target_feature", "avx512vpopcntdq");
+if (TARGET_FMA)
+ ADD_TARGET_INFO ("target_feature", "fma");
+if (TARGET_RTM)
+ ADD_TARGET_INFO ("target_feature", "rtm");
+if (TARGET_SSE4A)
+ ADD_TARGET_INFO ("target_feature", "sse4a");
+if (TARGET_BMI)
+ {
+ ADD_TARGET_INFO ("target_feature", "bmi1");
+ ADD_TARGET_INFO ("target_feature", "bmi");
+ }
+if (TARGET_BMI2)
+ ADD_TARGET_INFO ("target_feature", "bmi2");
+if (TARGET_LZCNT)
+ ADD_TARGET_INFO ("target_feature", "lzcnt");
+if (TARGET_TBM)
+ ADD_TARGET_INFO ("target_feature", "tbm");
+if (TARGET_POPCNT)
+ ADD_TARGET_INFO ("target_feature", "popcnt");
+if (TARGET_RDRND)
+ {
+ ADD_TARGET_INFO ("target_feature", "rdrand");
+ ADD_TARGET_INFO ("target_feature", "rdrnd");
+ }
+if (TARGET_F16C)
+ ADD_TARGET_INFO ("target_feature", "f16c");
+if (TARGET_RDSEED)
+ ADD_TARGET_INFO ("target_feature", "rdseed");
+if (TARGET_ADX)
+ ADD_TARGET_INFO ("target_feature", "adx");
+if (TARGET_FXSR)
+ ADD_TARGET_INFO ("target_feature", "fxsr");
+if (TARGET_XSAVE)
+ ADD_TARGET_INFO ("target_feature", "xsave");
+if (TARGET_XSAVEOPT)
+ ADD_TARGET_INFO ("target_feature", "xsaveopt");
+if (TARGET_XSAVEC)
+ ADD_TARGET_INFO ("target_feature", "xsavec");
+if (TARGET_XSAVES)
+ ADD_TARGET_INFO ("target_feature", "xsaves");
+if (TARGET_VPCLMULQDQ)
+ {
+ ADD_TARGET_INFO ("target_feature", "pclmulqdq");
+ ADD_TARGET_INFO ("target_feature", "vpclmulqdq");
+ }
+if (TARGET_CMPXCHG16B)
+ ADD_TARGET_INFO ("target_feature", "cmpxchg16b");
+if (TARGET_MOVBE)
+ ADD_TARGET_INFO ("target_feature", "movbe");
diff --git a/gcc/config/i386/i386-rust.cc b/gcc/config/i386/i386-rust.cc
index b9099d3..de00076 100644
--- a/gcc/config/i386/i386-rust.cc
+++ b/gcc/config/i386/i386-rust.cc
@@ -29,97 +29,7 @@ along with GCC; see the file COPYING3. If not see
void
ix86_rust_target_cpu_info (void)
{
- if (TARGET_64BIT)
- rust_add_target_info ("target_arch", "x86_64");
- else
- rust_add_target_info ("target_arch", "x86");
-
- // features officially "stabilised" in rustc
- if (TARGET_MMX)
- rust_add_target_info ("target_feature", "mmx");
- if (TARGET_SSE)
- rust_add_target_info ("target_feature", "sse");
- if (TARGET_SSE2)
- rust_add_target_info ("target_feature", "sse2");
- if (TARGET_SSE3)
- rust_add_target_info ("target_feature", "sse3");
- if (TARGET_SSSE3)
- rust_add_target_info ("target_feature", "ssse3");
- if (TARGET_SSE4_1)
- rust_add_target_info ("target_feature", "sse4.1");
- if (TARGET_SSE4_2)
- rust_add_target_info ("target_feature", "sse4.2");
- if (TARGET_AES)
- rust_add_target_info ("target_feature", "aes");
- if (TARGET_SHA)
- rust_add_target_info ("target_feature", "sha");
- if (TARGET_AVX)
- rust_add_target_info ("target_feature", "avx");
- if (TARGET_AVX2)
- rust_add_target_info ("target_feature", "avx2");
- if (TARGET_AVX512F)
- rust_add_target_info ("target_feature", "avx512f");
- if (TARGET_AVX512CD)
- rust_add_target_info ("target_feature", "avx512cd");
- if (TARGET_AVX512DQ)
- rust_add_target_info ("target_feature", "avx512dq");
- if (TARGET_AVX512BW)
- rust_add_target_info ("target_feature", "avx512bw");
- if (TARGET_AVX512VL)
- rust_add_target_info ("target_feature", "avx512vl");
- if (TARGET_AVX512VBMI)
- rust_add_target_info ("target_feature", "avx512vbmi");
- if (TARGET_AVX512IFMA)
- rust_add_target_info ("target_feature", "avx512ifma");
- if (TARGET_AVX512VPOPCNTDQ)
- rust_add_target_info ("target_feature", "avx512vpopcntdq");
- if (TARGET_FMA)
- rust_add_target_info ("target_feature", "fma");
- if (TARGET_RTM)
- rust_add_target_info ("target_feature", "rtm");
- if (TARGET_SSE4A)
- rust_add_target_info ("target_feature", "sse4a");
- if (TARGET_BMI)
- {
- rust_add_target_info ("target_feature", "bmi1");
- rust_add_target_info ("target_feature", "bmi");
- }
- if (TARGET_BMI2)
- rust_add_target_info ("target_feature", "bmi2");
- if (TARGET_LZCNT)
- rust_add_target_info ("target_feature", "lzcnt");
- if (TARGET_TBM)
- rust_add_target_info ("target_feature", "tbm");
- if (TARGET_POPCNT)
- rust_add_target_info ("target_feature", "popcnt");
- if (TARGET_RDRND)
- {
- rust_add_target_info ("target_feature", "rdrand");
- rust_add_target_info ("target_feature", "rdrnd");
- }
- if (TARGET_F16C)
- rust_add_target_info ("target_feature", "f16c");
- if (TARGET_RDSEED)
- rust_add_target_info ("target_feature", "rdseed");
- if (TARGET_ADX)
- rust_add_target_info ("target_feature", "adx");
- if (TARGET_FXSR)
- rust_add_target_info ("target_feature", "fxsr");
- if (TARGET_XSAVE)
- rust_add_target_info ("target_feature", "xsave");
- if (TARGET_XSAVEOPT)
- rust_add_target_info ("target_feature", "xsaveopt");
- if (TARGET_XSAVEC)
- rust_add_target_info ("target_feature", "xsavec");
- if (TARGET_XSAVES)
- rust_add_target_info ("target_feature", "xsaves");
- if (TARGET_VPCLMULQDQ)
- {
- rust_add_target_info ("target_feature", "pclmulqdq");
- rust_add_target_info ("target_feature", "vpclmulqdq");
- }
- if (TARGET_CMPXCHG16B)
- rust_add_target_info ("target_feature", "cmpxchg16b");
- if (TARGET_MOVBE)
- rust_add_target_info ("target_feature", "movbe");
+#define ADD_TARGET_INFO rust_add_target_info
+#include "i386-rust-and-jit.inc"
+#undef ADD_TARGET_INFO
}
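
The Rust hook now just parameterises the shared list: define ADD_TARGET_INFO to the consumer's callback, include i386-rust-and-jit.inc, and undefine the macro again. A minimal sketch of the same pattern for some other hypothetical consumer; my_add_info and my_collect_target_info are illustrative names, and the include still relies on the i386 TARGET_* macros being in scope:

  extern void my_add_info (const char *key, const char *value);

  static void
  my_collect_target_info (void)
  {
  #define ADD_TARGET_INFO my_add_info
  #include "i386-rust-and-jit.inc"
  #undef ADD_TARGET_INFO
  }
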
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index be5e27f..f38e3db 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -16668,7 +16668,7 @@ ix86_fp_compare_code_to_integer (enum rtx_code code)
return NE;
case EQ:
case NE:
- if (TARGET_AVX10_2_256)
+ if (TARGET_AVX10_2)
return code;
/* FALLTHRU. */
default:
@@ -25055,7 +25055,7 @@ ix86_get_mask_mode (machine_mode data_mode)
to kmask for _Float16. */
|| (TARGET_AVX512VL && TARGET_AVX512FP16
&& GET_MODE_INNER (data_mode) == E_HFmode)
- || (TARGET_AVX10_2_256 && GET_MODE_INNER (data_mode) == E_BFmode))
+ || (TARGET_AVX10_2 && GET_MODE_INNER (data_mode) == E_BFmode))
{
if (elem_size == 4
|| elem_size == 8
@@ -26465,8 +26465,7 @@ ix86_redzone_clobber ()
cfun->machine->asm_redzone_clobber_seen = true;
if (ix86_using_red_zone ())
{
- rtx base = plus_constant (Pmode, stack_pointer_rtx,
- GEN_INT (-RED_ZONE_SIZE));
+ rtx base = plus_constant (Pmode, stack_pointer_rtx, -RED_ZONE_SIZE);
rtx mem = gen_rtx_MEM (BLKmode, base);
set_mem_size (mem, RED_ZONE_SIZE);
return mem;
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index ce29c27..13da3d8 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -1176,7 +1176,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
#define SSE_FLOAT_MODE_SSEMATH_OR_HFBF_P(MODE) \
((SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \
|| (TARGET_AVX512FP16 && (MODE) == HFmode) \
- || (TARGET_AVX10_2_256 && (MODE) == BFmode))
+ || (TARGET_AVX10_2 && (MODE) == BFmode))
#define FMA4_VEC_FLOAT_MODE_P(MODE) \
(TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \
@@ -2449,9 +2449,9 @@ constexpr wide_int_bitmask PTA_DIAMONDRAPIDS = PTA_SKYLAKE | PTA_PKU | PTA_SHA
| PTA_WBNOINVD | PTA_CLWB | PTA_MOVDIRI | PTA_MOVDIR64B | PTA_ENQCMD
| PTA_CLDEMOTE | PTA_PTWRITE | PTA_WAITPKG | PTA_SERIALIZE | PTA_TSXLDTRK
| PTA_AMX_TILE | PTA_AMX_INT8 | PTA_AMX_BF16 | PTA_UINTR | PTA_AVXVNNI
- | PTA_AMX_FP16 | PTA_PREFETCHI | PTA_AMX_COMPLEX | PTA_AVX10_1_512
+ | PTA_AMX_FP16 | PTA_PREFETCHI | PTA_AMX_COMPLEX | PTA_AVX10_1
| PTA_AVXIFMA | PTA_AVXNECONVERT | PTA_AVXVNNIINT16 | PTA_AVXVNNIINT8
- | PTA_CMPCCXADD | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_AVX10_2_512
+ | PTA_CMPCCXADD | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_AVX10_2
| PTA_APX_F | PTA_AMX_AVX512 | PTA_AMX_FP8 | PTA_AMX_TF32 | PTA_AMX_TRANSPOSE
| PTA_MOVRS | PTA_AMX_MOVRS | PTA_USER_MSR;
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index b1cd523..f7f790d 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -984,7 +984,7 @@
(symbol_ref "TARGET_APX_NDD && Pmode == DImode")
(eq_attr "isa" "vaes_avx512vl")
(symbol_ref "TARGET_VAES && TARGET_AVX512VL")
- (eq_attr "isa" "avx10_2") (symbol_ref "TARGET_AVX10_2_256")
+ (eq_attr "isa" "avx10_2") (symbol_ref "TARGET_AVX10_2")
(eq_attr "mmx_isa" "native")
(symbol_ref "!TARGET_MMX_WITH_SSE")
@@ -1812,14 +1812,14 @@
(compare:CC (match_operand:BF 1 "cmp_fp_expander_operand")
(match_operand:BF 2 "cmp_fp_expander_operand")))
(set (pc) (if_then_else
- (match_operator 0 "comparison_operator"
+ (match_operator 0 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)])
(label_ref (match_operand 3))
(pc)))]
"TARGET_80387 || (SSE_FLOAT_MODE_P (SFmode) && TARGET_SSE_MATH)"
{
- if (TARGET_AVX10_2_256 && !flag_trapping_math)
+ if (TARGET_AVX10_2 && !flag_trapping_math)
ix86_expand_branch (GET_CODE (operands[0]),
operands[1], operands[2], operands[3]);
else
@@ -1861,7 +1861,7 @@
"TARGET_80387 || (SSE_FLOAT_MODE_P (SFmode) && TARGET_SSE_MATH)"
{
rtx op2 = operands[2], op3 = operands[3];
- if (!TARGET_AVX10_2_256 || flag_trapping_math)
+ if (!TARGET_AVX10_2 || flag_trapping_math)
{
op2 = ix86_expand_fast_convert_bf_to_sf (operands[2]);
op3 = ix86_expand_fast_convert_bf_to_sf (operands[3]);
@@ -2042,7 +2042,7 @@
(match_operand:MODEF 0 "register_operand" "v")
(match_operand:MODEF 1 "nonimmediate_operand" "vm"))]
UNSPEC_OPTCOMX))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"%v<unord>comx<MODEF:ssemodesuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "evex")
@@ -2055,7 +2055,7 @@
(match_operand:HF 0 "register_operand" "v")
(match_operand:HF 1 "nonimmediate_operand" "vm"))]
UNSPEC_OPTCOMX))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"v<unord>comxsh\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "evex")
@@ -2114,7 +2114,7 @@
(compare:CCFP
(match_operand:BF 0 "register_operand" "v")
(match_operand:BF 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcomisbf16\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "evex")
@@ -12411,7 +12411,9 @@
(and:SWI248 (match_operand:SWI248 0 "memory_operand")
(match_operand 1 "const_int_operand"))
(const_int 0)))]
- "!TARGET_PARTIAL_MEMORY_READ_STALL && !MEM_VOLATILE_P (operands[0])"
+ "!TARGET_PARTIAL_MEMORY_READ_STALL
+ && !MEM_VOLATILE_P (operands[0])
+ && offsettable_memref_p (operands[0])"
[(set (reg:CCZ FLAGS_REG)
(compare:CCZ (match_dup 2) (const_int 0)))]
{
@@ -18168,7 +18170,8 @@
[(set (match_dup 4) (match_dup 1))
(set (match_dup 0)
(any_rotate:SWI (match_dup 4)
- (subreg:QI (match_dup 2) 0)))]
+ (subreg:QI
+ (and:SI (match_dup 2) (match_dup 3)) 0)))]
"operands[4] = gen_reg_rtx (<MODE>mode);")
(define_insn_and_split "*<insn><mode>3_mask_1"
@@ -18202,7 +18205,8 @@
== GET_MODE_BITSIZE (<MODE>mode) - 1"
[(set (match_dup 4) (match_dup 1))
(set (match_dup 0)
- (any_rotate:SWI (match_dup 4) (match_dup 2)))]
+ (any_rotate:SWI (match_dup 4)
+ (and:QI (match_dup 2) (match_dup 3))))]
"operands[4] = gen_reg_rtx (<MODE>mode);")
(define_insn_and_split "*<insn><mode>3_add"
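
Beyond the mechanical TARGET_AVX10_2 renaming, the BFmode compare/branch expanders above mean scalar __bf16 comparisons no longer have to go through a float extension when AVX10.2 is enabled and -fno-trapping-math is in effect. A small sketch of the kind of source this affects:

  /* With -mavx10.2 and -fno-trapping-math the BFmode compare expanders
     above can emit vcomisbf16 directly; otherwise the operands are
     first converted to float.  */
  int
  bf16_less (__bf16 a, __bf16 b)
  {
    return a < b;
  }
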
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index 9dc6073..27d34bd 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -1356,7 +1356,7 @@ Target Var(ix86_apx_inline_asm_use_gpr32) Init(0)
Enable GPR32 in inline asm when APX_F enabled.
mevex512
-Target Mask(ISA2_EVEX512) Var(ix86_isa_flags2) Save
+Target Mask(ISA2_EVEX512) Var(ix86_isa_flags2) Save Warn(%<-mevex512%> will be deprecated in GCC 16 since all machines support 512 bit vectors)
Support 512 bit vector built-in functions and code generation.
musermsr
@@ -1364,34 +1364,29 @@ Target Mask(ISA2_USER_MSR) Var(ix86_isa_flags2) Save
Support USER_MSR built-in functions and code generation.
mavx10.1-256
-Target Mask(ISA2_AVX10_1_256) Var(ix86_isa_flags2) Save
+Target Mask(ISA2_AVX10_1_256) Var(ix86_isa_flags2) Save Warn(%<-mavx10.1%> has been aliased to 512 bit since GCC 14.3 and GCC 15.1, and %<-mavx10.1-256%> and %<-mavx10.1-512%> will be deprecated in GCC 16 since all machines support 512 bit vectors)
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-and AVX10.1 built-in functions and code generation.
+and AVX10.1-256 built-in functions and code generation.
-mavx10.1-512
-Target Mask(ISA2_AVX10_1_512) Var(ix86_isa_flags2) Save
+mavx10.1
+Target Mask(ISA2_AVX10_1) Var(ix86_isa_flags2) Save Warn(%<-mavx10.1%> has been aliased to 512 bit since GCC 14.3 and GCC 15.1, and %<-mavx10.1-256%> and %<-mavx10.1-512%> will be deprecated in GCC 16 since all machines support 512 bit vectors)
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
and AVX10.1-512 built-in functions and code generation.
-mavx10.2-256
-Target RejectNegative Mask(ISA2_AVX10_2_256) Var(ix86_isa_flags2) Save
+mavx10.1-512
+Target Alias(mavx10.1)
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-AVX10.1 and AVX10.2 built-in functions and code generation.
+and AVX10.1-512 built-in functions and code generation.
mavx10.2
-Target Mask(ISA2_AVX10_2_512) Var(ix86_isa_flags2) Save
-Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-AVX10.1-512 and AVX10.2-512 built-in functions and code generation.
-
-mavx10.2-512
-Target RejectNegative Alias(mavx10.2)
+Target Mask(ISA2_AVX10_2) Var(ix86_isa_flags2) Save
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-AVX10.1-512 and AVX10.2-512 built-in functions and code generation.
+AVX10.1-512 and AVX10.2 built-in functions and code generation.
mamx-avx512
Target Mask(ISA2_AMX_AVX512) Var(ix86_isa_flags2) Save
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2, AVX10.1-512,
-AVX10.2-512 and AMX-AVX512 built-in functions and code generation.
+AVX10.2 and AMX-AVX512 built-in functions and code generation.
mamx-tf32
Target Mask(ISA2_AMX_TF32) Var(ix86_isa_flags2) Save
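
The option set thus collapses to -mavx10.1 (with -mavx10.1-512 kept as an alias) and a single -mavx10.2, with Warn() notes flagging the per-width spellings for deprecation in GCC 16. The same names remain usable per function through the attribute table updated earlier in this patch; a sketch, assuming a compiler with this change (function name and body are illustrative):

  __attribute__ ((target ("avx10.2")))
  void
  uses_avx10_2 (void)
  {
    /* AVX10.2 intrinsics or vectorisable code here.  */
  }
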
diff --git a/gcc/config/i386/i386.opt.urls b/gcc/config/i386/i386.opt.urls
index ee68061..0d5a5a1 100644
--- a/gcc/config/i386/i386.opt.urls
+++ b/gcc/config/i386/i386.opt.urls
@@ -599,18 +599,15 @@ UrlSuffix(gcc/x86-Options.html#index-musermsr)
mavx10.1-256
UrlSuffix(gcc/x86-Options.html#index-mavx10_002e1-256)
+mavx10.1
+UrlSuffix(gcc/x86-Options.html#index-mavx10_002e1)
+
mavx10.1-512
UrlSuffix(gcc/x86-Options.html#index-mavx10_002e1-512)
-mavx10.2-256
-UrlSuffix(gcc/x86-Options.html#index-mavx10_002e2-256)
-
mavx10.2
UrlSuffix(gcc/x86-Options.html#index-mavx10_002e2)
-mavx10.2-512
-UrlSuffix(gcc/x86-Options.html#index-mavx10_002e2-512)
-
mamx-avx512
UrlSuffix(gcc/x86-Options.html#index-mamx-avx512)
diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
index 6907a2c..c30a4e0 100644
--- a/gcc/config/i386/immintrin.h
+++ b/gcc/config/i386/immintrin.h
@@ -146,8 +146,6 @@
#include <amxfp16intrin.h>
-#include <avx10_2roundingintrin.h>
-
#include <avx10_2mediaintrin.h>
#include <avx10_2-512mediaintrin.h>
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index e518999..7920232 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -2117,7 +2117,7 @@
(plusminusmultdiv:VBF_32_64
(match_operand:VBF_32_64 1 "nonimmediate_operand")
(match_operand:VBF_32_64 2 "nonimmediate_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode,
@@ -2176,7 +2176,7 @@
(smaxmin:VBF_32_64
(match_operand:VBF_32_64 1 "nonimmediate_operand")
(match_operand:VBF_32_64 2 "nonimmediate_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode,
@@ -2208,7 +2208,7 @@
(define_expand "sqrt<mode>2"
[(set (match_operand:VBF_32_64 0 "register_operand")
(sqrt:VBF_32_64 (match_operand:VBF_32_64 1 "vector_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode,
@@ -2369,7 +2369,7 @@
(match_operator:QI 1 ""
[(match_operand:VBF_32_64 2 "register_operand")
(match_operand:VBF_32_64 3 "nonimmediate_operand")]))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op2 = lowpart_subreg (V8BFmode,
force_reg (<MODE>mode, operands[2]), <MODE>mode);
@@ -2788,7 +2788,7 @@
(match_operand:VBF_32_64 1 "nonimmediate_operand")
(match_operand:VBF_32_64 2 "nonimmediate_operand")
(match_operand:VBF_32_64 3 "nonimmediate_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
@@ -2808,7 +2808,7 @@
(match_operand:VBF_32_64 2 "nonimmediate_operand")
(neg:VBF_32_64
(match_operand:VBF_32_64 3 "nonimmediate_operand"))))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
@@ -2828,7 +2828,7 @@
(match_operand:VBF_32_64 1 "nonimmediate_operand"))
(match_operand:VBF_32_64 2 "nonimmediate_operand")
(match_operand:VBF_32_64 3 "nonimmediate_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
@@ -2849,7 +2849,7 @@
(match_operand:VBF_32_64 2 "nonimmediate_operand")
(neg:VBF_32_64
(match_operand:VBF_32_64 3 "nonimmediate_operand"))))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
rtx op0 = gen_reg_rtx (V8BFmode);
rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
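
These mmx.md expanders cover the 32- and 64-bit partial BF16 vector modes (V2BF/V4BF) by widening to V8BF, doing the operation there, and taking the low part back. A sketch of the kind of GNU-vector source this targets, assuming __bf16 elements are accepted by vector_size on this compiler (the typedef and function name are illustrative):

  typedef __bf16 v4bf __attribute__ ((vector_size (8)));

  v4bf
  add_v4bf (v4bf a, v4bf b)
  {
    /* With -mavx10.2 this is expanded via a V8BF addition and a
       lowpart extraction, per the expanders above.  */
    return a + b;
  }
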
diff --git a/gcc/config/i386/movrsintrin.h b/gcc/config/i386/movrsintrin.h
index b29d643..9127442 100644
--- a/gcc/config/i386/movrsintrin.h
+++ b/gcc/config/i386/movrsintrin.h
@@ -79,7 +79,7 @@ _movrs_i64 (void const * __P)
#ifdef __x86_64__
-#if !defined (__AVX10_2_256__) || !defined (__MOVRS__)
+#if !defined (__AVX10_2__) || !defined (__MOVRS__)
#pragma GCC push_options
#pragma GCC target("avx10.2,movrs")
#define __DISABLE_MOVRS_AVX10_2__
@@ -317,17 +317,6 @@ _mm_maskz_loadrs_epi16 (__mmask8 __U, void const *__A)
(__mmask8) __U);
}
-#ifdef __DISABLE_MOVRS_AVX10_2__
-#undef __DISABLE_MOVRS_AVX10_2__
-#pragma GCC pop_options
-#endif /* __DISABLE_MOVRS_AVX10_2__ */
-
-#if !defined (__AVX10_2_512__) || !defined (__MOVRS__)
-#pragma GCC push_options
-#pragma GCC target("avx10.2-512,movrs")
-#define __DISABLE_MOVRS_AVX10_2_512__
-#endif /* __MOVRS_AVX10_2_512__ */
-
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_loadrs_epi8 (void const *__A)
@@ -443,10 +432,10 @@ _mm512_maskz_loadrs_epi16 (__mmask32 __U, void const *__A)
(__mmask32) __U);
}
-#ifdef __DISABLE_MOVRS_AVX10_2_512__
-#undef __DISABLE_MOVRS_AVX10_2_512__
+#ifdef __DISABLE_MOVRS_AVX10_2__
+#undef __DISABLE_MOVRS_AVX10_2__
#pragma GCC pop_options
-#endif /* __DISABLE_MOVRS_AVX10_2_512__ */
+#endif /* __DISABLE_MOVRS_AVX10_2__ */
#endif /* __x86_64__ */
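
With the separate 256- and 512-bit guard blocks merged, the whole tail of this header now sits in one avx10.2,movrs region (still x86-64 only). A usage sketch for one of the 512-bit loads declared above; the wrapper name is illustrative:

  #include <immintrin.h>

  __attribute__ ((target ("avx10.2,movrs")))
  __m512i
  load_read_shared (void const *p)
  {
    return _mm512_loadrs_epi8 (p);
  }
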
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index 8631588..3d3848c 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -1629,7 +1629,7 @@
;; Return true if this comparison only requires testing one flag bit.
;; VCOMX/VUCOMX set ZF, SF, OF, differently from COMI/UCOMI.
(define_predicate "ix86_trivial_fp_comparison_operator"
- (if_then_else (match_test "TARGET_AVX10_2_256")
+ (if_then_else (match_test "TARGET_AVX10_2")
(match_code "gt,ge,unlt,unle,eq,uneq,ne,ltgt,ordered,unordered")
(match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered")))
diff --git a/gcc/config/i386/sm4intrin.h b/gcc/config/i386/sm4intrin.h
index 29d1d4c..af7a1c3 100644
--- a/gcc/config/i386/sm4intrin.h
+++ b/gcc/config/i386/sm4intrin.h
@@ -67,9 +67,9 @@ _mm256_sm4rnds4_epi32 (__m256i __A, __m256i __B)
#pragma GCC pop_options
#endif /* __DISABLE_SM4__ */
-#if !defined (__SM4__) || !defined (__AVX10_2_512__)
+#if !defined (__SM4__) || !defined (__AVX10_2__)
#pragma GCC push_options
-#pragma GCC target("sm4,avx10.2-512")
+#pragma GCC target("sm4,avx10.2")
#define __DISABLE_SM4_512__
#endif /* __SM4_512__ */
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 70c2cf3..b280676 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -382,8 +382,8 @@
(V2DF "TARGET_AVX512DQ && TARGET_AVX512VL")])
(define_mode_iterator VF1_VF2_AVX10_2
- [(V16SF "TARGET_AVX10_2_512") V8SF V4SF
- (V8DF "TARGET_AVX10_2_512") V4DF V2DF])
+ [(V16SF "TARGET_AVX10_2") V8SF V4SF
+ (V8DF "TARGET_AVX10_2") V4DF V2DF])
(define_mode_iterator VFH
[(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
@@ -401,9 +401,9 @@
(V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F && TARGET_EVEX512")
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
- (V32BF "TARGET_AVX10_2_512")
- (V16BF "TARGET_AVX10_2_256")
- (V8BF "TARGET_AVX10_2_256")
+ (V32BF "TARGET_AVX10_2")
+ (V16BF "TARGET_AVX10_2")
+ (V8BF "TARGET_AVX10_2")
])
;; 128-, 256- and 512-bit float vector modes for bitwise operations
@@ -447,13 +447,13 @@
[(V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
(define_mode_iterator VF2_AVX10_2
- [(V8DF "TARGET_AVX10_2_512") V4DF V2DF])
+ [(V8DF "TARGET_AVX10_2") V4DF V2DF])
;; All DFmode & HFmode & BFmode vector float modes
(define_mode_iterator VF2HB
- [(V32BF "TARGET_AVX10_2_512")
- (V16BF "TARGET_AVX10_2_256")
- (V8BF "TARGET_AVX10_2_256")
+ [(V32BF "TARGET_AVX10_2")
+ (V16BF "TARGET_AVX10_2")
+ (V8BF "TARGET_AVX10_2")
(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
@@ -511,10 +511,10 @@
(V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI1248_AVX10_2
- [(V64QI "TARGET_AVX10_2_512") V32QI V16QI
- (V32HI "TARGET_AVX10_2_512") V16HI V8HI
- (V16SI "TARGET_AVX10_2_512") V8SI V4SI
- (V8DI "TARGET_AVX10_2_512") V4DI V2DI])
+ [(V64QI "TARGET_AVX10_2") V32QI V16QI
+ (V32HI "TARGET_AVX10_2") V16HI V8HI
+ (V16SI "TARGET_AVX10_2") V8SI V4SI
+ (V8DI "TARGET_AVX10_2") V4DI V2DI])
(define_mode_iterator VF_AVX512VL
[(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
@@ -528,9 +528,9 @@
(V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VFH_AVX10_2
- [(V32HF "TARGET_AVX10_2_512") V16HF V8HF
- (V16SF "TARGET_AVX10_2_512") V8SF V4SF
- (V8DF "TARGET_AVX10_2_512") V4DF V2DF])
+ [(V32HF "TARGET_AVX10_2") V16HF V8HF
+ (V16SF "TARGET_AVX10_2") V8SF V4SF
+ (V8DF "TARGET_AVX10_2") V4DF V2DF])
(define_mode_iterator VF2_AVX512VL
[(V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
@@ -542,7 +542,7 @@
[(V16SF "TARGET_AVX512BW && TARGET_EVEX512") (V8SF "TARGET_AVX2") V4SF])
(define_mode_iterator VF1_AVX10_2
- [(V16SF "TARGET_AVX10_2_512") V8SF V4SF])
+ [(V16SF "TARGET_AVX10_2") V8SF V4SF])
(define_mode_iterator VHFBF
[(V32HF "TARGET_EVEX512") V16HF V8HF
@@ -558,10 +558,10 @@
(V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
(define_mode_iterator VHF_AVX10_2
- [(V32HF "TARGET_AVX10_2_512") V16HF V8HF])
+ [(V32HF "TARGET_AVX10_2") V16HF V8HF])
(define_mode_iterator VBF_AVX10_2
- [(V32BF "TARGET_AVX10_2_512") V16BF V8BF])
+ [(V32BF "TARGET_AVX10_2") V16BF V8BF])
;; All vector integer modes
(define_mode_iterator VI
@@ -614,7 +614,7 @@
[(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI8_AVX10_2
- [(V8DI "TARGET_AVX10_2_512") V4DI V2DI])
+ [(V8DI "TARGET_AVX10_2") V4DI V2DI])
(define_mode_iterator VI8_FVL
[(V8DI "TARGET_AVX512F && TARGET_EVEX512") V4DI (V2DI "TARGET_AVX512VL")])
@@ -659,7 +659,7 @@
(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX10_2
- [(V32HI "TARGET_AVX10_2_512") V16HI V8HI])
+ [(V32HI "TARGET_AVX10_2") V16HI V8HI])
(define_mode_iterator VI4_AVX
[(V8SI "TARGET_AVX") V4SI])
@@ -674,7 +674,7 @@
[(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
(define_mode_iterator VI4_AVX10_2
- [(V16SI "TARGET_AVX10_2_512") V8SI V4SI])
+ [(V16SI "TARGET_AVX10_2") V8SI V4SI])
(define_mode_iterator VI48_AVX512F_AVX512VL
[V4SI V8SI (V16SI "TARGET_AVX512F && TARGET_EVEX512")
@@ -1571,7 +1571,11 @@
"TARGET_AVX512VL || <MODE_SIZE> == 64"
"vpternlogd\t{$0xFF, %0, %0, %0|%0, %0, %0, 0xFF}"
[(set_attr "type" "sselog1")
- (set_attr "prefix" "evex")])
+ (set_attr "prefix" "evex")
+ (set (attr "mode")
+ (if_then_else (match_test "TARGET_AVX512VL")
+ (const_string "<sseinsnmode>")
+ (const_string "XI")))])
;; If mem_addr points to a memory region with less than whole vector size bytes
;; of accessible memory and k is a mask that would prevent reading the inaccessible
@@ -2649,7 +2653,7 @@
(plusminus:VF_BHSD
(match_operand:VF_BHSD 1 "<round_nimm_predicate>")
(match_operand:VF_BHSD 2 "<round_nimm_predicate>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
(define_insn "*<insn><mode>3<mask_name><round_name>"
@@ -2658,7 +2662,7 @@
(match_operand:VFH 1 "<bcst_round_nimm_predicate>" "<comm>0,v")
(match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
"TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)
- && <mask_mode512bit_condition> && <round_mode_condition>"
+ && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
v<plusminus_mnemonic><ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -2738,7 +2742,7 @@
(mult:VF_BHSD
(match_operand:VF_BHSD 1 "<round_nimm_predicate>")
(match_operand:VF_BHSD 2 "<round_nimm_predicate>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")
(define_insn "*mul<mode>3<mask_name><round_name>"
@@ -2747,7 +2751,7 @@
(match_operand:VFH 1 "<bcst_round_nimm_predicate>" "%0,v")
(match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
"TARGET_SSE && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
- && <mask_mode512bit_condition> && <round_mode_condition>"
+ && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
mul<ssemodesuffix>\t{%2, %0|%0, %2}
vmul<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -2857,7 +2861,7 @@
(div:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "register_operand")
(match_operand:VBF_AVX10_2 2 "vector_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
if (TARGET_RECIP_VEC_DIV
&& optimize_insn_for_speed_p ()
@@ -2895,7 +2899,7 @@
(div:VFH
(match_operand:VFH 1 "register_operand" "0,v")
(match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
div<ssemodesuffix>\t{%2, %0|%0, %2}
vdiv<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -3061,7 +3065,7 @@
(define_insn "<sse>_sqrt<mode>2<mask_name><round_name>"
[(set (match_operand:VFH 0 "register_operand" "=x,v")
(sqrt:VFH (match_operand:VFH 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
sqrt<ssemodesuffix>\t{%1, %0|%0, %1}
vsqrt<ssemodesuffix>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
@@ -3277,7 +3281,7 @@
(match_operand:VFH 1 "<round_saeonly_nimm_predicate>")
(match_operand:VFH 2 "<round_saeonly_nimm_predicate>")))]
"TARGET_SSE && <mask_mode512bit_condition>
- && <round_saeonly_mode_condition>"
+ && <round_saeonly_mode512bit_condition>"
{
if (!flag_finite_math_only || flag_signed_zeros)
{
@@ -3305,7 +3309,7 @@
"TARGET_SSE
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))
&& <mask_mode512bit_condition>
- && <round_saeonly_mode_condition>"
+ && <round_saeonly_mode512bit_condition>"
"@
<maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
v<maxmin_float><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
@@ -3392,7 +3396,7 @@
IEEE_MAXMIN))]
"TARGET_SSE
&& <mask_mode512bit_condition>
- && <round_saeonly_mode_condition>"
+ && <round_saeonly_mode512bit_condition>"
"@
<ieee_maxmin><ssemodesuffix>\t{%2, %0|%0, %2}
v<ieee_maxmin><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
@@ -4021,7 +4025,7 @@
[(match_operand:VFH_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_REDUCE))]
- "(TARGET_AVX512DQ || (VALID_AVX512FP16_REG_MODE (<MODE>mode))) && <round_saeonly_mode_condition>"
+ "TARGET_AVX512DQ || (VALID_AVX512FP16_REG_MODE (<MODE>mode))"
"vreduce<ssemodesuffix>\t{%2, <round_saeonly_mask_op3>%1, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
@@ -4391,7 +4395,7 @@
(match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 3 "<cmp_imm_predicate>" "n")]
UNSPEC_PCMP))]
- "TARGET_AVX512F && <round_saeonly_mode_condition>"
+ "TARGET_AVX512F && <round_saeonly_mode512bit_condition>"
"v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
[(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
@@ -4850,7 +4854,7 @@
(match_operand:<ssevecmode> 1 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
(parallel [(const_int 0)]))]
UNSPEC_COMX))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"v<unord>comx<ssemodesuffix>\t{<round_saeonly_op2>%1, %0|%0, %<iptr>1<round_saeonly_op2>}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "evex")
@@ -4885,7 +4889,7 @@
(vec_select:BF
(match_operand:V8BF 1 "nonimmediate_operand" "vm")
(parallel [(const_int 0)]))))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcomisbf16\t{%1, %0|%0, %1}"
[(set_attr "prefix" "evex")
(set_attr "type" "ssecomi")])
@@ -4921,7 +4925,7 @@
(match_operator:<avx512fmaskmode> 1 ""
[(match_operand:VBF_AVX10_2 2 "register_operand")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand")]))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
bool ok = ix86_expand_mask_vec_cmp (operands[0], GET_CODE (operands[1]),
operands[2], operands[3]);
@@ -5813,9 +5817,9 @@
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
- (V8BF "TARGET_AVX10_2_256")
- (V16BF "TARGET_AVX10_2_256")
- (V32BF "TARGET_AVX10_2_512")])
+ (V8BF "TARGET_AVX10_2")
+ (V16BF "TARGET_AVX10_2")
+ (V32BF "TARGET_AVX10_2")])
(define_expand "fma<mode>4"
[(set (match_operand:FMAMODEM 0 "register_operand")
@@ -5897,7 +5901,7 @@
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
{
emit_insn (gen_fma_fmadd_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -5939,7 +5943,7 @@
(match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v")
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
vfmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -5980,7 +5984,7 @@
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
"@
vfmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -5997,7 +6001,7 @@
(match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"vfmadd231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6027,7 +6031,7 @@
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
{
emit_insn (gen_fma_fmsub_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -6042,7 +6046,7 @@
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(neg:VFH_SF_AVX512VL
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
vfmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6085,7 +6089,7 @@
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"@
vfmsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfmsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -6103,7 +6107,7 @@
(match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
"vfmsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6133,7 +6137,7 @@
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
{
emit_insn (gen_fma_fnmadd_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -6148,7 +6152,7 @@
(match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v"))
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
vfnmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfnmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6191,7 +6195,7 @@
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
"@
vfnmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfnmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -6209,7 +6213,7 @@
(match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
"vfnmadd231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6240,7 +6244,7 @@
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
{
emit_insn (gen_fma_fnmsub_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -6256,7 +6260,7 @@
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(neg:VFH_SF_AVX512VL
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
vfnmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfnmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6301,7 +6305,7 @@
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F && <round_mode512bit_condition>"
"@
vfnmsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfnmsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -6320,7 +6324,7 @@
(match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"vfnmsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6418,7 +6422,7 @@
(match_operand:VFH_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>,v,<round_constraint>")
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0")]
UNSPEC_FMADDSUB))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
vfmaddsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmaddsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6437,7 +6441,7 @@
UNSPEC_FMADDSUB)
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"@
vfmaddsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfmaddsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -6455,7 +6459,7 @@
UNSPEC_FMADDSUB)
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"vfmaddsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6488,7 +6492,7 @@
(neg:VFH_AVX512VL
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0"))]
UNSPEC_FMADDSUB))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
vfmsubadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmsubadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -7057,7 +7061,7 @@
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
rtx op0, op1, dest;
if (<round_embedded_complex>)
@@ -7087,7 +7091,7 @@
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
emit_insn (gen_fma_fmaddc_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -7101,7 +7105,7 @@
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
rtx op0, op1, dest;
if (<round_embedded_complex>)
@@ -7133,7 +7137,7 @@
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
emit_insn (gen_fma_fcmaddc_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -7157,7 +7161,7 @@
(match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")
(match_operand:VHF_AVX512VL 3 "<round_nimm_predicate>" "0")]
UNSPEC_COMPLEX_F_C_MA))]
- "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode512bit_condition>"
"v<complexopname><ssemodesuffix>\t{<round_sdc_mask_op4>%2, %1, %0<sdc_mask_op4>|%0<sdc_mask_op4>, %1, %2<round_sdc_mask_op4>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -7295,7 +7299,7 @@
(unspec:<avx512fmaskmode>
[(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
UNSPEC_COMPLEX_MASK)))]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
"v<complexopname><ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -7315,7 +7319,7 @@
[(match_operand:VHF_AVX512VL 1 "<round_nimm_predicate>" "<int_comm>v")
(match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_COMPLEX_F_C_MUL))]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
if (TARGET_DEST_FALSE_DEP_FOR_GLC
&& <maskc_dest_false_dep_for_glc_cond>)
@@ -7332,7 +7336,7 @@
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -7346,7 +7350,7 @@
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
rtx op0, op1, dest;
@@ -7376,7 +7380,7 @@
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
@@ -7390,7 +7394,7 @@
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
rtx op0, op1, dest;
@@ -7420,7 +7424,7 @@
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
rtx dest, op0, op1;
@@ -7450,7 +7454,7 @@
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
{
rtx dest, op0, op1;
@@ -7627,7 +7631,7 @@
(unspec:VI248_AVX512VL
[(match_operand:<ssePHmode> 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_US_FIX_NOTRUNC))]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16"
"vcvtph2<sseintconvertsignprefix><sseintconvert>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %<iptrh>1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -7643,7 +7647,7 @@
[(set (match_operand:<ssePHmode> 0 "register_operand" "=v")
(any_float:<ssePHmode>
(match_operand:VI2H_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")))]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16"
"vcvt<floatsuffix><sseintconvert>2ph<round_qq2phsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -7681,19 +7685,6 @@
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
-(define_expand "avx512fp16_vcvt<floatsuffix>qq2ph_v4di_mask_round"
- [(match_operand:V8HF 0 "register_operand")
- (any_float:V4HF (match_operand:V4DI 1 "register_operand"))
- (match_operand:V8HF 2 "nonimm_or_0_operand")
- (match_operand:QI 3 "register_operand")
- (unspec [(match_operand:SI 4 "const_4_or_8_to_11_operand")] UNSPEC_EMBEDDED_ROUNDING)]
- "TARGET_AVX10_2_256"
-{
- emit_insn (gen_avx512fp16_vcvt<floatsuffix>qq2ph_v4di_mask_round_1 (
- operands[0], operands[1], operands[2], operands[3], CONST0_RTX (V4HFmode), operands[4]));
- DONE;
-})
-
(define_expand "avx512fp16_vcvt<floatsuffix><sseintconvert>2ph_<mode>_mask"
[(set (match_operand:V8HF 0 "register_operand" "=v")
(vec_concat:V8HF
@@ -7707,18 +7698,18 @@
"TARGET_AVX512FP16 && TARGET_AVX512VL"
"operands[4] = CONST0_RTX (V4HFmode);")
-(define_insn "avx512fp16_vcvt<floatsuffix><sseintconvert>2ph_<mode>_mask<round_name>_1"
+(define_insn "*avx512fp16_vcvt<floatsuffix><sseintconvert>2ph_<mode>_mask"
[(set (match_operand:V8HF 0 "register_operand" "=v")
(vec_concat:V8HF
(vec_merge:V4HF
- (any_float:V4HF (match_operand:VI4_128_8_256 1 "<round_nimm_predicate>" "<round_constraint>"))
+ (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand" "vm"))
(vec_select:V4HF
(match_operand:V8HF 2 "nonimm_or_0_operand" "0C")
(parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))
(match_operand:QI 3 "register_operand" "Yk"))
(match_operand:V4HF 4 "const0_operand")))]
- "TARGET_AVX512FP16 && TARGET_AVX512VL && <round_mode_condition>"
- "vcvt<floatsuffix><sseintconvert>2ph<round_qq2phsuff>\t{<round_op5>%1, %0%{%3%}%N2|%0%{%3%}%N2, %1<round_op5>}"
+ "TARGET_AVX512FP16 && TARGET_AVX512VL"
+ "vcvt<floatsuffix><sseintconvert>2ph<qq2phsuff>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -7865,7 +7856,7 @@
(unspec:VI2H_AVX512VL
[(match_operand:<ssePHmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512FP16 && <round_saeonly_mode_condition>"
+ "TARGET_AVX512FP16"
"vcvttph2<vcvtt_suffix><sseintconvert>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -7875,7 +7866,7 @@
[(set (match_operand:VI2H_AVX512VL 0 "register_operand" "=v")
(any_fix:VI2H_AVX512VL
(match_operand:<ssePHmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512FP16 && <round_saeonly_mode_condition>"
+ "TARGET_AVX512FP16"
"vcvttph2<fixsuffix><sseintconvert>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -7898,13 +7889,13 @@
}
})
-(define_insn "unspec_avx512fp16_fix<vcvtt_uns_suffix>_trunc<mode>2<mask_name><round_saeonly_name>"
+(define_insn "unspec_avx512fp16_fix<vcvtt_uns_suffix>_trunc<mode>2<mask_name>"
[(set (match_operand:VI4_128_8_256 0 "register_operand" "=v")
(unspec:VI4_128_8_256
[(match_operand:V8HF 1 "register_operand" "v")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512FP16 && TARGET_AVX512VL && <round_saeonly_mode_condition>"
- "vcvttph2<vcvtt_suffix><sseintconvert>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ "TARGET_AVX512FP16 && TARGET_AVX512VL"
+ "vcvttph2<vcvtt_suffix><sseintconvert>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -8028,7 +8019,7 @@
[(set (match_operand:VF48H_AVX512VL 0 "register_operand" "=v")
(float_extend:VF48H_AVX512VL
(match_operand:<ssePHmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512FP16 && <round_saeonly_mode_condition>"
+ "TARGET_AVX512FP16"
"vcvtph2<castmode><ph2pssuffix>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8051,14 +8042,14 @@
}
})
-(define_insn "avx512fp16_float_extend_ph<mode>2<mask_name><round_saeonly_name>"
+(define_insn "avx512fp16_float_extend_ph<mode>2<mask_name>"
[(set (match_operand:VF4_128_8_256 0 "register_operand" "=v")
(float_extend:VF4_128_8_256
(vec_select:V4HF
(match_operand:V8HF 1 "register_operand" "v")
(parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))]
- "TARGET_AVX512FP16 && TARGET_AVX512VL && <round_saeonly_mode_condition>"
- "vcvtph2<castmode><ph2pssuffix>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %q1<round_saeonly_mask_op2>}"
+ "TARGET_AVX512FP16 && TARGET_AVX512VL"
+ "vcvtph2<castmode><ph2pssuffix>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -8122,7 +8113,7 @@
[(set (match_operand:<ssePHmode> 0 "register_operand" "=v")
(float_truncate:<ssePHmode>
(match_operand:VF48H_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")))]
- "TARGET_AVX512FP16 && <round_mode_condition>"
+ "TARGET_AVX512FP16"
"vcvt<castmode>2ph<ph2pssuffix><round_qq2phsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8161,19 +8152,6 @@
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
-(define_expand "avx512fp16_vcvtpd2ph_v4df_mask_round"
- [(match_operand:V8HF 0 "register_operand")
- (match_operand:V4DF 1 "register_operand")
- (match_operand:V8HF 2 "nonimm_or_0_operand")
- (match_operand:QI 3 "register_operand")
- (unspec [(match_operand:SI 4 "const_4_or_8_to_11_operand")] UNSPEC_EMBEDDED_ROUNDING)]
- "TARGET_AVX10_2_256"
-{
- emit_insn (gen_avx512fp16_vcvtpd2ph_v4df_mask_round_1 (
- operands[0], operands[1], operands[2], operands[3], CONST0_RTX (V4HFmode), operands[4]));
- DONE;
-})
-
(define_expand "avx512fp16_vcvt<castmode>2ph_<mode>_mask"
[(set (match_operand:V8HF 0 "register_operand" "=v")
(vec_concat:V8HF
@@ -8189,20 +8167,20 @@
"TARGET_AVX512FP16 && TARGET_AVX512VL"
"operands[4] = CONST0_RTX (V4HFmode);")
-(define_insn "avx512fp16_vcvt<castmode>2ph_<mode>_mask<round_name>_1"
+(define_insn "*avx512fp16_vcvt<castmode>2ph_<mode>_mask"
[(set (match_operand:V8HF 0 "register_operand" "=v")
(vec_concat:V8HF
(vec_merge:V4HF
(float_truncate:V4HF
- (match_operand:VF4_128_8_256 1 "<round_nimm_predicate>" "<round_constraint>"))
+ (match_operand:VF4_128_8_256 1 "vector_operand" "vm"))
(vec_select:V4HF
(match_operand:V8HF 2 "nonimm_or_0_operand" "0C")
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)]))
(match_operand:QI 3 "register_operand" "Yk"))
(match_operand:V4HF 4 "const0_operand")))]
- "TARGET_AVX512FP16 && TARGET_AVX512VL && <round_mode_condition>"
- "vcvt<castmode>2ph<ph2pssuffix><round_qq2phsuff>\t{<round_op5>%1, %0%{%3%}%N2|%0%{%3%}%N2, %1<round_op5>}"
+ "TARGET_AVX512FP16 && TARGET_AVX512VL"
+ "vcvt<castmode>2ph<ph2pssuffix><qq2phsuff>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -8647,7 +8625,7 @@
[(set (match_operand:VF1 0 "register_operand" "=x,v")
(float:VF1
(match_operand:<sseintvecmode> 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
- "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode_condition>"
+ "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
cvtdq2ps\t{%1, %0|%0, %1}
vcvtdq2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
@@ -8660,7 +8638,7 @@
[(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v")
(unsigned_float:VF1_AVX512VL
(match_operand:<sseintvecmode> 1 "nonimmediate_operand" "<round_constraint>")))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"vcvtudq2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8684,13 +8662,13 @@
(define_mode_attr sf2simodelower
[(V16SI "v16sf") (V8SI "v8sf") (V4SI "v4sf")])
-(define_insn "<sse2_avx_avx512f>_fix_notrunc<sf2simodelower><mode><mask_name><round_name>"
+(define_insn "<sse2_avx_avx512f>_fix_notrunc<sf2simodelower><mode><mask_name>"
[(set (match_operand:VI4_AVX 0 "register_operand" "=v")
(unspec:VI4_AVX
- [(match_operand:<ssePSmode> 1 "<round_nimm_predicate>" "<round_constraint4>")]
+ [(match_operand:<ssePSmode> 1 "vector_operand" "vBm")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode_condition>"
- "%vcvtps2dq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
+ "TARGET_SSE2 && <mask_mode512bit_condition>"
+ "%vcvtps2dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
@@ -8716,7 +8694,7 @@
(unspec:VI4_AVX512VL
[(match_operand:<ssePSmode> 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"vcvtps2udq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8726,7 +8704,7 @@
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode_condition>"
+ "TARGET_AVX512DQ && <round_mode512bit_condition>"
"vcvtps2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8749,7 +8727,7 @@
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode_condition>"
+ "TARGET_AVX512DQ && <round_mode512bit_condition>"
"vcvtps2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8789,13 +8767,12 @@
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
-(define_insn "unspec_fix_truncv8sfv8si2<mask_name><round_saeonly_name>"
+(define_insn "unspec_fix_truncv8sfv8si2<mask_name>"
[(set (match_operand:V8SI 0 "register_operand" "=v")
- (unspec:V8SI [(match_operand:V8SF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ (unspec:V8SI [(match_operand:V8SF 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTT))]
- "TARGET_AVX && <mask_avx512vl_condition>
- && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
- "vcvttps2dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ "TARGET_AVX && <mask_avx512vl_condition>"
+ "vcvttps2dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "<mask_prefix>")
(set_attr "mode" "OI")])
@@ -9181,7 +9158,7 @@
[(set (match_operand:VF2_AVX512VL 0 "register_operand" "=v")
(any_float:VF2_AVX512VL
(match_operand:<sseintvecmode> 1 "nonimmediate_operand" "<round_constraint>")))]
- "TARGET_AVX512DQ && <round_mode_condition>"
+ "TARGET_AVX512DQ"
"vcvt<floatsuffix>qq2pd\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9195,8 +9172,8 @@
[(set (match_operand:<ssePSmode2> 0 "register_operand" "=v")
(any_float:<ssePSmode2>
(match_operand:VI8_256_512 1 "nonimmediate_operand" "<round_constraint>")))]
- "TARGET_AVX512DQ && <round_mode_condition>"
- "vcvt<floatsuffix>qq2ps<round_qq2pssuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
+ "TARGET_AVX512DQ && <round_mode512bit_condition>"
+ "vcvt<floatsuffix>qq2ps<qq2pssuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -9414,13 +9391,12 @@
(set_attr "prefix" "evex")
(set_attr "mode" "OI")])
-(define_insn "avx_cvtpd2dq256<mask_name><round_name>"
+(define_insn "avx_cvtpd2dq256<mask_name>"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4DF 1 "<round_nimm_predicate>" "<round_constraint>")]
+ (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "vm")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX && <mask_avx512vl_condition>
- && (!<round_applied> || TARGET_AVX10_2_256)"
- "vcvtpd2dq<round_suff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
+ "TARGET_AVX && <mask_avx512vl_condition>"
+ "vcvtpd2dq{y}\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "<mask_prefix>")
(set_attr "mode" "OI")])
@@ -9510,8 +9486,8 @@
(unspec:<si2dfmode>
[(match_operand:VF2_512_256VL 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512F && <round_mode_condition>"
- "vcvtpd2udq<round_pd2udqsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
+ "TARGET_AVX512F"
+ "vcvtpd2udq<pd2udqsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -9654,13 +9630,12 @@
(set_attr "prefix" "evex")
(set_attr "mode" "TI")])
-(define_insn "unspec_fix_truncv4dfv4si2<mask_name><round_saeonly_name>"
+(define_insn "unspec_fix_truncv4dfv4si2<mask_name>"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4DF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTT))]
- "TARGET_AVX && <mask_avx512vl_condition>
- && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
- "vcvttpd2dq<round_saeonly_suff>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ "TARGET_AVX || (TARGET_AVX512VL && TARGET_AVX512F)"
+ "vcvttpd2dq{y}\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "OI")])
@@ -9674,13 +9649,12 @@
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "OI")])
-(define_insn "unspec_fixuns_truncv4dfv4si2<mask_name><round_saeonly_name>"
+(define_insn "unspec_fixuns_truncv4dfv4si2<mask_name>"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4DF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTTU))]
- "TARGET_AVX512VL && TARGET_AVX512F
- && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
- "vcvttpd2udq<round_saeonly_suff>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ "TARGET_AVX512VL && TARGET_AVX512F"
+ "vcvttpd2udq{y}\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "OI")])
@@ -9699,7 +9673,7 @@
(unspec:<sseintvecmode>
[(match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
"vcvttpd2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9709,7 +9683,7 @@
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(any_fix:<sseintvecmode>
(match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
"vcvttpd2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9720,7 +9694,7 @@
(unspec:<sseintvecmode>
[(match_operand:VF2_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode_condition>"
+ "TARGET_AVX512DQ && <round_mode512bit_condition>"
"vcvtpd2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9731,7 +9705,7 @@
(unspec:<sseintvecmode>
[(match_operand:VF2_AVX512VL 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode_condition>"
+ "TARGET_AVX512DQ && <round_mode512bit_condition>"
"vcvtpd2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9742,18 +9716,18 @@
(unspec:VI8_256_512
[(match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
"vcvttps2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
-(define_insn "fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name>"
+(define_insn "fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name><round_saeonly_name>"
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(any_fix:VI8_256_512
- (match_operand:<ssePSmode2> 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512DQ"
- "vcvttps2<fixsuffix>qq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ (match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
+ "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+ "vcvttps2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -9908,13 +9882,13 @@
DONE;
})
-(define_insn "<mask_codefor>unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name><round_saeonly_name>"
+(define_insn "<mask_codefor>unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VF1_128_256 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ [(match_operand:VF1_128_256 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTTU))]
- "TARGET_AVX512VL && <round_saeonly_mode_condition>"
- "vcvttps2udq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ "TARGET_AVX512VL"
+ "vcvttps2udq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseintvecmode2>")])
@@ -10102,13 +10076,12 @@
(set_attr "prefix" "evex")
(set_attr "mode" "V8SF")])
-(define_insn "avx_cvtpd2ps256<mask_name><round_name>"
+(define_insn "avx_cvtpd2ps256<mask_name>"
[(set (match_operand:V4SF 0 "register_operand" "=v")
(float_truncate:V4SF
- (match_operand:V4DF 1 "<round_nimm_predicate>" "<round_constraint>")))]
- "TARGET_AVX && <mask_avx512vl_condition>
- && (!<round_applied> || TARGET_AVX10_2_256)"
- "vcvtpd2ps<round_suff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
+ (match_operand:V4DF 1 "nonimmediate_operand" "vm")))]
+ "TARGET_AVX && <mask_avx512vl_condition>"
+ "vcvtpd2ps{y}\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_evex")
(set_attr "btver2_decode" "vector")
@@ -10231,7 +10204,7 @@
[(set (match_operand:VF2_512_256 0 "register_operand" "=v")
(float_extend:VF2_512_256
(match_operand:<sf2dfmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode_condition>"
+ "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
"vcvtps2pd\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_vex")
@@ -11935,7 +11908,7 @@
]
(const_string "ssemov")))
(set (attr "addr")
- (if_then_else (eq_attr "alternative" "8,9")
+ (if_then_else (eq_attr "alternative" "9,10")
(const_string "gpr16")
(const_string "*")))
(set (attr "prefix_extra")
@@ -12126,7 +12099,7 @@
(match_operand:V8_128 1 "reg_or_0_operand" "v,C")
(const_int 1)))]
"TARGET_AVX512FP16
- || (TARGET_AVX10_2_256 && const0_operand (operands[1], <MODE>mode))"
+ || (TARGET_AVX10_2 && const0_operand (operands[1], <MODE>mode))"
"@
vmovsh\t{%2, %1, %0|%0, %1, %2}
vmovw\t{%2, %0|%2, %0}"
@@ -13633,7 +13606,7 @@
[(match_operand:VFH_AVX512VL 1 "register_operand" "v")
(match_operand:VFH_AVX512VL 2 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_SCALEF))]
- "TARGET_AVX512F && <round_mode_condition>"
+ "TARGET_AVX512F"
"vscalef<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -14041,7 +14014,7 @@
[(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v")
(unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_GETEXP))]
- "TARGET_AVX512F && <round_saeonly_mode_condition>"
+ "TARGET_AVX512F"
"vgetexp<ssemodesuffix>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}";
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -14156,7 +14129,7 @@
(match_operand:<sseintvecmode> 3 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 4 "const_0_to_255_operand")]
UNSPEC_FIXUPIMM))]
- "TARGET_AVX512F && <round_saeonly_mode_condition>"
+ "TARGET_AVX512F"
"vfixupimm<ssemodesuffix>\t{%4, <round_saeonly_sd_mask_op5>%3, %2, %0<sd_mask_op5>|%0<sd_mask_op5>, %2, %3<round_saeonly_sd_mask_op5>, %4}";
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -14172,7 +14145,7 @@
UNSPEC_FIXUPIMM)
(match_dup 1)
(match_operand:<avx512fmaskmode> 5 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_saeonly_mode_condition>"
+ "TARGET_AVX512F"
"vfixupimm<ssemodesuffix>\t{%4, <round_saeonly_op6>%3, %2, %0%{%5%}|%0%{%5%}, %2, %3<round_saeonly_op6>, %4}";
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -14234,7 +14207,7 @@
[(match_operand:VFH_AVX512VL 1 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_ROUND))]
- "TARGET_AVX512F && <round_saeonly_mode_condition>"
+ "TARGET_AVX512F"
"vrndscale<ssemodesuffix>\t{%2, <round_saeonly_mask_op3>%1, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
[(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")
@@ -20204,6 +20177,7 @@
return "vshuf<shuffletype>64x2\t{%3, %2, %1, %0<mask_operand7>|%0<mask_operand7>, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "addr" "gpr16,*")
(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
@@ -20365,6 +20339,7 @@
return "vshuf<shuffletype>32x4\t{%3, %2, %1, %0<mask_operand11>|%0<mask_operand11>, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "addr" "gpr16,*")
(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -23829,7 +23804,7 @@
(match_operand:V64QI 2 "vector_operand" "vm")
(match_operand:SI 3 "const_0_to_255_operand" "n")]
UNSPEC_MPSADBW))]
- "TARGET_AVX10_2_512"
+ "TARGET_AVX10_2"
"vmpsadbw\t{%3, %2, %1, %0<mask_operand4>|%0<mask_operand4>, %1, %2, %3}"
[(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")])
@@ -23841,7 +23816,7 @@
(match_operand:VI1 2 "vector_operand" "vm")
(match_operand:SI 3 "const_0_to_255_operand" "n")]
UNSPEC_MPSADBW))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vmpsadbw\t{%3, %2, %1, %0<mask_operand4>|%0<mask_operand4>, %1, %2, %3}"
[(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")
@@ -24107,6 +24082,7 @@
"TARGET_AVX2"
"vpblendd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemov")
+ (set_attr "addr" "gpr16")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
@@ -27116,7 +27092,7 @@
vaesenc\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx,vaes_avx512vl")
(set_attr "type" "sselog1")
- (set_attr "addr" "gpr16,*,*")
+ (set_attr "addr" "gpr16,gpr16,*")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex,evex")
(set_attr "btver2_decode" "double,double,double")
@@ -27134,7 +27110,7 @@
vaesenclast\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx,vaes_avx512vl")
(set_attr "type" "sselog1")
- (set_attr "addr" "gpr16,*,*")
+ (set_attr "addr" "gpr16,gpr16,*")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex,evex")
(set_attr "btver2_decode" "double,double,double")
@@ -27152,7 +27128,7 @@
vaesdec\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx,vaes_avx512vl")
(set_attr "type" "sselog1")
- (set_attr "addr" "gpr16,*,*")
+ (set_attr "addr" "gpr16,gpr16,*")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex,evex")
(set_attr "btver2_decode" "double,double,double")
@@ -27169,7 +27145,7 @@
* return TARGET_AES ? \"vaesdeclast\t{%2, %1, %0|%0, %1, %2}\" : \"%{evex%} vaesdeclast\t{%2, %1, %0|%0, %1, %2}\";
vaesdeclast\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx,vaes_avx512vl")
- (set_attr "addr" "gpr16,*,*")
+ (set_attr "addr" "gpr16,gpr16,*")
(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,maybe_evex,evex")
@@ -29104,13 +29080,12 @@
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
-(define_insn "vcvtph2ps256<mask_name><round_saeonly_name>"
+(define_insn "vcvtph2ps256<mask_name>"
[(set (match_operand:V8SF 0 "register_operand" "=v")
- (unspec:V8SF [(match_operand:V8HI 1 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")]
+ (unspec:V8SF [(match_operand:V8HI 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTPH2PS))]
- "(TARGET_F16C || TARGET_AVX512VL)
- && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
- "vcvtph2ps\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ "TARGET_F16C || TARGET_AVX512VL"
+ "vcvtph2ps\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "vex")
(set_attr "btver2_decode" "double")
@@ -29824,7 +29799,7 @@
(match_operand:VF_AVX512VL 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 3 "const_0_to_15_operand")]
UNSPEC_RANGE))]
- "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
{
if (TARGET_DEST_FALSE_DEP_FOR_GLC
&& <mask4_dest_false_dep_for_glc_cond>
@@ -29894,7 +29869,7 @@
[(match_operand:VFH_AVX512VL 1 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 2 "const_0_to_15_operand")]
UNSPEC_GETMANT))]
- "TARGET_AVX512F && <round_saeonly_mode_condition>"
+ "TARGET_AVX512F"
{
if (TARGET_DEST_FALSE_DEP_FOR_GLC
&& <mask3_dest_false_dep_for_glc_cond>
@@ -30873,7 +30848,11 @@
return "%{evex%} vaesdec\t{%2, %1, %0|%0, %1, %2}";
else
return "vaesdec\t{%2, %1, %0|%0, %1, %2}";
-})
+}
+[(set_attr "isa" "avx,vaes_avx512vl")
+ (set_attr "type" "sselog1")
+ (set_attr "addr" "gpr16,*")
+ (set_attr "mode" "TI")])
(define_insn "vaesdeclast_<mode>"
[(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=x,v")
@@ -30887,7 +30866,11 @@
return "%{evex%} vaesdeclast\t{%2, %1, %0|%0, %1, %2}";
else
return "vaesdeclast\t{%2, %1, %0|%0, %1, %2}";
-})
+}
+[(set_attr "isa" "avx,vaes_avx512vl")
+ (set_attr "type" "sselog1")
+ (set_attr "addr" "gpr16,*")
+ (set_attr "mode" "TI")])
(define_insn "vaesenc_<mode>"
[(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=x,v")
@@ -30901,7 +30884,11 @@
return "%{evex%} vaesenc\t{%2, %1, %0|%0, %1, %2}";
else
return "vaesenc\t{%2, %1, %0|%0, %1, %2}";
-})
+}
+[(set_attr "isa" "avx,vaes_avx512vl")
+ (set_attr "type" "sselog1")
+ (set_attr "addr" "gpr16,*")
+ (set_attr "mode" "TI")])
(define_insn "vaesenclast_<mode>"
[(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=x,v")
@@ -30915,7 +30902,11 @@
return "%{evex%} vaesenclast\t{%2, %1, %0|%0, %1, %2}";
else
return "vaesenclast\t{%2, %1, %0|%0, %1, %2}";
-})
+}
+[(set_attr "isa" "avx,vaes_avx512vl")
+ (set_attr "type" "sselog1")
+ (set_attr "addr" "gpr16,*")
+ (set_attr "mode" "TI")])
(define_insn "vpclmulqdq_<mode>"
[(set (match_operand:VI8_FVL 0 "register_operand" "=v")
@@ -31362,7 +31353,8 @@
(unspec_volatile:CCZ [(match_dup 1) (match_dup 2)] AESDECENCKL))]
"TARGET_KL"
"aes<aesklvariant>\t{%2, %0|%0, %2}"
- [(set_attr "type" "other")])
+ [(set_attr "type" "other")
+ (set_attr "addr" "gpr16")])
(define_int_iterator AESDECENCWIDEKL
[UNSPECV_AESDECWIDE128KLU8 UNSPECV_AESDECWIDE256KLU8
@@ -31424,7 +31416,8 @@
AESDECENCWIDEKL))])]
"TARGET_WIDEKL"
"aes<aeswideklvariant>\t%0"
- [(set_attr "type" "other")])
+ [(set_attr "type" "other")
+ (set_attr "addr" "gpr16")])
;; Modes handled by broadcast patterns. NB: Allow V64QI and V32HI with
;; TARGET_AVX512F since ix86_expand_vector_init_duplicate can expand
@@ -31477,8 +31470,8 @@
(match_operand:<ssedvecmode> 3 "register_operand")]
"TARGET_SSE2"
{
- if ((<MODE_SIZE> == 64 && TARGET_AVX10_2_512)
- || (<MODE_SIZE> < 64 && (TARGET_AVXVNNIINT8 || TARGET_AVX10_2_256)))
+ if ((<MODE_SIZE> == 64 && TARGET_AVX10_2)
+ || (<MODE_SIZE> < 64 && (TARGET_AVXVNNIINT8 || TARGET_AVX10_2)))
{
operands[1] = lowpart_subreg (<ssedvecmode>mode,
force_reg (<MODE>mode, operands[1]),
@@ -31525,8 +31518,8 @@
(match_operand:<ssedvecmode> 3 "register_operand")]
"TARGET_SSE2"
{
- if ((<MODE_SIZE> == 64 && TARGET_AVX10_2_512)
- || (<MODE_SIZE> < 64 && (TARGET_AVXVNNIINT8 || TARGET_AVX10_2_256)))
+ if ((<MODE_SIZE> == 64 && TARGET_AVX10_2)
+ || (<MODE_SIZE> < 64 && (TARGET_AVXVNNIINT8 || TARGET_AVX10_2)))
{
operands[1] = lowpart_subreg (<ssedvecmode>mode,
force_reg (<MODE>mode, operands[1]),
@@ -31573,7 +31566,7 @@
(match_operand:VI4_AVX 2 "register_operand" "v")
(match_operand:VI4_AVX 3 "nonimmediate_operand" "vm")]
VPDOTPROD))]
- "TARGET_AVXVNNIINT8 || TARGET_AVX10_2_256"
+ "TARGET_AVXVNNIINT8 || TARGET_AVX10_2"
"vpdp<vpdotprodtype>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "prefix" "maybe_evex")])
@@ -31584,7 +31577,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
VPDOTPROD))]
- "TARGET_AVX10_2_512"
+ "TARGET_AVX10_2"
"vpdp<vpdotprodtype>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -31598,7 +31591,7 @@
VPDOTPROD)
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vpdp<vpdotprodtype>\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -31612,7 +31605,7 @@
VPDOTPROD)
(match_dup 5)
(match_operand:<avx512fmaskmode> 4 "register_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"operands[5] = CONST0_RTX (<MODE>mode);")
(define_insn "*vpdp<vpdotprodtype>_<mode>_maskz"
@@ -31625,7 +31618,7 @@
VPDOTPROD)
(match_operand:VI4_AVX10_2 5 "const0_operand" "C")
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vpdp<vpdotprodtype>\t{%3, %2, %0%{%4%}%N5|%0%{%4%}%N5, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -31718,7 +31711,7 @@
(match_operand:<ssePSmode> 2 "<round_nimm_predicate>" "<round_constraint>"))
(float_truncate:<ssehalfvecmode>
(match_operand:<ssePSmode> 1 "register_operand" "v"))))]
- "TARGET_AVX10_2_256 && <round_mode_condition>"
+ "TARGET_AVX10_2 && <round_mode512bit_condition>"
"vcvt2ps2phx\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}")
(define_mode_attr ssebvecmode
@@ -31740,7 +31733,7 @@
[(match_operand:VHF_AVX10_2 1 "register_operand" "v")
(match_operand:VHF_AVX10_2 2 "nonimmediate_operand" "vm")]
UNSPEC_CONVERTFP8_PACK))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<convertfp8_pack>\t{%2, %1, %0<mask_operand3>|%0<mask_operand2>, %1, %2}"
[(set_attr "prefix" "evex")])
@@ -31765,7 +31758,7 @@
(match_operand:V8HF 2 "nonimmediate_operand")]
UNSPEC_VCVTBIASPH2FP8_PACK)
(match_dup 3)))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"operands[3] = CONST0_RTX (V8QImode);")
(define_insn "*vcvt<biasph2fp8_pack>v8hf"
@@ -31776,7 +31769,7 @@
(match_operand:V8HF 2 "nonimmediate_operand" "vm")]
UNSPEC_VCVTBIASPH2FP8_PACK)
(match_operand:V8QI 3 "const0_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<biasph2fp8_pack>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "mode" "HF")])
@@ -31797,7 +31790,7 @@
(const_int 6) (const_int 7)]))
(match_operand:QI 4 "register_operand" "C"))
(match_dup 5)))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"operands[5] = CONST0_RTX (V8QImode);")
(define_insn "*vcvt<biasph2fp8_pack>v8hf_mask"
@@ -31816,12 +31809,12 @@
(const_int 6) (const_int 7)]))
(match_operand:QI 4 "register_operand" "Yk"))
(match_operand:V8QI 5 "const0_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<biasph2fp8_pack>\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}"
[(set_attr "prefix" "evex")])
(define_mode_iterator VHF_AVX10_2_2
- [(V32HF "TARGET_AVX10_2_512") V16HF])
+ [(V32HF "TARGET_AVX10_2") V16HF])
(define_insn "vcvt<biasph2fp8_pack><mode><mask_name>"
[(set (match_operand:<ssebvecmode_2> 0 "register_operand" "=v")
@@ -31829,12 +31822,12 @@
[(match_operand:<ssebvecmode> 1 "register_operand" "v")
(match_operand:VHF_AVX10_2_2 2 "nonimmediate_operand" "vm")]
UNSPEC_VCVTBIASPH2FP8_PACK))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<biasph2fp8_pack>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
(define_mode_iterator VHF_256_512
- [V16HF (V32HF "TARGET_AVX10_2_512")])
+ [V16HF (V32HF "TARGET_AVX10_2")])
(define_mode_attr ph2fp8suff
[(V32HF "") (V16HF "{y}") (V8HF "{x}")])
@@ -31856,7 +31849,7 @@
[(match_operand:V8HF 1 "nonimmediate_operand")]
UNSPEC_CONVERTPH2FP8)
(match_dup 2)))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"operands[2] = CONST0_RTX (V8QImode);")
(define_insn "*vcvt<convertph2fp8>v8hf"
@@ -31866,7 +31859,7 @@
[(match_operand:V8HF 1 "nonimmediate_operand" "vm")]
UNSPEC_CONVERTPH2FP8)
(match_operand:V8QI 2 "const0_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<convertph2fp8>{x}\t{%1, %0|%0, %1}"
[(set_attr "prefix" "evex")
(set_attr "mode" "HF")])
@@ -31886,7 +31879,7 @@
(const_int 6) (const_int 7)]))
(match_operand:QI 3 "register_operand"))
(match_dup 4)))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"operands[4] = CONST0_RTX (V8QImode);")
(define_insn "*vcvt<convertph2fp8>v8hf_mask"
@@ -31904,7 +31897,7 @@
(const_int 6) (const_int 7)]))
(match_operand:QI 3 "register_operand" "Yk"))
(match_operand:V8QI 4 "const0_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<convertph2fp8>{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "prefix" "evex")])
@@ -31913,7 +31906,7 @@
(unspec:<ssebvecmode_2>
[(match_operand:VHF_256_512 1 "nonimmediate_operand" "vm")]
UNSPEC_CONVERTPH2FP8))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<convertph2fp8><ph2fp8suff>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
@@ -31922,7 +31915,7 @@
(unspec:VHF_AVX10_2
[(match_operand:<ssebvecmode_2> 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTHF82PH))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvthf82ph\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
@@ -31944,7 +31937,7 @@
(match_operand:VI2_AVX10_2 1 "register_operand")
(match_operand:VI2_AVX10_2 2 "register_operand")
(match_operand:<sseunpackmode> 3 "register_operand")]
- "TARGET_AVXVNNIINT16 || TARGET_AVX10_2_256"
+ "TARGET_AVXVNNIINT16 || TARGET_AVX10_2"
{
operands[1] = lowpart_subreg (<sseunpackmode>mode,
force_reg (<MODE>mode, operands[1]),
@@ -31962,7 +31955,7 @@
(match_operand:VI2_AVX10_2 1 "register_operand")
(match_operand:VI2_AVX10_2 2 "register_operand")
(match_operand:<sseunpackmode> 3 "register_operand")]
- "TARGET_AVXVNNIINT16 || TARGET_AVX10_2_256"
+ "TARGET_AVXVNNIINT16 || TARGET_AVX10_2"
{
operands[1] = lowpart_subreg (<sseunpackmode>mode,
force_reg (<MODE>mode, operands[1]),
@@ -31982,7 +31975,7 @@
(match_operand:VI4_AVX 2 "register_operand" "v")
(match_operand:VI4_AVX 3 "nonimmediate_operand" "vm")]
VPDPWPROD))]
- "TARGET_AVXVNNIINT16 || TARGET_AVX10_2_256"
+ "TARGET_AVXVNNIINT16 || TARGET_AVX10_2"
"vpdp<vpdpwprodtype>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "prefix" "maybe_evex")])
@@ -31993,7 +31986,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
VPDPWPROD))]
- "TARGET_AVX10_2_512"
+ "TARGET_AVX10_2"
"vpdp<vpdpwprodtype>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32007,7 +32000,7 @@
VPDPWPROD)
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vpdp<vpdpwprodtype>\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32021,7 +32014,7 @@
VPDPWPROD)
(match_dup 5)
(match_operand:<avx512fmaskmode> 4 "register_operand")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"operands[5] = CONST0_RTX (<MODE>mode);")
(define_insn "*vpdp<vpdpwprodtype>_<mode>_maskz"
@@ -32034,7 +32027,7 @@
VPDPWPROD)
(match_operand:VI4_AVX10_2 5 "const0_operand" "C")
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vpdp<vpdpwprodtype>\t{%3, %2, %0%{%4%}%N5|%0%{%4%}%N5, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32045,7 +32038,7 @@
(match_operand:VF1_AVX10_2 2 "register_operand" "v")
(match_operand:VF1_AVX10_2 3 "nonimmediate_operand" "vm")]
UNSPEC_VDPPHPS))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vdpphps\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32059,7 +32052,7 @@
UNSPEC_VDPPHPS)
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vdpphps\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32069,7 +32062,7 @@
(match_operand:VF1_AVX10_2 2 "register_operand")
(match_operand:VF1_AVX10_2 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
emit_insn (gen_vdpphps_<mode>_maskz_1 (operands[0], operands[1],
operands[2], operands[3], CONST0_RTX(<MODE>mode), operands[4]));
@@ -32086,7 +32079,7 @@
UNSPEC_VDPPHPS)
(match_operand:VF1_AVX10_2 4 "const0_operand" "C")
(match_operand:<avx512fmaskmode> 5 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vdpphps\t{%3, %2, %0%{%5%}%N4|%0%{%5%}%N4, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32096,7 +32089,7 @@
[(match_operand:VBF_AVX10_2 1 "register_operand" "v")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")]
UNSPEC_VSCALEFBF16))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vscalefbf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
@@ -32105,14 +32098,14 @@
(smaxmin:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "register_operand")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand")))]
- "TARGET_AVX10_2_256")
+ "TARGET_AVX10_2")
(define_insn "avx10_2_<code>bf16_<mode><mask_name>"
[(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
(smaxmin:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "register_operand" "v")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"v<maxmin_float>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -32122,7 +32115,7 @@
(plusminusmultdiv:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "register_operand" "v")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"v<insn>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
@@ -32132,7 +32125,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
emit_insn (gen_avx10_2_fmaddbf16_<mode>_maskz_1 (operands[0], operands[1],
operands[2], operands[3],
@@ -32147,7 +32140,7 @@
(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%0,0,v")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfmadd132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
vfmadd213bf16\t{%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3}
@@ -32165,7 +32158,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfmadd132bf16\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfmadd213bf16\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -32182,7 +32175,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vfmadd231bf16\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "type" "ssemuladd")
@@ -32194,7 +32187,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
emit_insn (gen_avx10_2_fnmaddbf16_<mode>_maskz_1 (operands[0], operands[1],
operands[2], operands[3],
@@ -32210,7 +32203,7 @@
(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%0,0,v"))
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfnmadd132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
vfnmadd213bf16\t{%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3}
@@ -32229,7 +32222,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfnmadd132bf16\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfnmadd213bf16\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -32247,7 +32240,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vfnmadd231bf16\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "type" "ssemuladd")
@@ -32259,7 +32252,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
emit_insn (gen_avx10_2_fmsubbf16_<mode>_maskz_1 (operands[0], operands[1],
operands[2], operands[3],
@@ -32275,7 +32268,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
(neg:VBF_AVX10_2
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0"))))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfmsub132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
vfmsub213bf16\t{%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3}
@@ -32294,7 +32287,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfmsub132bf16\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfmsub213bf16\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -32312,7 +32305,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vfmsub231bf16\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "type" "ssemuladd")
@@ -32324,7 +32317,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
{
emit_insn (gen_avx10_2_fnmsubbf16_<mode>_maskz_1 (operands[0], operands[1],
operands[2], operands[3],
@@ -32341,7 +32334,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
(neg:VBF_AVX10_2
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0"))))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfnmsub132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
vfnmsub213bf16\t{%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3}
@@ -32361,7 +32354,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"@
vfnmsub132bf16\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfnmsub213bf16\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -32380,7 +32373,7 @@
(match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vfnmsub231bf16\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "type" "ssemuladd")
@@ -32391,7 +32384,7 @@
(unspec:VBF_AVX10_2
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
UNSPEC_RSQRT))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vrsqrtbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
@@ -32399,7 +32392,7 @@
[(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
(sqrt:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vsqrtbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
@@ -32408,7 +32401,7 @@
(unspec:VBF_AVX10_2
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
UNSPEC_RCP))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vrcpbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
@@ -32417,7 +32410,7 @@
(unspec:VBF_AVX10_2
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
UNSPEC_GETEXP))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vgetexpbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
@@ -32437,7 +32430,7 @@
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")
(match_operand:SI 2 "const_0_to_255_operand")]
BF16IMMOP))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"v<bf16immop>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
@@ -32447,7 +32440,7 @@
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")
(match_operand 2 "const_0_to_255_operand")]
UNSPEC_VFPCLASSBF16))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vfpclassbf16<vecmemsuffix>\t{%2, %1, %0<mask_scalar_merge_operand3>|%0<mask_scalar_merge_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
@@ -32458,7 +32451,7 @@
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")
(match_operand 3 "const_0_to_31_operand" "n")]
UNSPEC_PCMP))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcmpbf16\t{%3, %2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2, %3}"
[(set_attr "prefix" "evex")])
@@ -32495,7 +32488,7 @@
(unspec:<sseintvecmode>
[(match_operand:VBF_AVX10_2 1 "vector_operand" "vm")]
UNSPEC_CVT_BF16_IBS_ITER))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvt<sat_cvt_trunc_prefix>bf162i<sat_cvt_sign_prefix>bs\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32510,7 +32503,7 @@
(unspec:<sseintvecmode>
[(match_operand:VHF_AVX10_2 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_CVT_PH_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_mode_condition>"
+ "TARGET_AVX10_2 && <round_mode512bit_condition>"
"vcvtph2i<sat_cvt_sign_prefix>bs\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32525,7 +32518,7 @@
(unspec:<sseintvecmode>
[(match_operand:VHF_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_CVTT_PH_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttph2i<sat_cvt_sign_prefix>bs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32540,7 +32533,7 @@
(unspec:<sseintvecmode>
[(match_operand:VF1_AVX10_2 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_CVT_PS_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_mode_condition>"
+ "TARGET_AVX10_2 && <round_mode512bit_condition>"
"vcvtps2i<sat_cvt_sign_prefix>bs\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32555,7 +32548,7 @@
(unspec:<sseintvecmode>
[(match_operand:VF1_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_CVTT_PS_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttps2i<sat_cvt_sign_prefix>bs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32574,7 +32567,7 @@
(unspec:<VEC_GATHER_IDXSI>
[(match_operand:VF1_VF2_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvtt<castmode>2<sat_cvt_sign_prefix>dqs<pd2dqssuff>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32585,7 +32578,7 @@
(unspec:<VEC_GATHER_IDXDI>
[(match_operand:VF2_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttpd2<sat_cvt_sign_prefix>qqs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32596,7 +32589,7 @@
(unspec:VI8_AVX10_2
[(match_operand:<vpckfloat_temp_mode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttps2<sat_cvt_sign_prefix>qqs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32609,7 +32602,7 @@
(match_operand:V2DF 1 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
(parallel [(const_int 0)]))]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvttsd2<sat_cvt_sign_prefix>sis\t{<round_saeonly_op2>%1, %0|%0, %1<round_saeonly_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32622,7 +32615,7 @@
(match_operand:V4SF 1 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
(parallel [(const_int 0)]))]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vcvttss2<sat_cvt_sign_prefix>sis\t{<round_saeonly_op2>%1, %0|%0, %1<round_saeonly_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -32635,7 +32628,7 @@
(match_operand:VBF_AVX10_2 2 "bcst_vector_operand" "vmBr")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_MINMAXBF16))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vminmaxbf16\t{%3, %2, %1, %0<mask_operand4>|%0<mask_operand4>, %1, %2, %3}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -32647,7 +32640,7 @@
(match_operand:VFH_AVX10_2 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_MINMAX))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vminmax<ssemodesuffix>\t{%3, <round_saeonly_mask_op4>%2, %1, %0<mask_operand4>|%0<mask_operand4>, %1, %2<round_saeonly_mask_op4>, %3}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -32662,7 +32655,7 @@
UNSPEC_MINMAX)
(match_dup 1)
(const_int 1)))]
- "TARGET_AVX10_2_256"
+ "TARGET_AVX10_2"
"vminmax<ssescalarmodesuffix>\t{%3, <round_saeonly_scalar_mask_op4>%2, %1, %0<mask_scalar_operand4>|%0<mask_scalar_operand4>, %1, %2<round_saeonly_scalar_mask_op4>, %3}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<ssescalarmode>")])
@@ -32672,7 +32665,7 @@
(unspec:VI1248_AVX10_2
[(match_operand:VI1248_AVX10_2 1 "memory_operand" "m")]
UNSPEC_VMOVRS))]
- "TARGET_AVX10_2_256 && TARGET_MOVRS"
+ "TARGET_AVX10_2 && TARGET_MOVRS"
"vmovrs<ssemodesuffix>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md
index c47c5d5..c30b274 100644
--- a/gcc/config/i386/subst.md
+++ b/gcc/config/i386/subst.md
@@ -197,32 +197,19 @@
(define_subst_attr "round_sd_mask_op4" "round" "" "<round_sd_mask_operand4>")
(define_subst_attr "round_sdc_mask_op4" "round" "" "<round_sdc_mask_operand4>")
(define_subst_attr "round_constraint" "round" "vm" "v")
-(define_subst_attr "round_suff" "round" "{y}" "")
(define_subst_attr "round_qq2phsuff" "round" "<qq2phsuff>" "")
-(define_subst_attr "round_qq2pssuff" "round" "<qq2pssuff>" "")
-(define_subst_attr "round_pd2udqsuff" "round" "<pd2udqsuff>" "")
(define_subst_attr "bcst_round_constraint" "round" "vmBr" "v")
(define_subst_attr "round_constraint2" "round" "m" "v")
(define_subst_attr "round_constraint3" "round" "rm" "r")
-(define_subst_attr "round_constraint4" "round" "vBm" "v")
(define_subst_attr "round_nimm_predicate" "round" "vector_operand" "register_operand")
(define_subst_attr "bcst_round_nimm_predicate" "round" "bcst_vector_operand" "register_operand")
(define_subst_attr "round_nimm_scalar_predicate" "round" "nonimmediate_operand" "register_operand")
(define_subst_attr "round_prefix" "round" "vex" "evex")
-(define_subst_attr "round_mode_condition" "round" "1" "((<MODE>mode == V16SFmode
- || <MODE>mode == V8DFmode
- || <MODE>mode == V8DImode
- || <MODE>mode == V16SImode
- || <MODE>mode == V32HImode
- || <MODE>mode == V32HFmode)
- || (TARGET_AVX10_2_256
- && (<MODE>mode == V8SFmode
- || <MODE>mode == V4DFmode
- || <MODE>mode == V4DImode
- || <MODE>mode == V8SImode
- || <MODE>mode == V16HImode
- || <MODE>mode == V16HFmode)))")
-(define_subst_attr "round_applied" "round" "false" "true")
+(define_subst_attr "round_mode512bit_condition" "round" "1" "(<MODE>mode == V16SFmode
+ || <MODE>mode == V8DFmode
+ || <MODE>mode == V8DImode
+ || <MODE>mode == V16SImode
+ || <MODE>mode == V32HFmode)")
(define_subst_attr "round_modev4sf_condition" "round" "1" "(<MODE>mode == V4SFmode)")
(define_subst_attr "round_codefor" "round" "*" "")
@@ -263,21 +250,11 @@
(define_subst_attr "round_saeonly_constraint2" "round_saeonly" "m" "v")
(define_subst_attr "round_saeonly_nimm_predicate" "round_saeonly" "vector_operand" "register_operand")
(define_subst_attr "round_saeonly_nimm_scalar_predicate" "round_saeonly" "nonimmediate_operand" "register_operand")
-(define_subst_attr "round_saeonly_suff" "round_saeonly" "{y}" "")
-(define_subst_attr "round_saeonly_mode_condition" "round_saeonly" "1" "((<MODE>mode == V16SFmode
- || <MODE>mode == V8DFmode
- || <MODE>mode == V8DImode
- || <MODE>mode == V16SImode
- || <MODE>mode == V32HImode
- || <MODE>mode == V32HFmode)
- || (TARGET_AVX10_2_256
- && (<MODE>mode == V8SFmode
- || <MODE>mode == V4DFmode
- || <MODE>mode == V4DImode
- || <MODE>mode == V8SImode
- || <MODE>mode == V16HImode
- || <MODE>mode == V16HFmode)))")
-(define_subst_attr "round_saeonly_applied" "round_saeonly" "false" "true")
+(define_subst_attr "round_saeonly_mode512bit_condition" "round_saeonly" "1" "(<MODE>mode == V16SFmode
+ || <MODE>mode == V8DFmode
+ || <MODE>mode == V8DImode
+ || <MODE>mode == V16SImode
+ || <MODE>mode == V32HFmode)")
(define_subst "round_saeonly"
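
With round_mode512bit_condition (and its SAE-only counterpart) restored, the "round" substs apply only to 512-bit modes, so embedded rounding and SAE remain 512-bit-only at the pattern level. A minimal sketch using a long-standing AVX-512F intrinsic, not part of this patch:

#include <immintrin.h>

/* Embedded round-toward-zero on a 512-bit add; narrower vector widths
   use the MXCSR rounding mode instead.  */
__attribute__ ((target ("avx512f")))
static __m512
add_round_toward_zero (__m512 a, __m512 b)
{
  return _mm512_add_round_ps (a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
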
diff --git a/gcc/config/i386/zn4zn5.md b/gcc/config/i386/zn4zn5.md
index ae188a1..ecb1e3b 100644
--- a/gcc/config/i386/zn4zn5.md
+++ b/gcc/config/i386/zn4zn5.md
@@ -142,6 +142,20 @@
(eq_attr "memory" "load"))))
"znver4-double,znver5-load,znver5-ieu")
+(define_insn_reservation "znver4_imov_double_store" 5
+ (and (eq_attr "cpu" "znver4")
+ (and (eq_attr "znver1_decode" "double")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "store"))))
+ "znver4-double,znver4-store,znver4-ieu")
+
+(define_insn_reservation "znver5_imov_double_store" 5
+ (and (eq_attr "cpu" "znver5")
+ (and (eq_attr "znver1_decode" "double")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "store"))))
+ "znver4-double,znver5-store,znver5-ieu")
+
;; imov, imovx
(define_insn_reservation "znver4_imov" 1
(and (eq_attr "cpu" "znver4")
@@ -167,6 +181,18 @@
(eq_attr "memory" "load")))
"znver4-direct,znver5-load,znver5-ieu")
+(define_insn_reservation "znver4_imov_store" 5
+ (and (eq_attr "cpu" "znver4")
+ (and (eq_attr "type" "imov,imovx")
+ (eq_attr "memory" "store")))
+ "znver4-direct,znver4-store,znver4-ieu")
+
+(define_insn_reservation "znver5_imov_store" 5
+ (and (eq_attr "cpu" "znver5")
+ (and (eq_attr "type" "imov,imovx")
+ (eq_attr "memory" "store")))
+ "znver4-direct,znver5-store,znver5-ieu")
+
;; Push Instruction
(define_insn_reservation "znver4_push" 1
(and (eq_attr "cpu" "znver4")
@@ -395,6 +421,18 @@
(eq_attr "memory" "store")))
"znver4-direct,znver4-ieu,znver5-store")
+(define_insn_reservation "znver4_insn_both" 5
+ (and (eq_attr "cpu" "znver4")
+ (and (eq_attr "type" "alu,alu1,negnot,rotate1,ishift1,test,incdec,icmp")
+ (eq_attr "memory" "both")))
+ "znver4-direct,znver4-load,znver4-ieu,znver4-store")
+
+(define_insn_reservation "znver5_insn_both" 5
+ (and (eq_attr "cpu" "znver5")
+ (and (eq_attr "type" "alu,alu1,negnot,rotate1,ishift1,test,incdec,icmp")
+ (eq_attr "memory" "both")))
+ "znver4-direct,znver5-load,znver4-ieu,znver5-store")
+
(define_insn_reservation "znver4_insn2_store" 1
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "icmov,setcc")
@@ -855,13 +893,20 @@
"znver4-direct,znver5-load,znver4-fpu")
(define_insn_reservation "znver4_sse_log1" 1
+ (and (eq_attr "cpu" "znver4,znver5")
+ (and (eq_attr "type" "sselog1")
+ (and (eq_attr "mode" "V4SF,V8SF,V2DF,V4DF,QI,HI,SI,DI,TI,OI")
+ (eq_attr "memory" "none"))))
+ "znver4-direct,znver4-fpu1|znver4-fpu2")
+
+(define_insn_reservation "znver4_sse_log1_store" 1
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "sselog1")
(and (eq_attr "mode" "V4SF,V8SF,V2DF,V4DF,QI,HI,SI,DI,TI,OI")
(eq_attr "memory" "store"))))
"znver4-direct,znver4-fpu1|znver4-fpu2,znver4-fp-store")
-(define_insn_reservation "znver5_sse_log1" 1
+(define_insn_reservation "znver5_sse_log1_store" 1
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "sselog1")
(and (eq_attr "mode" "V4SF,V8SF,V2DF,V4DF,QI,HI,SI,DI,TI,OI")
@@ -908,9 +953,8 @@
(define_insn_reservation "znver4_sse_test" 1
(and (eq_attr "cpu" "znver4,znver5")
- (and (eq_attr "prefix_extra" "1")
- (and (eq_attr "type" "ssecomi")
- (eq_attr "memory" "none"))))
+ (and (eq_attr "type" "ssecomi")
+ (eq_attr "memory" "none")))
"znver4-direct,znver4-fpu1|znver4-fpu2")
(define_insn_reservation "znver4_sse_test_load" 6
@@ -986,35 +1030,35 @@
(define_insn_reservation "znver4_sse_mov_fp" 1
(and (eq_attr "cpu" "znver4,znver5")
(and (eq_attr "type" "ssemov")
- (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu")
(define_insn_reservation "znver4_sse_mov_fp_load" 6
(and (eq_attr "cpu" "znver4")
- (and (eq_attr "type" "ssemov")
- (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "type" "ssemov,ssemov2")
+ (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver4-load,znver4-fpu")
(define_insn_reservation "znver5_sse_mov_fp_load" 6
(and (eq_attr "cpu" "znver5")
- (and (eq_attr "type" "ssemov")
- (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "type" "ssemov,ssemov2")
+ (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fpu")
(define_insn_reservation "znver4_sse_mov_fp_store" 1
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssemov")
- (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V16SF,V8DF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "store"))))
"znver4-direct,znver4-fp-store")
(define_insn_reservation "znver5_sse_mov_fp_store" 1
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssemov")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "store"))))
"znver4-direct,znver5-fp-store256")
@@ -1028,42 +1072,42 @@
(define_insn_reservation "znver4_sse_add" 3
(and (eq_attr "cpu" "znver4,znver5")
(and (eq_attr "type" "sseadd")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver4_sse_add_load" 8
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "sseadd")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver4-load,znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver5_sse_add_load" 8
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "sseadd")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver4_sse_add1" 4
(and (eq_attr "cpu" "znver4,znver5")
(and (eq_attr "type" "sseadd1")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-vector,znver4-fvector*2")
(define_insn_reservation "znver4_sse_add1_load" 9
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "sseadd1")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-vector,znver4-load,znver4-fvector*2")
(define_insn_reservation "znver5_sse_add1_load" 9
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "sseadd1")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-vector,znver5-load,znver4-fvector*2")
@@ -1091,28 +1135,28 @@
(define_insn_reservation "znver4_sse_mul" 3
(and (eq_attr "cpu" "znver4,znver5")
(and (eq_attr "type" "ssemul")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu0|znver4-fpu1")
(define_insn_reservation "znver4_sse_mul_load" 8
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssemul")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver4-load,znver4-fpu0|znver4-fpu1")
(define_insn_reservation "znver5_sse_mul_load" 8
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssemul")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fpu0|znver4-fpu1")
(define_insn_reservation "znver4_sse_div_pd" 13
(and (eq_attr "cpu" "znver4,znver5")
(and (eq_attr "type" "ssediv")
- (and (eq_attr "mode" "V4DF,V2DF,V1DF")
+ (and (eq_attr "mode" "V4DF,V2DF,V1DF,DF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fdiv*5")
@@ -1126,14 +1170,14 @@
(define_insn_reservation "znver4_sse_div_pd_load" 18
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssediv")
- (and (eq_attr "mode" "V4DF,V2DF,V1DF")
+ (and (eq_attr "mode" "V4DF,V2DF,V1DF,DF")
(eq_attr "memory" "load"))))
"znver4-direct,znver4-load,znver4-fdiv*5")
(define_insn_reservation "znver5_sse_div_pd_load" 18
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssediv")
- (and (eq_attr "mode" "V4DF,V2DF,V1DF")
+ (and (eq_attr "mode" "V4DF,V2DF,V1DF,DF")
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fdiv*5")
@@ -1199,41 +1243,48 @@
(define_insn_reservation "znver4_sse_cvt" 3
(and (eq_attr "cpu" "znver4,znver5")
(and (eq_attr "type" "ssecvt")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver4_sse_cvt_load" 8
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssecvt")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver4-load,znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver5_sse_cvt_load" 8
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssecvt")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver4_sse_icvt" 3
(and (eq_attr "cpu" "znver4,znver5")
- (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "type" "sseicvt")
(and (eq_attr "mode" "SI")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu2|znver4-fpu3")
+(define_insn_reservation "znver4_sse_icvt2" 3
+ (and (eq_attr "cpu" "znver4,znver5")
+ (and (eq_attr "type" "sseicvt2")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "memory" "none"))))
+ "znver4-direct,znver4-fpu2|znver4-fpu3")
+
(define_insn_reservation "znver4_sse_icvt_store" 4
(and (eq_attr "cpu" "znver4")
- (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "type" "sseicvt")
(and (eq_attr "mode" "SI")
(eq_attr "memory" "store"))))
"znver4-double,znver4-fpu2|znver4-fpu3,znver4-fp-store")
(define_insn_reservation "znver5_sse_icvt_store" 4
(and (eq_attr "cpu" "znver5")
- (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "type" "sseicvt")
(and (eq_attr "mode" "SI")
(eq_attr "memory" "store"))))
"znver4-double,znver4-fpu2|znver4-fpu3,znver5-fp-store256")
@@ -1241,28 +1292,28 @@
(define_insn_reservation "znver4_sse_shuf" 1
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "sseshuf")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu1|znver4-fpu2")
(define_insn_reservation "znver5_sse_shuf" 1
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "sseshuf")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "none"))))
"znver4-direct,znver4-fpu1|znver4-fpu2|znver4-fpu3")
(define_insn_reservation "znver4_sse_shuf_load" 6
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "sseshuf")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver4-load,znver4-fpu")
(define_insn_reservation "znver5_sse_shuf_load" 6
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "sseshuf")
- (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,DF,SF")
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fpu")
@@ -1316,6 +1367,20 @@
(eq_attr "memory" "load"))))
"znver4-direct,znver5-load,znver4-fpu0|znver4-fpu1|znver4-fpu2|znver4-fpu3")
+(define_insn_reservation "znver4_sse_log_evex_store" 1
+ (and (eq_attr "cpu" "znver4")
+ (and (eq_attr "type" "sselog")
+ (and (eq_attr "mode" "V16SF,V8DF,XI")
+ (eq_attr "memory" "store"))))
+ "znver4-direct,znver4-store,znver4-fpu0*2|znver4-fpu1*2|znver4-fpu2*2|znver4-fpu3*2")
+
+(define_insn_reservation "znver5_sse_log_evex_store" 1
+ (and (eq_attr "cpu" "znver5")
+ (and (eq_attr "type" "sselog")
+ (and (eq_attr "mode" "V16SF,V8DF,XI")
+ (eq_attr "memory" "store"))))
+ "znver4-direct,znver5-store,znver4-fpu0|znver4-fpu1|znver4-fpu2|znver4-fpu3")
+
(define_insn_reservation "znver4_sse_log1_evex" 1
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "sselog1")
@@ -1557,7 +1622,7 @@
(define_insn_reservation "znver4_sse_cmp_avx128" 3
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,DF,SF,TI")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "none")))))
"znver4-direct,znver4-fpu0*2|znver4-fpu1*2")
@@ -1565,7 +1630,7 @@
(define_insn_reservation "znver5_sse_cmp_avx128" 3
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,DF,SF,TI")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "none")))))
"znver4-direct,znver4-fpu1|znver4-fpu2")
@@ -1573,7 +1638,7 @@
(define_insn_reservation "znver4_sse_cmp_avx128_load" 9
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,DF,SF")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "load")))))
"znver4-direct,znver4-load,znver4-fpu0*2|znver4-fpu1*2")
@@ -1581,7 +1646,7 @@
(define_insn_reservation "znver5_sse_cmp_avx128_load" 9
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,SF")
+ (and (eq_attr "mode" "V4SF,V2DF,V2SF,V1DF,DF,SF")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "load")))))
"znver4-direct,znver5-load,znver4-fpu1|znver4-fpu2")
@@ -1589,7 +1654,7 @@
(define_insn_reservation "znver4_sse_cmp_avx256" 4
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V8SF,V4DF")
+ (and (eq_attr "mode" "V8SF,V4DF,OI")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "none")))))
"znver4-direct,znver4-fpu0*2|znver4-fpu1*2")
@@ -1597,7 +1662,7 @@
(define_insn_reservation "znver5_sse_cmp_avx256" 4
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V8SF,V4DF")
+ (and (eq_attr "mode" "V8SF,V4DF,OI")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "none")))))
"znver4-direct,znver4-fpu1|znver4-fpu2")
@@ -1621,7 +1686,7 @@
(define_insn_reservation "znver4_sse_cmp_avx512" 5
(and (eq_attr "cpu" "znver4")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V16SF,V8DF")
+ (and (eq_attr "mode" "V16SF,V8DF,XI")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "none")))))
"znver4-direct,znver4-fpu0*2|znver4-fpu1*2")
@@ -1629,7 +1694,7 @@
(define_insn_reservation "znver5_sse_cmp_avx512" 5
(and (eq_attr "cpu" "znver5")
(and (eq_attr "type" "ssecmp")
- (and (eq_attr "mode" "V16SF,V8DF")
+ (and (eq_attr "mode" "V16SF,V8DF,XI")
(and (eq_attr "prefix" "evex")
(eq_attr "memory" "none")))))
"znver4-direct,znver4-fpu1|znver4-fpu2")
@@ -1748,13 +1813,13 @@
(define_insn_reservation "znver4_sse_muladd_load" 10
(and (eq_attr "cpu" "znver4")
- (and (eq_attr "type" "sseshuf")
+ (and (eq_attr "type" "ssemuladd")
(eq_attr "memory" "load")))
"znver4-direct,znver4-load,znver4-fpu0*2|znver4-fpu1*2")
(define_insn_reservation "znver5_sse_muladd_load" 10
(and (eq_attr "cpu" "znver5")
- (and (eq_attr "type" "sseshuf")
+ (and (eq_attr "type" "ssemuladd")
(eq_attr "memory" "load")))
"znver4-direct,znver5-load,znver4-fpu1|znver4-fpu2")