author     Haochen Jiang <haochen.jiang@intel.com>    2025-01-23 09:51:56 +0800
committer  Haochen Jiang <haochen.jiang@intel.com>    2025-01-23 09:51:56 +0800
commit     b2667fca938c393579e6e4ae9144ff5111ee8b8f (patch)
tree       178bddb0afddb0e94805968b6ccc5ea7a60d93ef /gcc
parent     a19aca8afbf141bb550e9040135cc46387ce7f73 (diff)
i386: Change mnemonics from V[ADDNE,DIVNE,MULNE,RCP,SUBNE]PBF16 to V[ADD,DIV,MUL,RCP,SUB]BF16
gcc/ChangeLog:

	PR target/118270
	* config/i386/avx10_2-512bf16intrin.h: Change intrin and builtin
	name according to new mnemonics.
	* config/i386/avx10_2bf16intrin.h: Ditto.
	* config/i386/i386-builtin.def (BDESC): Ditto.
	* config/i386/sse.md (div<mode>3): Adjust emit_insn.
	(avx10_2_<insn>nepbf16_<mode><mask_name>): Rename to...
	(avx10_2_<insn>bf16_<mode><mask_name>): ...this. Change instruction
	name output.
	(avx10_2_rcppbf16_<mode><mask_name>): Rename to...
	(avx10_2_rcpbf16_<mode><mask_name>): ...this. Change instruction
	name output.

gcc/testsuite/ChangeLog:

	PR target/118270
	* gcc.target/i386/avx10_2-512-bf16-1.c: Adjust output and intrin call.
	* gcc.target/i386/avx10_2-512-bf-vector-operations-1.c: Move to...
	* gcc.target/i386/avx10_2-512-bf16-vector-operations-1.c: ...here.
	Adjust asm check.
	* gcc.target/i386/avx10_2-512-vaddnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-512-vaddbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-512-vdivnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-512-vdivbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-512-vmulnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-512-vmulbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-512-vrcppbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-512-vrcpbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-512-vsubnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-512-vsubbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-bf16-1.c: Adjust output and intrin call.
	* gcc.target/i386/avx10_2-bf-vector-operations-1.c: Move to...
	* gcc.target/i386/avx10_2-bf16-vector-operations-1.c: ...here.
	Adjust asm check.
	* gcc.target/i386/avx10_2-partial-bf-vector-fast-math-1.c: Move to...
	* gcc.target/i386/avx10_2-partial-bf16-vector-fast-math-1.c: ...here.
	Adjust asm check.
	* gcc.target/i386/avx10_2-partial-bf-vector-operations-1.c: Move to...
	* gcc.target/i386/avx10_2-partial-bf16-vector-operations-1.c: ...here.
	Adjust asm check.
	* gcc.target/i386/avx10_2-vaddnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-vaddbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-vdivnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-vdivbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-vmulnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-vmulbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-vrcppbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-vrcpbf16-2.c: ...here. Adjust intrin call.
	* gcc.target/i386/avx10_2-vsubnepbf16-2.c: Move to...
	* gcc.target/i386/avx10_2-vsubbf16-2.c: ...here. Adjust intrin call.
	* lib/target-supports.exp (check_effective_target_avx10_2): Adjust
	asm usage.
	(check_effective_target_avx10_2_512): Ditto.
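For illustration only (not part of the commit): a minimal usage sketch of the renamed 512-bit intrinsics after this change. It assumes a GCC build containing this patch and the options used by the testsuite (-march=x86-64-v3 -mavx10.2-512 -O2); with those, the calls below are expected to assemble to vmulbf16, vaddbf16 and vrcpbf16 rather than the former vmulnepbf16, vaddnepbf16 and vrcppbf16. The function names scale_and_bias and approx_ratio are invented for the example.

#include <immintrin.h>

/* Multiply, then masked add, using the renamed bf16 intrinsics
   (formerly _mm512_mulne_pbh / _mm512_mask_addne_pbh).  */
__m512bh
scale_and_bias (__m512bh x, __m512bh scale, __m512bh bias, __mmask32 m)
{
  __m512bh t = _mm512_mul_pbh (x, scale);
  return _mm512_mask_add_pbh (t, m, t, bias);
}

/* _mm512_rcp_pbh keeps its intrinsic name; the emitted instruction
   becomes vrcpbf16 instead of vrcppbf16.  */
__m512bh
approx_ratio (__m512bh a, __m512bh b)
{
  return _mm512_mul_pbh (a, _mm512_rcp_pbh (b));
}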
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/i386/avx10_2-512bf16intrin.h | 86
-rw-r--r--  gcc/config/i386/avx10_2bf16intrin.h | 174
-rw-r--r--  gcc/config/i386/i386-builtin.def | 54
-rw-r--r--  gcc/config/i386/sse.md | 12
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-bf-vector-operations-1.c | 42
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-1.c | 54
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-vector-operations-1.c | 42
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-vaddbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-512-vaddnepbf16-2.c) | 6
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-vdivbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-512-vdivnepbf16-2.c) | 6
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-vmulbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-512-vmulnepbf16-2.c) | 6
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-vrcpbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-512-vrcppbf16-2.c) | 0
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-512-vsubbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-512-vsubnepbf16-2.c) | 6
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-bf-vector-operations-1.c | 79
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-bf16-1.c | 108
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-bf16-vector-operations-1.c | 79
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-partial-bf16-vector-fast-math-1.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fast-math-1.c) | 4
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-partial-bf16-vector-operations-1.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-operations-1.c) | 8
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-vaddbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-vrcppbf16-2.c) | 4
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-vdivbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-vaddnepbf16-2.c) | 4
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-vmulbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-vdivnepbf16-2.c) | 4
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-vrcpbf16-2.c (renamed from gcc/testsuite/gcc.target/i386/avx10_2-vmulnepbf16-2.c) | 4
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-vsubbf16-2.c | 16
-rw-r--r--  gcc/testsuite/gcc.target/i386/avx10_2-vsubnepbf16-2.c | 16
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 4
24 files changed, 409 insertions, 409 deletions
diff --git a/gcc/config/i386/avx10_2-512bf16intrin.h b/gcc/config/i386/avx10_2-512bf16intrin.h
index d43860a..4d197af 100644
--- a/gcc/config/i386/avx10_2-512bf16intrin.h
+++ b/gcc/config/i386/avx10_2-512bf16intrin.h
@@ -36,106 +36,106 @@
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_addne_pbh (__m512bh __A, __m512bh __B)
+_mm512_add_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_addnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_addbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_addne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_add_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_addnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_addbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_addne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_add_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_addnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_addbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_subne_pbh (__m512bh __A, __m512bh __B)
+_mm512_sub_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_subnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_subbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_subne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_sub_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_subnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_subbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_subne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_sub_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_subnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_subbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mulne_pbh (__m512bh __A, __m512bh __B)
+_mm512_mul_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_mulnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_mulbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_mulne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_mul_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_mulnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_mulbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_mulne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_mul_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_mulnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_mulbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_divne_pbh (__m512bh __A, __m512bh __B)
+_mm512_div_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_divnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_divbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_divne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_div_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_divnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_divbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_divne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_div_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_divnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_divbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
@@ -418,9 +418,9 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_rcp_pbh (__m512bh __A)
{
return (__m512bh)
- __builtin_ia32_rcppbf16512_mask (__A,
- (__v32bf) _mm512_setzero_si512 (),
- (__mmask32) -1);
+ __builtin_ia32_rcpbf16512_mask (__A,
+ (__v32bf) _mm512_setzero_si512 (),
+ (__mmask32) -1);
}
extern __inline__ __m512bh
@@ -428,7 +428,7 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_rcp_pbh (__m512bh __W, __mmask32 __U, __m512bh __A)
{
return (__m512bh)
- __builtin_ia32_rcppbf16512_mask (__A, __W, __U);
+ __builtin_ia32_rcpbf16512_mask (__A, __W, __U);
}
extern __inline__ __m512bh
@@ -436,9 +436,9 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_rcp_pbh (__mmask32 __U, __m512bh __A)
{
return (__m512bh)
- __builtin_ia32_rcppbf16512_mask (__A,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_rcpbf16512_mask (__A,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
diff --git a/gcc/config/i386/avx10_2bf16intrin.h b/gcc/config/i386/avx10_2bf16intrin.h
index 1394cd2..216e8bd 100644
--- a/gcc/config/i386/avx10_2bf16intrin.h
+++ b/gcc/config/i386/avx10_2bf16intrin.h
@@ -36,217 +36,217 @@
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_addne_pbh (__m256bh __A, __m256bh __B)
+_mm256_add_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_addnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_addbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_addne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_add_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_addnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_addbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_addne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_add_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_addnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_addbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addne_pbh (__m128bh __A, __m128bh __B)
+_mm_add_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_addnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_addbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_addne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_add_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_addnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_addbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_addne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_add_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_addnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_addbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_subne_pbh (__m256bh __A, __m256bh __B)
+_mm256_sub_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_subnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_subbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_subne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_sub_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_subnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_subbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_subne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_sub_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_subnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_subbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subne_pbh (__m128bh __A, __m128bh __B)
+_mm_sub_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_subnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_subbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_subne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_sub_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_subnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_subbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_subne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_sub_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_subnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_subbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mulne_pbh (__m256bh __A, __m256bh __B)
+_mm256_mul_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_mulnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_mulbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_mulne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_mul_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_mulnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_mulbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_mulne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_mul_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_mulnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_mulbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulne_pbh (__m128bh __A, __m128bh __B)
+_mm_mul_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_mulnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_mulbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_mulne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_mul_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_mulnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_mulbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_mulne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_mul_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_mulnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_mulbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_divne_pbh (__m256bh __A, __m256bh __B)
+_mm256_div_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_divnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_divbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_divne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_div_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_divnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_divbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_divne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_div_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_divnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_divbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_divne_pbh (__m128bh __A, __m128bh __B)
+_mm_div_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_divnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_divbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_divne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_div_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_divnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_divbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_divne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_div_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_divnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_divbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_max_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_maxpbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_maxbf16256 (__A, __B);
}
extern __inline__ __m256bh
@@ -794,9 +794,9 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_rcp_pbh (__m256bh __A)
{
return (__m256bh)
- __builtin_ia32_rcppbf16256_mask (__A,
- (__v16bf) _mm256_setzero_si256 (),
- (__mmask16) -1);
+ __builtin_ia32_rcpbf16256_mask (__A,
+ (__v16bf) _mm256_setzero_si256 (),
+ (__mmask16) -1);
}
extern __inline__ __m256bh
@@ -804,7 +804,7 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_rcp_pbh (__m256bh __W, __mmask16 __U, __m256bh __A)
{
return (__m256bh)
- __builtin_ia32_rcppbf16256_mask (__A, __W, __U);
+ __builtin_ia32_rcpbf16256_mask (__A, __W, __U);
}
extern __inline__ __m256bh
@@ -812,9 +812,9 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_rcp_pbh (__mmask16 __U, __m256bh __A)
{
return (__m256bh)
- __builtin_ia32_rcppbf16256_mask (__A,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_rcpbf16256_mask (__A,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
@@ -822,9 +822,9 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_pbh (__m128bh __A)
{
return (__m128bh)
- __builtin_ia32_rcppbf16128_mask (__A,
- (__v8bf) _mm_setzero_si128 (),
- (__mmask8) -1);
+ __builtin_ia32_rcpbf16128_mask (__A,
+ (__v8bf) _mm_setzero_si128 (),
+ (__mmask8) -1);
}
extern __inline__ __m128bh
@@ -832,7 +832,7 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_rcp_pbh (__m128bh __W, __mmask8 __U, __m128bh __A)
{
return (__m128bh)
- __builtin_ia32_rcppbf16128_mask (__A, __W, __U);
+ __builtin_ia32_rcpbf16128_mask (__A, __W, __U);
}
extern __inline__ __m128bh
@@ -840,9 +840,9 @@ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_rcp_pbh (__mmask8 __U, __m128bh __A)
{
return (__m128bh)
- __builtin_ia32_rcppbf16128_mask (__A,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_rcpbf16128_mask (__A,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 5549c77..1de85b7 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3179,30 +3179,30 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvtneph2hf8sv32hf_mask, "__bui
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvthf82phv8hf_mask, "__builtin_ia32_vcvthf82ph128_mask", IX86_BUILTIN_VCVTHF82PH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V16QI_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvthf82phv16hf_mask, "__builtin_ia32_vcvthf82ph256_mask", IX86_BUILTIN_VCVTHF82PH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16QI_V16HF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvthf82phv32hf_mask, "__builtin_ia32_vcvthf82ph512_mask", IX86_BUILTIN_VCVTHF82PH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32QI_V32HF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addnepbf16_v32bf, "__builtin_ia32_addnepbf16512", IX86_BUILTIN_ADDNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addnepbf16_v32bf_mask, "__builtin_ia32_addnepbf16512_mask", IX86_BUILTIN_ADDNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v16bf, "__builtin_ia32_addnepbf16256", IX86_BUILTIN_ADDNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v16bf_mask, "__builtin_ia32_addnepbf16256_mask", IX86_BUILTIN_ADDNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v8bf, "__builtin_ia32_addnepbf16128", IX86_BUILTIN_ADDNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v8bf_mask, "__builtin_ia32_addnepbf16128_mask", IX86_BUILTIN_ADDNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subnepbf16_v32bf, "__builtin_ia32_subnepbf16512", IX86_BUILTIN_SUBNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subnepbf16_v32bf_mask, "__builtin_ia32_subnepbf16512_mask", IX86_BUILTIN_SUBNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v16bf, "__builtin_ia32_subnepbf16256", IX86_BUILTIN_SUBNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v16bf_mask, "__builtin_ia32_subnepbf16256_mask", IX86_BUILTIN_SUBNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v8bf, "__builtin_ia32_subnepbf16128", IX86_BUILTIN_SUBNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v8bf_mask, "__builtin_ia32_subnepbf16128_mask", IX86_BUILTIN_SUBNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulnepbf16_v32bf, "__builtin_ia32_mulnepbf16512", IX86_BUILTIN_MULNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulnepbf16_v32bf_mask, "__builtin_ia32_mulnepbf16512_mask", IX86_BUILTIN_MULNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v16bf, "__builtin_ia32_mulnepbf16256", IX86_BUILTIN_MULNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v16bf_mask, "__builtin_ia32_mulnepbf16256_mask", IX86_BUILTIN_MULNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v8bf, "__builtin_ia32_mulnepbf16128", IX86_BUILTIN_MULNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v8bf_mask, "__builtin_ia32_mulnepbf16128_mask", IX86_BUILTIN_MULNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divnepbf16_v32bf, "__builtin_ia32_divnepbf16512", IX86_BUILTIN_DIVNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divnepbf16_v32bf_mask, "__builtin_ia32_divnepbf16512_mask", IX86_BUILTIN_DIVNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v16bf, "__builtin_ia32_divnepbf16256", IX86_BUILTIN_DIVNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v16bf_mask, "__builtin_ia32_divnepbf16256_mask", IX86_BUILTIN_DIVNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v8bf, "__builtin_ia32_divnepbf16128", IX86_BUILTIN_DIVNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v8bf_mask, "__builtin_ia32_divnepbf16128_mask", IX86_BUILTIN_DIVNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addbf16_v32bf, "__builtin_ia32_addbf16512", IX86_BUILTIN_ADDBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addbf16_v32bf_mask, "__builtin_ia32_addbf16512_mask", IX86_BUILTIN_ADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v16bf, "__builtin_ia32_addbf16256", IX86_BUILTIN_ADDBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v16bf_mask, "__builtin_ia32_addbf16256_mask", IX86_BUILTIN_ADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v8bf, "__builtin_ia32_addbf16128", IX86_BUILTIN_ADDBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v8bf_mask, "__builtin_ia32_addbf16128_mask", IX86_BUILTIN_ADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subbf16_v32bf, "__builtin_ia32_subbf16512", IX86_BUILTIN_SUBBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subbf16_v32bf_mask, "__builtin_ia32_subbf16512_mask", IX86_BUILTIN_SUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v16bf, "__builtin_ia32_subbf16256", IX86_BUILTIN_SUBBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v16bf_mask, "__builtin_ia32_subbf16256_mask", IX86_BUILTIN_SUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v8bf, "__builtin_ia32_subbf16128", IX86_BUILTIN_SUBBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v8bf_mask, "__builtin_ia32_subbf16128_mask", IX86_BUILTIN_SUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulbf16_v32bf, "__builtin_ia32_mulbf16512", IX86_BUILTIN_MULBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulbf16_v32bf_mask, "__builtin_ia32_mulbf16512_mask", IX86_BUILTIN_MULBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v16bf, "__builtin_ia32_mulbf16256", IX86_BUILTIN_MULBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v16bf_mask, "__builtin_ia32_mulbf16256_mask", IX86_BUILTIN_MULBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v8bf, "__builtin_ia32_mulbf16128", IX86_BUILTIN_MULBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v8bf_mask, "__builtin_ia32_mulbf16128_mask", IX86_BUILTIN_MULBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divbf16_v32bf, "__builtin_ia32_divbf16512", IX86_BUILTIN_DIVBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divbf16_v32bf_mask, "__builtin_ia32_divbf16512_mask", IX86_BUILTIN_DIVBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v16bf, "__builtin_ia32_divbf16256", IX86_BUILTIN_DIVBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v16bf_mask, "__builtin_ia32_divbf16256_mask", IX86_BUILTIN_DIVBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v8bf, "__builtin_ia32_divbf16128", IX86_BUILTIN_DIVBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v8bf_mask, "__builtin_ia32_divbf16128_mask", IX86_BUILTIN_DIVBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxpbf16_v32bf, "__builtin_ia32_maxpbf16512", IX86_BUILTIN_MAXPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxpbf16_v32bf_mask, "__builtin_ia32_maxpbf16512_mask", IX86_BUILTIN_MAXPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxpbf16_v16bf, "__builtin_ia32_maxpbf16256", IX86_BUILTIN_MAXPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
@@ -3263,9 +3263,9 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rsqrtpbf16_v8bf_mask, "
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_sqrtnepbf16_v32bf_mask, "__builtin_ia32_sqrtnepbf16512_mask", IX86_BUILTIN_SQRTNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sqrtnepbf16_v16bf_mask, "__builtin_ia32_sqrtnepbf16256_mask", IX86_BUILTIN_SQRTNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sqrtnepbf16_v8bf_mask, "__builtin_ia32_sqrtnepbf16128_mask", IX86_BUILTIN_SQRTNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rcppbf16_v32bf_mask, "__builtin_ia32_rcppbf16512_mask", IX86_BUILTIN_RCPPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcppbf16_v16bf_mask, "__builtin_ia32_rcppbf16256_mask", IX86_BUILTIN_RCPPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcppbf16_v8bf_mask, "__builtin_ia32_rcppbf16128_mask", IX86_BUILTIN_RCPPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rcpbf16_v32bf_mask, "__builtin_ia32_rcpbf16512_mask", IX86_BUILTIN_RCPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcpbf16_v16bf_mask, "__builtin_ia32_rcpbf16256_mask", IX86_BUILTIN_RCPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcpbf16_v8bf_mask, "__builtin_ia32_rcpbf16128_mask", IX86_BUILTIN_RCPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_getexppbf16_v32bf_mask, "__builtin_ia32_getexppbf16512_mask", IX86_BUILTIN_GETEXPPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getexppbf16_v16bf_mask, "__builtin_ia32_getexppbf16256_mask", IX86_BUILTIN_GETEXPPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getexppbf16_v8bf_mask, "__builtin_ia32_getexppbf16128_mask", IX86_BUILTIN_GETEXPPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 1461778..4314124 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -2867,8 +2867,8 @@
{
rtx op = gen_reg_rtx (<MODE>mode);
operands[2] = force_reg (<MODE>mode, operands[2]);
- emit_insn (gen_avx10_2_rcppbf16_<mode> (op, operands[2]));
- emit_insn (gen_avx10_2_mulnepbf16_<mode> (operands[0], operands[1], op));
+ emit_insn (gen_avx10_2_rcpbf16_<mode> (op, operands[2]));
+ emit_insn (gen_avx10_2_mulbf16_<mode> (operands[0], operands[1], op));
DONE;
}
})
@@ -32102,13 +32102,13 @@
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
-(define_insn "avx10_2_<insn>nepbf16_<mode><mask_name>"
+(define_insn "avx10_2_<insn>bf16_<mode><mask_name>"
[(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
(plusminusmultdiv:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "register_operand" "v")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")))]
"TARGET_AVX10_2_256"
- "v<insn>nepbf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
+ "v<insn>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
(define_expand "avx10_2_fmaddnepbf16_<mode>_maskz"
@@ -32388,13 +32388,13 @@
"vsqrtnepbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
-(define_insn "avx10_2_rcppbf16_<mode><mask_name>"
+(define_insn "avx10_2_rcpbf16_<mode><mask_name>"
[(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
(unspec:VBF_AVX10_2
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
UNSPEC_RCP))]
"TARGET_AVX10_2_256"
- "vrcppbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "vrcpbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
(define_insn "avx10_2_getexppbf16_<mode><mask_name>"
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-bf-vector-operations-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-bf-vector-operations-1.c
deleted file mode 100644
index d507e18..0000000
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-bf-vector-operations-1.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64-v3 -mavx10.2-512 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-
-#include <immintrin.h>
-
-typedef __bf16 v32bf __attribute__ ((__vector_size__ (64)));
-
-v32bf
-foo_mul (v32bf a, v32bf b)
-{
- return a * b;
-}
-
-v32bf
-foo_add (v32bf a, v32bf b)
-{
- return a + b;
-}
-
-v32bf
-foo_div (v32bf a, v32bf b)
-{
- return a / b;
-}
-
-v32bf
-foo_sub (v32bf a, v32bf b)
-{
- return a - b;
-}
-
-__attribute__((optimize("fast-math")))
-v32bf
-foo_div_fast_math (v32bf a, v32bf b)
-{
- return a / b;
-}
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-1.c
index 016d1c6..52c802d 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-1.c
@@ -1,17 +1,17 @@
/* { dg-do compile } */
/* { dg-options "-march=x86-64-v3 -mavx10.2-512 -O2" } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
@@ -43,9 +43,9 @@
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
@@ -73,18 +73,18 @@ volatile __mmask32 m32;
void extern
avx10_2_512_test (void)
{
- res = _mm512_addne_pbh (x1, x2);
- res = _mm512_mask_addne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_addne_pbh (m32, x1, x2);
- res = _mm512_subne_pbh (x1, x2);
- res = _mm512_mask_subne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_subne_pbh (m32, x1, x2);
- res = _mm512_mulne_pbh (x1, x2);
- res = _mm512_mask_mulne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_mulne_pbh (m32, x1, x2);
- res = _mm512_divne_pbh (x1, x2);
- res = _mm512_mask_divne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_divne_pbh (m32, x1, x2);
+ res = _mm512_add_pbh (x1, x2);
+ res = _mm512_mask_add_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_add_pbh (m32, x1, x2);
+ res = _mm512_sub_pbh (x1, x2);
+ res = _mm512_mask_sub_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_sub_pbh (m32, x1, x2);
+ res = _mm512_mul_pbh (x1, x2);
+ res = _mm512_mask_mul_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_mul_pbh (m32, x1, x2);
+ res = _mm512_div_pbh (x1, x2);
+ res = _mm512_mask_div_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_div_pbh (m32, x1, x2);
res = _mm512_max_pbh (x1, x2);
res = _mm512_mask_max_pbh (res, m32, x1, x2);
res = _mm512_maskz_max_pbh (m32, x1, x2);
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-vector-operations-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-vector-operations-1.c
new file mode 100644
index 0000000..e224a36
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-bf16-vector-operations-1.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64-v3 -mavx10.2-512 -O2" } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+typedef __bf16 v32bf __attribute__ ((__vector_size__ (64)));
+
+v32bf
+foo_mul (v32bf a, v32bf b)
+{
+ return a * b;
+}
+
+v32bf
+foo_add (v32bf a, v32bf b)
+{
+ return a + b;
+}
+
+v32bf
+foo_div (v32bf a, v32bf b)
+{
+ return a / b;
+}
+
+v32bf
+foo_sub (v32bf a, v32bf b)
+{
+ return a - b;
+}
+
+__attribute__((optimize("fast-math")))
+v32bf
+foo_div_fast_math (v32bf a, v32bf b)
+{
+ return a / b;
+}
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vaddnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vaddbf16-2.c
index 04d1b95..fe13c64 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vaddnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vaddbf16-2.c
@@ -33,9 +33,9 @@ TEST (void)
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_addne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_addne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_addne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_add_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_add_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_add_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vdivnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vdivbf16-2.c
index 1f23cd8..54bc275 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vdivnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vdivbf16-2.c
@@ -33,9 +33,9 @@ TEST (void)
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_divne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_divne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_divne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_div_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_div_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_div_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vmulnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vmulbf16-2.c
index 8663f78..6e03d71 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vmulnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vmulbf16-2.c
@@ -33,9 +33,9 @@ TEST (void)
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_mulne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_mulne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_mulne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_mul_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_mul_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_mul_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vrcppbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vrcpbf16-2.c
index 9bb620eb..9bb620eb 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vrcppbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vrcpbf16-2.c
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vsubnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vsubbf16-2.c
index 244fa49..7e8df5f 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vsubnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vsubbf16-2.c
@@ -33,9 +33,9 @@ TEST (void)
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_subne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_subne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_subne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_sub_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_sub_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_sub_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-bf-vector-operations-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-bf-vector-operations-1.c
deleted file mode 100644
index 5a8cb1e..0000000
--- a/gcc/testsuite/gcc.target/i386/avx10_2-bf-vector-operations-1.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-
-#include <immintrin.h>
-
-typedef __bf16 v16bf __attribute__ ((__vector_size__ (32)));
-typedef __bf16 v8bf __attribute__ ((__vector_size__ (16)));
-
-v16bf
-foo_mul_256 (v16bf a, v16bf b)
-{
- return a * b;
-}
-
-v16bf
-foo_add_256 (v16bf a, v16bf b)
-{
- return a + b;
-}
-
-v16bf
-foo_div_256 (v16bf a, v16bf b)
-{
- return a / b;
-}
-
-v16bf
-foo_sub_256 (v16bf a, v16bf b)
-{
- return a - b;
-}
-
-__attribute__((optimize("fast-math")))
-v16bf
-foo_div_fast_math_256 (v16bf a, v16bf b)
-{
- return a / b;
-}
-
-v8bf
-foo_mul_128 (v8bf a, v8bf b)
-{
- return a * b;
-}
-
-v8bf
-foo_add_128 (v8bf a, v8bf b)
-{
- return a + b;
-}
-
-v8bf
-foo_div_128 (v8bf a, v8bf b)
-{
- return a / b;
-}
-
-v8bf
-foo_sub_128 (v8bf a, v8bf b)
-{
- return a - b;
-}
-
-__attribute__((optimize("fast-math")))
-v8bf
-foo_div_fast_math_128 (v8bf a, v8bf b)
-{
- return a / b;
-}
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-bf16-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-bf16-1.c
index c0fd3b6..7512af7 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-bf16-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-bf16-1.c
@@ -1,29 +1,29 @@
/* { dg-do compile } */
/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
@@ -86,12 +86,12 @@
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
@@ -136,33 +136,33 @@ volatile __mmask8 m8;
void extern
avx10_2_test (void)
{
- res = _mm256_addne_pbh (x1, x2);
- res = _mm256_mask_addne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_addne_pbh (m16, x1, x2);
- res1 = _mm_addne_pbh (x3, x4);
- res1 = _mm_mask_addne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_addne_pbh (m8, x3, x4);
+ res = _mm256_add_pbh (x1, x2);
+ res = _mm256_mask_add_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_add_pbh (m16, x1, x2);
+ res1 = _mm_add_pbh (x3, x4);
+ res1 = _mm_mask_add_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_add_pbh (m8, x3, x4);

- res = _mm256_subne_pbh (x1, x2);
- res = _mm256_mask_subne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_subne_pbh (m16, x1, x2);
- res1 = _mm_subne_pbh (x3, x4);
- res1 = _mm_mask_subne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_subne_pbh (m8, x3, x4);
+ res = _mm256_sub_pbh (x1, x2);
+ res = _mm256_mask_sub_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_sub_pbh (m16, x1, x2);
+ res1 = _mm_sub_pbh (x3, x4);
+ res1 = _mm_mask_sub_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_sub_pbh (m8, x3, x4);

- res = _mm256_mulne_pbh (x1, x2);
- res = _mm256_mask_mulne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_mulne_pbh (m16, x1, x2);
- res1 = _mm_mulne_pbh (x3, x4);
- res1 = _mm_mask_mulne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_mulne_pbh (m8, x3, x4);
+ res = _mm256_mul_pbh (x1, x2);
+ res = _mm256_mask_mul_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_mul_pbh (m16, x1, x2);
+ res1 = _mm_mul_pbh (x3, x4);
+ res1 = _mm_mask_mul_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_mul_pbh (m8, x3, x4);

- res = _mm256_divne_pbh (x1, x2);
- res = _mm256_mask_divne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_divne_pbh (m16, x1, x2);
- res1 = _mm_divne_pbh (x3, x4);
- res1 = _mm_mask_divne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_divne_pbh (m8, x3, x4);
+ res = _mm256_div_pbh (x1, x2);
+ res = _mm256_mask_div_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_div_pbh (m16, x1, x2);
+ res1 = _mm_div_pbh (x3, x4);
+ res1 = _mm_mask_div_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_div_pbh (m8, x3, x4);

 res = _mm256_max_pbh (x1, x2);
res = _mm256_mask_max_pbh (res, m16, x1, x2);
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-bf16-vector-operations-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-bf16-vector-operations-1.c
new file mode 100644
index 0000000..530167b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-bf16-vector-operations-1.c
@@ -0,0 +1,79 @@
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+typedef __bf16 v16bf __attribute__ ((__vector_size__ (32)));
+typedef __bf16 v8bf __attribute__ ((__vector_size__ (16)));
+
+v16bf
+foo_mul_256 (v16bf a, v16bf b)
+{
+ return a * b;
+}
+
+v16bf
+foo_add_256 (v16bf a, v16bf b)
+{
+ return a + b;
+}
+
+v16bf
+foo_div_256 (v16bf a, v16bf b)
+{
+ return a / b;
+}
+
+v16bf
+foo_sub_256 (v16bf a, v16bf b)
+{
+ return a - b;
+}
+
+__attribute__((optimize("fast-math")))
+v16bf
+foo_div_fast_math_256 (v16bf a, v16bf b)
+{
+ return a / b;
+}
+
+v8bf
+foo_mul_128 (v8bf a, v8bf b)
+{
+ return a * b;
+}
+
+v8bf
+foo_add_128 (v8bf a, v8bf b)
+{
+ return a + b;
+}
+
+v8bf
+foo_div_128 (v8bf a, v8bf b)
+{
+ return a / b;
+}
+
+v8bf
+foo_sub_128 (v8bf a, v8bf b)
+{
+ return a - b;
+}
+
+__attribute__((optimize("fast-math")))
+v8bf
+foo_div_fast_math_128 (v8bf a, v8bf b)
+{
+ return a / b;
+}
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fast-math-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf16-vector-fast-math-1.c
index d9f78d4..28856b1 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fast-math-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf16-vector-fast-math-1.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */

typedef __bf16 v4bf __attribute__ ((__vector_size__ (8)));
typedef __bf16 v2bf __attribute__ ((__vector_size__ (4)));
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-operations-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf16-vector-operations-1.c
index 89fac2c..22ba8a2 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-operations-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf16-vector-operations-1.c
@@ -1,9 +1,9 @@
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */

typedef __bf16 v4bf __attribute__ ((__vector_size__ (8)));
typedef __bf16 v2bf __attribute__ ((__vector_size__ (4)));
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-vrcppbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-vaddbf16-2.c
index 4914b25..d880454 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-vrcppbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-vaddbf16-2.c
@@ -6,11 +6,11 @@
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vrcppbf16-2.c"
+#include "avx10_2-512-vaddbf16-2.c"

#undef AVX512F_LEN
#undef AVX512F_LEN_HALF

#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vrcppbf16-2.c"
+#include "avx10_2-512-vaddbf16-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-vaddnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-vdivbf16-2.c
index 893fa0c..69d5019 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-vaddnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-vdivbf16-2.c
@@ -6,11 +6,11 @@
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vaddnepbf16-2.c"
+#include "avx10_2-512-vdivbf16-2.c"

#undef AVX512F_LEN
#undef AVX512F_LEN_HALF

#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vaddnepbf16-2.c"
+#include "avx10_2-512-vdivbf16-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-vdivnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-vmulbf16-2.c
index 199a6ec..568c0a9 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-vdivnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-vmulbf16-2.c
@@ -6,11 +6,11 @@
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vdivnepbf16-2.c"
+#include "avx10_2-512-vmulbf16-2.c"

#undef AVX512F_LEN
#undef AVX512F_LEN_HALF

#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vdivnepbf16-2.c"
+#include "avx10_2-512-vmulbf16-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-vmulnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-vrcpbf16-2.c
index 1daa21f..367b2cf 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-vmulnepbf16-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-vrcpbf16-2.c
@@ -6,11 +6,11 @@
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vmulnepbf16-2.c"
+#include "avx10_2-512-vrcpbf16-2.c"

#undef AVX512F_LEN
#undef AVX512F_LEN_HALF

#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vmulnepbf16-2.c"
+#include "avx10_2-512-vrcpbf16-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-vsubbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-vsubbf16-2.c
new file mode 100644
index 0000000..16f444a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-vsubbf16-2.c
@@ -0,0 +1,16 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -march=x86-64-v3 -mavx10.2" } */
+/* { dg-require-effective-target avx10_2 } */
+
+#define AVX10_2
+#define AVX512VL
+#define AVX512F_LEN 256
+#define AVX512F_LEN_HALF 128
+#include "avx10_2-512-vsubbf16-2.c"
+
+#undef AVX512F_LEN
+#undef AVX512F_LEN_HALF
+
+#define AVX512F_LEN 128
+#define AVX512F_LEN_HALF 128
+#include "avx10_2-512-vsubbf16-2.c"
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-vsubnepbf16-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-vsubnepbf16-2.c
deleted file mode 100644
index 907a705..0000000
--- a/gcc/testsuite/gcc.target/i386/avx10_2-vsubnepbf16-2.c
+++ /dev/null
@@ -1,16 +0,0 @@
-/* { dg-do run } */
-/* { dg-options "-O2 -march=x86-64-v3 -mavx10.2" } */
-/* { dg-require-effective-target avx10_2 } */
-
-#define AVX10_2
-#define AVX512VL
-#define AVX512F_LEN 256
-#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vsubnepbf16-2.c"
-
-#undef AVX512F_LEN
-#undef AVX512F_LEN_HALF
-
-#define AVX512F_LEN 128
-#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vsubnepbf16-2.c"
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index baafa1d..1785a9d 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -10968,7 +10968,7 @@ proc check_effective_target_avx10_2 { } {
{
__asm__ volatile ("vdpphps\t%ymm4, %ymm5, %ymm6");
__asm__ volatile ("vcvthf82ph\t%xmm5, %ymm6");
- __asm__ volatile ("vaddnepbf16\t%ymm4, %ymm5, %ymm6");
+ __asm__ volatile ("vaddbf16\t%ymm4, %ymm5, %ymm6");
__asm__ volatile ("vcvtph2ibs\t%ymm5, %ymm6");
__asm__ volatile ("vminmaxpd\t$123, %ymm4, %ymm5, %ymm6");
}
@@ -10983,7 +10983,7 @@ proc check_effective_target_avx10_2_512 { } {
{
__asm__ volatile ("vdpphps\t%zmm4, %zmm5, %zmm6");
__asm__ volatile ("vcvthf82ph\t%ymm5, %zmm6");
- __asm__ volatile ("vaddnepbf16\t%zmm4, %zmm5, %zmm6");
+ __asm__ volatile ("vaddbf16\t%zmm4, %zmm5, %zmm6");
__asm__ volatile ("vcvtph2ibs\t%zmm5, %zmm6");
__asm__ volatile ("vminmaxpd\t$123, %zmm4, %zmm5, %zmm6");
}