; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=CHECK,X64
; RUN: llc < %s -verify-machineinstrs -mtriple=i686-unknown-unknown --show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=CHECK,X86

define <16 x bfloat> @test_int_x86_avx10_add_bf16_256(<16 x bfloat> %x1, <16 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_add_bf16_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x58,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fadd <16 x bfloat> %x1, %x2
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_mask_add_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_add_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vaddbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x58,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_add_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vaddbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x58,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %res0 = fadd <16 x bfloat> %x1, %x2
  %res = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> %src
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_maskz_add_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_add_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vaddbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x58,0xc2]
; X64-NEXT:    vaddbf16 (%rsi), %ymm1, %ymm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x58,0x0e]
; X64-NEXT:    vaddbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x58,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_add_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vaddbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x58,0xc2]
; X86-NEXT:    vaddbf16 (%eax), %ymm1, %ymm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x58,0x08]
; X86-NEXT:    vaddbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x58,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %val = load <16 x bfloat>, ptr %ptr
  %res0 = fadd <16 x bfloat> %x1, %x2
  %res1 = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> zeroinitializer
  %t2 = fadd <16 x bfloat> %x1, %val
  %res2 = select <16 x i1> %mask, <16 x bfloat> %t2, <16 x bfloat> zeroinitializer
  %res3 = fadd <16 x bfloat> %res1, %res2
  ret <16 x bfloat> %res3
}

define <8 x bfloat> @test_int_x86_avx10_add_bf16_128(<8 x bfloat> %x1, <8 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_add_bf16_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x58,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fadd <8 x bfloat> %x1, %x2
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_mask_add_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_add_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vaddbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x58,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_add_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vaddbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x58,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %res0 = fadd <8 x bfloat> %x1, %x2
  %res = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> %src
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_maskz_add_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_add_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vaddbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x58,0xc2]
; X64-NEXT:    vaddbf16 (%rsi), %xmm1, %xmm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x58,0x0e]
; X64-NEXT:    vaddbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x58,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_add_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vaddbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x58,0xc2]
; X86-NEXT:    vaddbf16 (%eax), %xmm1, %xmm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x58,0x08]
; X86-NEXT:    vaddbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x58,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %val = load <8 x bfloat>, ptr %ptr
  %res0 = fadd <8 x bfloat> %x1, %x2
  %res1 = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> zeroinitializer
  %t2 = fadd <8 x bfloat> %x1, %val
  %res2 = select <8 x i1> %mask, <8 x bfloat> %t2, <8 x bfloat> zeroinitializer
  %res3 = fadd <8 x bfloat> %res1, %res2
  ret <8 x bfloat> %res3
}
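
; fsub lowers to vsubbf16; covers the unmasked, merge-masked (select with
; %src) and zero-masked forms at 256 and 128 bits.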
define <16 x bfloat> @test_int_x86_avx10_sub_bf16_256(<16 x bfloat> %x1, <16 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_sub_bf16_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x5c,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fsub <16 x bfloat> %x1, %x2
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_mask_sub_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_sub_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x5c,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_sub_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x5c,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %res0 = fsub <16 x bfloat> %x1, %x2
  %res = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> %src
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_sub_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5c,0xc2]
; X64-NEXT:    vsubbf16 (%rsi), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x5c,0x0e]
; X64-NEXT:    vsubbf16 %ymm1, %ymm0, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x5c,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5c,0xc2]
; X86-NEXT:    vsubbf16 (%eax), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x5c,0x08]
; X86-NEXT:    vsubbf16 %ymm1, %ymm0, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x5c,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %val = load <16 x bfloat>, ptr %ptr
  %res0 = fsub <16 x bfloat> %x1, %x2
  %res1 = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> zeroinitializer
  %t2 = fsub <16 x bfloat> %x1, %val
  %res2 = select <16 x i1> %mask, <16 x bfloat> %t2, <16 x bfloat> zeroinitializer
  %res3 = fsub <16 x bfloat> %res1, %res2
  ret <16 x bfloat> %res3
}

define <8 x bfloat> @test_int_x86_avx10_sub_bf16_128(<8 x bfloat> %x1, <8 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_sub_bf16_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x5c,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fsub <8 x bfloat> %x1, %x2
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_mask_sub_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_sub_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x5c,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_sub_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x5c,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %res0 = fsub <8 x bfloat> %x1, %x2
  %res = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> %src
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_sub_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5c,0xc2]
; X64-NEXT:    vsubbf16 (%rsi), %xmm1, %xmm1 # encoding: [0x62,0xf5,0x75,0x08,0x5c,0x0e]
; X64-NEXT:    vsubbf16 %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x5c,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5c,0xc2]
; X86-NEXT:    vsubbf16 (%eax), %xmm1, %xmm1 # encoding: [0x62,0xf5,0x75,0x08,0x5c,0x08]
; X86-NEXT:    vsubbf16 %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x5c,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %val = load <8 x bfloat>, ptr %ptr
  %res0 = fsub <8 x bfloat> %x1, %x2
  %res1 = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> zeroinitializer
  %t2 = fsub <8 x bfloat> %x1, %val
  %res2 = select <8 x i1> %mask, <8 x bfloat> %t2, <8 x bfloat> zeroinitializer
  %res3 = fsub <8 x bfloat> %res1, %res2
  ret <8 x bfloat> %res3
}
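
; fmul lowers to vmulbf16; same unmasked/mask/maskz coverage as above.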
define <16 x bfloat> @test_int_x86_avx10_mul_bf16_256(<16 x bfloat> %x1, <16 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_mul_bf16_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x59,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fmul <16 x bfloat> %x1, %x2
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_mask_mul_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_mul_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vmulbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x59,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_mul_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vmulbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x59,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %res0 = fmul <16 x bfloat> %x1, %x2
  %res = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> %src
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_maskz_mul_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_mul_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vmulbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x59,0xc2]
; X64-NEXT:    vmulbf16 (%rsi), %ymm1, %ymm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x59,0x0e]
; X64-NEXT:    vmulbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x59,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_mul_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vmulbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x59,0xc2]
; X86-NEXT:    vmulbf16 (%eax), %ymm1, %ymm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x59,0x08]
; X86-NEXT:    vmulbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x59,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %val = load <16 x bfloat>, ptr %ptr
  %res0 = fmul <16 x bfloat> %x1, %x2
  %res1 = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> zeroinitializer
  %t2 = fmul <16 x bfloat> %x1, %val
  %res2 = select <16 x i1> %mask, <16 x bfloat> %t2, <16 x bfloat> zeroinitializer
  %res3 = fmul <16 x bfloat> %res1, %res2
  ret <16 x bfloat> %res3
}

define <8 x bfloat> @test_int_x86_avx10_mul_bf16_128(<8 x bfloat> %x1, <8 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_mul_bf16_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x59,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fmul <8 x bfloat> %x1, %x2
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_mask_mul_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_mul_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vmulbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x59,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_mul_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vmulbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x59,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %res0 = fmul <8 x bfloat> %x1, %x2
  %res = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> %src
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_maskz_mul_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_mul_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vmulbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x59,0xc2]
; X64-NEXT:    vmulbf16 (%rsi), %xmm1, %xmm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x59,0x0e]
; X64-NEXT:    vmulbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x59,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_mul_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vmulbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x59,0xc2]
; X86-NEXT:    vmulbf16 (%eax), %xmm1, %xmm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x59,0x08]
; X86-NEXT:    vmulbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x59,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %val = load <8 x bfloat>, ptr %ptr
  %res0 = fmul <8 x bfloat> %x1, %x2
  %res1 = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> zeroinitializer
  %t2 = fmul <8 x bfloat> %x1, %val
  %res2 = select <8 x i1> %mask, <8 x bfloat> %t2, <8 x bfloat> zeroinitializer
  %res3 = fmul <8 x bfloat> %res1, %res2
  ret <8 x bfloat> %res3
}
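
; fdiv lowers to vdivbf16; same unmasked/mask/maskz coverage as above.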
define <16 x bfloat> @test_int_x86_avx10_div_bf16_256(<16 x bfloat> %x1, <16 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_div_bf16_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x5e,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fdiv <16 x bfloat> %x1, %x2
  ret <16 x bfloat> %res
}

define <16 x bfloat> @test_int_x86_avx10_mask_div_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_div_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vdivbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x5e,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_div_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vdivbf16 %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x75,0x29,0x5e,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %res0 = fdiv <16 x bfloat> %x1, %x2
  %res = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> %src
  ret <16 x bfloat> %res
}

; FIXME: assembly order is different from fp16 ones
define <16 x bfloat> @test_int_x86_avx10_maskz_div_bf16_256(<16 x bfloat> %src, <16 x bfloat> %x1, <16 x bfloat> %x2, i16 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_div_bf16_256:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vdivbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5e,0xc2]
; X64-NEXT:    vdivbf16 (%rsi), %ymm1, %ymm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5e,0x0e]
; X64-NEXT:    vdivbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x5e,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_div_bf16_256:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vdivbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5e,0xc2]
; X86-NEXT:    vdivbf16 (%eax), %ymm1, %ymm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5e,0x08]
; X86-NEXT:    vdivbf16 %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x5e,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i16 %msk to <16 x i1>
  %val = load <16 x bfloat>, ptr %ptr
  %res0 = fdiv <16 x bfloat> %x1, %x2
  %res1 = select <16 x i1> %mask, <16 x bfloat> %res0, <16 x bfloat> zeroinitializer
  %t2 = fdiv <16 x bfloat> %x1, %val
  %res2 = select <16 x i1> %mask, <16 x bfloat> %t2, <16 x bfloat> zeroinitializer
  %res3 = fdiv <16 x bfloat> %res1, %res2
  ret <16 x bfloat> %res3
}

define <8 x bfloat> @test_int_x86_avx10_div_bf16_128(<8 x bfloat> %x1, <8 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_div_bf16_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x5e,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fdiv <8 x bfloat> %x1, %x2
  ret <8 x bfloat> %res
}

define <8 x bfloat> @test_int_x86_avx10_mask_div_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_mask_div_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vdivbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x5e,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_mask_div_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vdivbf16 %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x75,0x09,0x5e,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %res0 = fdiv <8 x bfloat> %x1, %x2
  %res = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> %src
  ret <8 x bfloat> %res
}

; FIXME: assembly order is different from fp16 ones
define <8 x bfloat> @test_int_x86_avx10_maskz_div_bf16_128(<8 x bfloat> %src, <8 x bfloat> %x1, <8 x bfloat> %x2, i8 %msk, ptr %ptr) {
; X64-LABEL: test_int_x86_avx10_maskz_div_bf16_128:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vdivbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5e,0xc2]
; X64-NEXT:    vdivbf16 (%rsi), %xmm1, %xmm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5e,0x0e]
; X64-NEXT:    vdivbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x5e,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_int_x86_avx10_maskz_div_bf16_128:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vdivbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5e,0xc2]
; X86-NEXT:    vdivbf16 (%eax), %xmm1, %xmm1 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5e,0x08]
; X86-NEXT:    vdivbf16 %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x5e,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
  %mask = bitcast i8 %msk to <8 x i1>
  %val = load <8 x bfloat>, ptr %ptr
  %res0 = fdiv <8 x bfloat> %x1, %x2
  %res1 = select <8 x i1> %mask, <8 x bfloat> %res0, <8 x bfloat> zeroinitializer
  %t2 = fdiv <8 x bfloat> %x1, %val
  %res2 = select <8 x i1> %mask, <8 x bfloat> %t2, <8 x bfloat> zeroinitializer
  %res3 = fdiv <8 x bfloat> %res1, %res2
  ret <8 x bfloat> %res3
}
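
; fcmp lowers to vcmpbf16 into a mask register; the *_mask2 variants also
; check that the and with a constant lane mask stays a scalar andl/andb on
; the kmov result.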
define i16 @test_int_x86_avx10_vcmpbf16256(<16 x bfloat> %x1, <16 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_vcmpbf16256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcmpunordbf16 %ymm1, %ymm0, %k0 # encoding: [0x62,0xf3,0x7f,0x28,0xc2,0xc1,0x03]
; CHECK-NEXT:    kmovd %k0, %eax # encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = fcmp uno <16 x bfloat> %x1, %x2
  %res = bitcast <16 x i1> %1 to i16
  ret i16 %res
}

define i16 @test_int_x86_avx10_vcmpbf16256_mask2(<16 x bfloat> %x1, <16 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_vcmpbf16256_mask2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcmpeqbf16 %ymm1, %ymm0, %k0 # encoding: [0x62,0xf3,0x7f,0x28,0xc2,0xc1,0x00]
; CHECK-NEXT:    kmovd %k0, %eax # encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT:    andl $3, %eax # encoding: [0x83,0xe0,0x03]
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = fcmp oeq <16 x bfloat> %x1, %x2
  %2 = and <16 x i1> %1, <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>
  %3 = bitcast <16 x i1> %2 to i16
  ret i16 %3
}

define i8 @test_int_x86_avx10_vcmpbf16128(<8 x bfloat> %x1, <8 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_vcmpbf16128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcmpunordbf16 %xmm1, %xmm0, %k0 # encoding: [0x62,0xf3,0x7f,0x08,0xc2,0xc1,0x03]
; CHECK-NEXT:    kmovd %k0, %eax # encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = fcmp uno <8 x bfloat> %x1, %x2
  %res = bitcast <8 x i1> %1 to i8
  ret i8 %res
}

define i8 @test_int_x86_avx10_vcmpbf16128_mask2(<8 x bfloat> %x1, <8 x bfloat> %x2) {
; CHECK-LABEL: test_int_x86_avx10_vcmpbf16128_mask2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcmpeqbf16 %xmm1, %xmm0, %k0 # encoding: [0x62,0xf3,0x7f,0x08,0xc2,0xc1,0x00]
; CHECK-NEXT:    kmovd %k0, %eax # encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT:    andb $3, %al # encoding: [0x24,0x03]
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = fcmp oeq <8 x bfloat> %x1, %x2
  %2 = and <8 x i1> %1, <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>
  %3 = bitcast <8 x i1> %2 to i8
  ret i8 %3
}
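
; llvm.sqrt.* lowers to vsqrtbf16, with merge- and zero-masking expressed
; through a select on the bitcast mask.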
define <16 x bfloat> @test_sqrt_bf16_256(<16 x bfloat> %a0) {
; CHECK-LABEL: test_sqrt_bf16_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtbf16 %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x51,0xc0]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = tail call <16 x bfloat> @llvm.sqrt.v16bf16(<16 x bfloat> %a0)
  ret <16 x bfloat> %1
}

define <16 x bfloat> @test_mm256_mask_sqrt_pbh(<16 x bfloat> %__W, i16 %__U, <16 x bfloat> %__A) {
; X64-LABEL: test_mm256_mask_sqrt_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsqrtbf16 %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x51,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask_sqrt_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vsqrtbf16 %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x51,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <16 x bfloat> @llvm.sqrt.v16bf16(<16 x bfloat> %__A)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__W
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_maskz_sqrt_pbh(i16 %__U, <16 x bfloat> %__A) {
; X64-LABEL: test_mm256_maskz_sqrt_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsqrtbf16 %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x51,0xc0]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_maskz_sqrt_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vsqrtbf16 %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x51,0xc0]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <16 x bfloat> @llvm.sqrt.v16bf16(<16 x bfloat> %__A)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> zeroinitializer
  ret <16 x bfloat> %2
}

define <8 x bfloat> @test_sqrt_bf16_128(<8 x bfloat> %a0) {
; CHECK-LABEL: test_sqrt_bf16_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtbf16 %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x51,0xc0]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = tail call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %a0)
  ret <8 x bfloat> %1
}

define <8 x bfloat> @test_mm_mask_sqrt_pbh(<8 x bfloat> %__W, i8 %__U, <8 x bfloat> %__A) {
; X64-LABEL: test_mm_mask_sqrt_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsqrtbf16 %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x51,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask_sqrt_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vsqrtbf16 %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x51,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %__A)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__W
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_maskz_sqrt_pbh(i8 %__U, <8 x bfloat> %__A) {
; X64-LABEL: test_mm_maskz_sqrt_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vsqrtbf16 %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x51,0xc0]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_maskz_sqrt_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vsqrtbf16 %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x51,0xc0]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %__A)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> zeroinitializer
  ret <8 x bfloat> %2
}
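
; llvm.fma.* lowers to vfmadd/vfmsub/vfnmadd/vfnmsub (fneg of an operand
; folds into the instruction); the masked variants select the 132/231/213
; form whose destination is the passthrough operand (%__A, %__C, or none).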
define <16 x bfloat> @test_mm256_fmaddne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; CHECK-LABEL: test_mm256_fmaddne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmadd213bf16 %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x74,0x28,0xa8,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C)
  ret <16 x bfloat> %0
}

define <16 x bfloat> @test_mm256_mask_fmaddne_pbh(<16 x bfloat> %__A, i16 zeroext %__U, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_mask_fmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x98,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask_fmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x98,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__A
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_mask3_fmaddne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C, i16 zeroext %__U) {
; X64-LABEL: test_mm256_mask3_fmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xb8,0xd1]
; X64-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask3_fmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xb8,0xd1]
; X86-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__C
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_maskz_fmaddne_pbh(i16 zeroext %__U, <16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_maskz_fmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xa8,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_maskz_fmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xa8,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> zeroinitializer
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_fmsubne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; CHECK-LABEL: test_mm256_fmsubne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmsub213bf16 %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x74,0x28,0xaa,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %fneg.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %fneg.i)
  ret <16 x bfloat> %0
}

define <16 x bfloat> @test_mm256_mask_fmsubne_pbh(<16 x bfloat> %__A, i16 zeroext %__U, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_mask_fmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmsub132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x9a,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask_fmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x9a,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %fneg.i.i)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__A
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_mask3_fmsubne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C, i16 zeroext %__U) {
; X64-LABEL: test_mm256_mask3_fmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmsub231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xba,0xd1]
; X64-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask3_fmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xba,0xd1]
; X86-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %fneg.i.i)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__C
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_maskz_fmsubne_pbh(i16 zeroext %__U, <16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_maskz_fmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmsub213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xaa,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_maskz_fmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xaa,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %fneg.i.i)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> zeroinitializer
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_fnmaddne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; CHECK-LABEL: test_mm256_fnmaddne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfnmadd213bf16 %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x74,0x28,0xac,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %fneg.i = fneg <16 x bfloat> %__B
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i, <16 x bfloat> %__C)
  ret <16 x bfloat> %0
}

define <16 x bfloat> @test_mm256_mask_fnmaddne_pbh(<16 x bfloat> %__A, i16 zeroext %__U, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_mask_fnmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmadd132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x9c,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask_fnmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x9c,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__B
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i.i, <16 x bfloat> %__C)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__A
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_mask3_fnmaddne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C, i16 zeroext %__U) {
; X64-LABEL: test_mm256_mask3_fnmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmadd231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xbc,0xd1]
; X64-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask3_fnmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xbc,0xd1]
; X86-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__B
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i.i, <16 x bfloat> %__C)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__C
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_maskz_fnmaddne_pbh(i16 zeroext %__U, <16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_maskz_fnmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmadd213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xac,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_maskz_fnmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xac,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__B
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i.i, <16 x bfloat> %__C)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> zeroinitializer
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_fnmsubne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; CHECK-LABEL: test_mm256_fnmsubne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfnmsub213bf16 %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x74,0x28,0xae,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %fneg.i = fneg <16 x bfloat> %__B
  %fneg1.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i, <16 x bfloat> %fneg1.i)
  ret <16 x bfloat> %0
}

define <16 x bfloat> @test_mm256_mask_fnmsubne_pbh(<16 x bfloat> %__A, i16 zeroext %__U, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_mask_fnmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmsub132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x9e,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask_fnmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub132bf16 %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x29,0x9e,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__B
  %fneg1.i.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i.i, <16 x bfloat> %fneg1.i.i)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__A
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_mask3_fnmsubne_pbh(<16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C, i16 zeroext %__U) {
; X64-LABEL: test_mm256_mask3_fnmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmsub231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xbe,0xd1]
; X64-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_mask3_fnmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub231bf16 %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x29,0xbe,0xd1]
; X86-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__B
  %fneg1.i.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i.i, <16 x bfloat> %fneg1.i.i)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> %__C
  ret <16 x bfloat> %2
}

define <16 x bfloat> @test_mm256_maskz_fnmsubne_pbh(i16 zeroext %__U, <16 x bfloat> %__A, <16 x bfloat> %__B, <16 x bfloat> %__C) {
; X64-LABEL: test_mm256_maskz_fnmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmsub213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xae,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm256_maskz_fnmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub213bf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0xa9,0xae,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <16 x bfloat> %__B
  %fneg1.i.i = fneg <16 x bfloat> %__C
  %0 = tail call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %__A, <16 x bfloat> %fneg.i.i, <16 x bfloat> %fneg1.i.i)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x bfloat> %0, <16 x bfloat> zeroinitializer
  ret <16 x bfloat> %2
}
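
; Same FMA coverage for 128-bit vectors.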
define <8 x bfloat> @test_mm_fmaddne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; CHECK-LABEL: test_mm_fmaddne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmadd213bf16 %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x74,0x08,0xa8,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C)
  ret <8 x bfloat> %0
}

define <8 x bfloat> @test_mm_mask_fmaddne_pbh(<8 x bfloat> %__A, i8 zeroext %__U, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_mask_fmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x98,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask_fmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x98,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__A
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_mask3_fmaddne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C, i8 zeroext %__U) {
; X64-LABEL: test_mm_mask3_fmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xb8,0xd1]
; X64-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask3_fmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xb8,0xd1]
; X86-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__C
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_maskz_fmaddne_pbh(i8 zeroext %__U, <8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_maskz_fmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xa8,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_maskz_fmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xa8,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> zeroinitializer
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_fmsubne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; CHECK-LABEL: test_mm_fmsubne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmsub213bf16 %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x74,0x08,0xaa,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %fneg.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %fneg.i)
  ret <8 x bfloat> %0
}

define <8 x bfloat> @test_mm_mask_fmsubne_pbh(<8 x bfloat> %__A, i8 zeroext %__U, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_mask_fmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmsub132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x9a,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask_fmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x9a,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %fneg.i.i)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__A
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_mask3_fmsubne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C, i8 zeroext %__U) {
; X64-LABEL: test_mm_mask3_fmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmsub231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xba,0xd1]
; X64-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask3_fmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xba,0xd1]
; X86-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %fneg.i.i)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__C
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_maskz_fmsubne_pbh(i8 zeroext %__U, <8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_maskz_fmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmsub213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xaa,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_maskz_fmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xaa,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %fneg.i.i)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> zeroinitializer
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_fnmaddne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; CHECK-LABEL: test_mm_fnmaddne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfnmadd213bf16 %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x74,0x08,0xac,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %fneg.i = fneg <8 x bfloat> %__B
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i, <8 x bfloat> %__C)
  ret <8 x bfloat> %0
}

define <8 x bfloat> @test_mm_mask_fnmaddne_pbh(<8 x bfloat> %__A, i8 zeroext %__U, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_mask_fnmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmadd132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x9c,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask_fnmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x9c,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__B
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i.i, <8 x bfloat> %__C)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__A
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_mask3_fnmaddne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C, i8 zeroext %__U) {
; X64-LABEL: test_mm_mask3_fnmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmadd231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xbc,0xd1]
; X64-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask3_fnmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xbc,0xd1]
; X86-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__B
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i.i, <8 x bfloat> %__C)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__C
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_maskz_fnmaddne_pbh(i8 zeroext %__U, <8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_maskz_fnmaddne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmadd213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xac,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_maskz_fnmaddne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xac,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__B
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i.i, <8 x bfloat> %__C)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> zeroinitializer
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_fnmsubne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; CHECK-LABEL: test_mm_fnmsubne_pbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfnmsub213bf16 %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x74,0x08,0xae,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
entry:
  %fneg.i = fneg <8 x bfloat> %__B
  %fneg1.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i, <8 x bfloat> %fneg1.i)
  ret <8 x bfloat> %0
}

define <8 x bfloat> @test_mm_mask_fnmsubne_pbh(<8 x bfloat> %__A, i8 zeroext %__U, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_mask_fnmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmsub132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x9e,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask_fnmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub132bf16 %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6c,0x09,0x9e,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__B
  %fneg1.i.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i.i, <8 x bfloat> %fneg1.i.i)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__A
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_mask3_fnmsubne_pbh(<8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C, i8 zeroext %__U) {
; X64-LABEL: test_mm_mask3_fnmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmsub231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xbe,0xd1]
; X64-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_mask3_fnmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub231bf16 %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7c,0x09,0xbe,0xd1]
; X86-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__B
  %fneg1.i.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i.i, <8 x bfloat> %fneg1.i.i)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> %__C
  ret <8 x bfloat> %2
}

define <8 x bfloat> @test_mm_maskz_fnmsubne_pbh(i8 zeroext %__U, <8 x bfloat> %__A, <8 x bfloat> %__B, <8 x bfloat> %__C) {
; X64-LABEL: test_mm_maskz_fnmsubne_pbh:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfnmsub213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xae,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: test_mm_maskz_fnmsubne_pbh:
; X86:       # %bb.0: # %entry
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub213bf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x74,0x89,0xae,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
entry:
  %fneg.i.i = fneg <8 x bfloat> %__B
  %fneg1.i.i = fneg <8 x bfloat> %__C
  %0 = tail call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %__A, <8 x bfloat> %fneg.i.i, <8 x bfloat> %fneg1.i.i)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x bfloat> %0, <8 x bfloat> zeroinitializer
  ret <8 x bfloat> %2
}
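
; <32 x bfloat> is illegal with avx10.2-256, so the add is split into two
; 256-bit halves; on i686 the upper half of %b is loaded from the stack.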
define <32 x bfloat> @addv(<32 x bfloat> %a, <32 x bfloat> %b) nounwind {
; X64-LABEL: addv:
; X64:       # %bb.0:
; X64-NEXT:    vaddbf16 %ymm2, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x58,0xc2]
; X64-NEXT:    vaddbf16 %ymm3, %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x58,0xcb]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X86-LABEL: addv:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp # encoding: [0x55]
; X86-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-NEXT:    andl $-32, %esp # encoding: [0x83,0xe4,0xe0]
; X86-NEXT:    subl $32, %esp # encoding: [0x83,0xec,0x20]
; X86-NEXT:    vaddbf16 %ymm2, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x58,0xc2]
; X86-NEXT:    vaddbf16 8(%ebp), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x58,0x8d,0x08,0x00,0x00,0x00]
; X86-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-NEXT:    popl %ebp # encoding: [0x5d]
; X86-NEXT:    retl # encoding: [0xc3]
  %add = fadd <32 x bfloat> %a, %b
  ret <32 x bfloat> %add
}