From 6255bae6c9afe89470f264f903051f64bc15135f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Wed, 24 Jan 2024 10:55:22 +0000
Subject: [X86] Add test coverage based on #78888

---
 llvm/test/CodeGen/X86/icmp-pow2-mask.ll | 142 ++++++++++++++++++++++++++++++++
 1 file changed, 142 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/icmp-pow2-mask.ll

diff --git a/llvm/test/CodeGen/X86/icmp-pow2-mask.ll b/llvm/test/CodeGen/X86/icmp-pow2-mask.ll
new file mode 100644
index 0000000..5eeb456
--- /dev/null
+++ b/llvm/test/CodeGen/X86/icmp-pow2-mask.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512
+
+define <8 x i16> @pow2_mask_v16i8(i8 zeroext %0) {
+; SSE2-LABEL: pow2_mask_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: pow2_mask_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: pow2_mask_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: pow2_mask_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastb %edi, %xmm0
+; AVX512-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: retq
+  %vec = insertelement <1 x i8> poison, i8 %0, i64 0
+  %splat = shufflevector <1 x i8> %vec, <1 x i8> poison, <8 x i32> zeroinitializer
+  %mask = and <8 x i8> %splat,
+  %not = icmp ne <8 x i8> %mask, zeroinitializer
+  %ext = sext <8 x i1> %not to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <16 x i16> @pow2_mask_v16i16(i16 zeroext %0) {
+; SSE-LABEL: pow2_mask_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [32768,16384,8192,4096,2048,1024,512,256]
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pcmpeqw %xmm3, %xmm0
+; SSE-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: pow2_mask_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: pow2_mask_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastw %edi, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+  %vec = insertelement <1 x i16> poison, i16 %0, i64 0
+  %splat = shufflevector <1 x i16> %vec, <1 x i16> poison, <16 x i32> zeroinitializer
+  %mask = and <16 x i16> %splat, <i16 32768, i16 16384, i16 8192, i16 4096, i16 2048, i16 1024, i16 512, i16 256, i16 128, i16 64, i16 32, i16 16, i16 8, i16 4, i16 2, i16 1>
+  %not = icmp ne <16 x i16> %mask, zeroinitializer
+  %ext = sext <16 x i1> %not to <16 x i16>
+  ret <16 x i16> %ext
+}
+
+; PR78888
+define i64 @pow2_mask_v8i8(i8 zeroext %0) {
+; SSE-LABEL: pow2_mask_v8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: pow2_mask_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: pow2_mask_v8i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastb %edi, %xmm0
+; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: retq
+  %vec = insertelement <1 x i8> poison, i8 %0, i64 0
+  %splat = shufflevector <1 x i8> %vec, <1 x i8> poison, <8 x i32> zeroinitializer
+  %mask = and <8 x i8> %splat,
+  %not = icmp ne <8 x i8> %mask, zeroinitializer
+  %ext = sext <8 x i1> %not to <8 x i8>
+  %res = bitcast <8 x i8> %ext to i64
+  ret i64 %res
+}
--
cgit v1.1