author     Chandler Carruth <chandlerc@gmail.com>   2014-09-21 12:49:46 +0000
committer  Chandler Carruth <chandlerc@gmail.com>   2014-09-21 12:49:46 +0000
commit     33eda72802c127e330f41195755a6675c696ebdf (patch)
tree       39f98cb5d5ff0dd863e420cab3ac3d975d244aaf
parent     43f5974ea06cac83e3b8bcf128de98f7bf3ccaa8 (diff)
download   llvm-33eda72802c127e330f41195755a6675c696ebdf.zip
           llvm-33eda72802c127e330f41195755a6675c696ebdf.tar.gz
           llvm-33eda72802c127e330f41195755a6675c696ebdf.tar.bz2
[x86] Teach the new vector shuffle lowering the basics about insertion
of a single element into a zero vector for v4f64 and v4i64 in AVX.
Ironically, there is less to see here because xor+blend is so crazy fast
that we can't really beat that to zero the high 128-bit lane.
llvm-svn: 218214
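
To make the mask check concrete: the new guard fires when exactly one element of the shuffle result comes from V2 (mask indices 4-7) and that element is the result's element zero. Below is a minimal standalone sketch of that predicate; qualifiesForElementInsertion is a hypothetical name for illustration, not the in-tree lowering code:

    #include <algorithm>
    #include <array>
    #include <cassert>

    // Mirrors the check added in this patch: exactly one mask entry reads
    // from V2 (indices 4-7), and it lands in the zero element of the result.
    static bool qualifiesForElementInsertion(const std::array<int, 4> &Mask) {
      int NumV2Elements =
          std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
      return NumV2Elements == 1 && Mask[0] >= 4;
    }

    int main() {
      assert(qualifiesForElementInsertion({4, 1, 2, 3}));  // V2[0] into element 0
      assert(!qualifiesForElementInsertion({0, 5, 2, 3})); // V2 element, wrong slot
      assert(!qualifiesForElementInsertion({4, 5, 2, 3})); // two V2 elements
      return 0;
    }

Note that the tests below use masks like <0, 5, 6, 7> (three zero elements from V2); the shuffle lowering canonicalizes such shuffles by commuting the inputs first, which is why the patch only has to match the single-V2-element form.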
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp         18
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll  49
2 files changed, 67 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e0cf603..800f9d4 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -9243,6 +9243,15 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
   if (isShuffleEquivalent(Mask, 5, 1, 7, 3))
     return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
 
+  // If we have a single input to the zero element, insert that into V1 if we
+  // can do so cheaply.
+  int NumV2Elements =
+      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+  if (NumV2Elements == 1 && Mask[0] >= 4)
+    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+            MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
+      return Insertion;
+
   if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                 DAG))
     return Blend;
@@ -9306,6 +9315,15 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
   if (is128BitLaneCrossingShuffleMask(MVT::v4i64, Mask))
     return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
 
+  // If we have a single input to the zero element, insert that into V1 if we
+  // can do so cheaply.
+  int NumV2Elements =
+      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+  if (NumV2Elements == 1 && Mask[0] >= 4)
+    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+            MVT::v4i64, DL, V1, V2, Mask, Subtarget, DAG))
+      return Insertion;
+
   // AVX1 doesn't provide any facilities for v4i64 shuffles, bitcast and
   // delegate to floating point code.
   V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f64, V1);
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 56bbce6..c5f49d7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -563,3 +563,52 @@ define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
   ret <4 x i64> %f
 }
+
+define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
+; ALL-LABEL: @insert_reg_and_zero_v4i64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq %rdi, %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %v = insertelement <4 x i64> undef, i64 %a, i64 0
+  %shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
+; ALL-LABEL: @insert_mem_and_zero_v4i64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq (%rdi), %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %a = load i64* %ptr
+  %v = insertelement <4 x i64> undef, i64 %a, i64 0
+  %shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
+; ALL-LABEL: @insert_reg_and_zero_v4f64
+; ALL:       # BB#0:
+; ALL:         vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %v = insertelement <4 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x double> %shuffle
+}
+
+define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
+; ALL-LABEL: @insert_mem_and_zero_v4f64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovsd (%rdi), %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %a = load double* %ptr
+  %v = insertelement <4 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x double> %shuffle
+}
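
For readers who want to see the checked pattern outside of FileCheck, here is a sketch in C++ with AVX intrinsics (not part of the patch; assumes an x86-64 compiler with -mavx) that performs the same insert-into-zero-vector operation the tests exercise, i.e. the vmovsd/vxorpd/vblendpd sequence the commit message refers to:

    #include <immintrin.h>
    #include <cstdio>

    // Insert a scalar double into element 0 of a <4 x double> whose other
    // elements are zero -- the pattern insert_reg_and_zero_v4f64 tests.
    static __m256d insert_and_zero_v4f64(double a) {
      __m128d lo   = _mm_set_sd(a);              // vmovsd: a in lane 0, lane 1 zero
      __m256d v    = _mm256_castpd128_pd256(lo); // widen; upper 128 bits undefined
      __m256d zero = _mm256_setzero_pd();        // vxorpd %ymm1, %ymm1, %ymm1
      // vblendpd: take element 0 from v, elements 1-3 from the zero vector,
      // which also discards the undefined upper lane of v.
      return _mm256_blend_pd(zero, v, 0x1);
    }

    int main() {
      alignas(32) double out[4];
      _mm256_store_pd(out, insert_and_zero_v4f64(42.0));
      std::printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]); // 42 0 0 0
      return 0;
    }

The blend immediate 0x1 selects only element 0 from the second operand, so the zero vector produced by the self-xor supplies the entire high 128-bit lane, which is exactly why xor+blend is hard to beat here.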