about summary refs log tree commit diff
diff options
context:
space:
mode:
authorChandler Carruth <chandlerc@gmail.com>2014-09-21 12:20:44 +0000
committerChandler Carruth <chandlerc@gmail.com>2014-09-21 12:20:44 +0000
commit43f5974ea06cac83e3b8bcf128de98f7bf3ccaa8 (patch)
treee379186d9aeb3b7032357eee7e858a18218a1efb
parent78f479891360d0291cc1af88e976740cf58e1a47 (diff)
downloadllvm-43f5974ea06cac83e3b8bcf128de98f7bf3ccaa8.zip
llvm-43f5974ea06cac83e3b8bcf128de98f7bf3ccaa8.tar.gz
llvm-43f5974ea06cac83e3b8bcf128de98f7bf3ccaa8.tar.bz2
[x86] Teach the new vector shuffle lowering how to lower to UNPCKLPS and UNPCKHPS

UNPCKLPS and UNPCKHPS with AVX vectors by recognizing those patterns when they are repeated for both 128-bit lanes. With this, we now generate the exact same (really nice) code for Quentin's avx_test_case.ll which was the most significant regression reported for the new shuffle lowering. In fact, I'm out of specific test cases for AVX lowering, the rest were AVX2 I think. However, there are a bunch of pretty obvious remaining things to improve with AVX... llvm-svn: 218213
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp8
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll8
2 files changed, 8 insertions, 8 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 78cbcc6..e0cf603 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7166,8 +7166,6 @@ bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
return false;
for (int i = 0, e = Mask.size(); i < e; ++i) {
assert(*Args[i] >= 0 && "Arguments must be positive integers!");
- assert(*Args[i] < (int)Args.size() * 2 &&
- "Argument outside the range of possible shuffle inputs!");
if (Mask[i] != -1 && Mask[i] != *Args[i])
return false;
}
@@ -9344,6 +9342,12 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (isSingleInputShuffleMask(Mask))
return DAG.getNode(X86ISD::VPERMILP, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(LoMask, DAG));
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(LoMask, 0, 8, 1, 9))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
+ if (isShuffleEquivalent(LoMask, 2, 10, 3, 11))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
}
if (isSingleInputShuffleMask(Mask))
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index 9ef2706..2d4f157 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -196,9 +196,7 @@ define <8 x float> @shuffle_v8f32_9810dc54(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08194c5d(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: @shuffle_v8f32_08194c5d
; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*}} # ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; ALL-NEXT: vpermilps {{.*}} # ymm0 = ymm0[0,1,1,3,4,5,5,7]
-; ALL-NEXT: vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; ALL-NEXT: vunpcklps {{.*}} # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x float> %shuffle
@@ -207,9 +205,7 @@ define <8 x float> @shuffle_v8f32_08194c5d(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_2a3b6e7f(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: @shuffle_v8f32_2a3b6e7f
; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*}} # ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; ALL-NEXT: vpermilps {{.*}} # ymm0 = ymm0[2,1,3,3,6,5,7,7]
-; ALL-NEXT: vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; ALL-NEXT: vunpckhps {{.*}} # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x float> %shuffle