diff options
Diffstat (limited to 'llvm/unittests/CodeGen')
-rw-r--r-- | llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp | 95
-rw-r--r-- | llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp | 61
-rw-r--r-- | llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp | 27
3 files changed, 183 insertions, 0 deletions
diff --git a/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp index 1ff7fd9..9163663 100644 --- a/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp +++ b/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp @@ -77,6 +77,15 @@ static const LLT NXV3P0 = LLT::scalable_vector(3, P0); static const LLT NXV4P0 = LLT::scalable_vector(4, P0); static const LLT NXV12P0 = LLT::scalable_vector(12, P0); +static void collectNonCopyMI(SmallVectorImpl<MachineInstr *> &MIList, + MachineFunction *MF) { + for (auto &MBB : *MF) + for (MachineInstr &MI : MBB) { + if (MI.getOpcode() != TargetOpcode::COPY) + MIList.push_back(&MI); + } +} + TEST(GISelUtilsTest, getGCDType) { EXPECT_EQ(S1, getGCDType(S1, S1)); EXPECT_EQ(S32, getGCDType(S32, S32)); @@ -408,4 +417,90 @@ TEST_F(AArch64GISelMITest, ConstFalseTest) { } } } + +TEST_F(AMDGPUGISelMITest, isConstantOrConstantSplatVectorFP) { + StringRef MIRString = + " %cst0:_(s32) = G_FCONSTANT float 2.000000e+00\n" + " %cst1:_(s32) = G_FCONSTANT float 0.0\n" + " %cst2:_(s64) = G_FCONSTANT double 3.000000e-02\n" + " %cst3:_(s32) = G_CONSTANT i32 2\n" + " %cst4:_(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)\n" + " %cst5:_(<2 x s32>) = G_BUILD_VECTOR %cst1(s32), %cst0(s32)\n" + " %cst6:_(<2 x s64>) = G_BUILD_VECTOR %cst2(s64), %cst2(s64)\n" + " %cst7:_(<2 x s32>) = G_BUILD_VECTOR %cst3(s32), %cst3:_(s32)\n" + " %cst8:_(<4 x s32>) = G_CONCAT_VECTORS %cst4:_(<2 x s32>), %cst4:_(<2 " + "x s32>)\n" + " %cst9:_(<4 x s64>) = G_CONCAT_VECTORS %cst6:_(<2 x s64>), %cst6:_(<2 " + "x s64>)\n" + " %cst10:_(<4 x s32>) = G_CONCAT_VECTORS %cst4:_(<2 x s32>), %cst5:_(<2 " + "x s32>)\n" + " %cst11:_(<4 x s32>) = G_CONCAT_VECTORS %cst7:_(<2 x s32>), %cst7:_(<2 " + "x s32>)\n" + " %cst12:_(s32) = G_IMPLICIT_DEF \n" + " %cst13:_(<2 x s32>) = G_BUILD_VECTOR %cst12(s32), %cst12(s32)\n" + " %cst14:_(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst12(s32)\n" + " %cst15:_(<4 x s32>) = G_CONCAT_VECTORS %cst4:_(<2 
x s32>), " + "%cst14:_(<2 " + "x s32>)\n"; + + SmallVector<MachineInstr *, 16> MIList; + + setUp(MIRString); + if (!TM) + GTEST_SKIP(); + + collectNonCopyMI(MIList, MF); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[0], *MRI).has_value()); + auto val = isConstantOrConstantSplatVectorFP(*MIList[0], *MRI).value(); + EXPECT_EQ(2.0, val.convertToFloat()); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[1], *MRI).has_value()); + val = isConstantOrConstantSplatVectorFP(*MIList[1], *MRI).value(); + EXPECT_EQ(0.0, val.convertToFloat()); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[2], *MRI).has_value()); + val = isConstantOrConstantSplatVectorFP(*MIList[2], *MRI).value(); + EXPECT_EQ(0.03, val.convertToDouble()); + + EXPECT_FALSE(isConstantOrConstantSplatVectorFP(*MIList[3], *MRI).has_value()); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[4], *MRI).has_value()); + val = isConstantOrConstantSplatVectorFP(*MIList[4], *MRI).value(); + EXPECT_EQ(2.0, val.convertToFloat()); + + EXPECT_FALSE(isConstantOrConstantSplatVectorFP(*MIList[5], *MRI).has_value()); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[6], *MRI).has_value()); + val = isConstantOrConstantSplatVectorFP(*MIList[6], *MRI).value(); + EXPECT_EQ(0.03, val.convertToDouble()); + + EXPECT_FALSE(isConstantOrConstantSplatVectorFP(*MIList[7], *MRI).has_value()); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[8], *MRI).has_value()); + val = isConstantOrConstantSplatVectorFP(*MIList[8], *MRI).value(); + EXPECT_EQ(2.0, val.convertToFloat()); + + EXPECT_TRUE(isConstantOrConstantSplatVectorFP(*MIList[9], *MRI).has_value()); + val = isConstantOrConstantSplatVectorFP(*MIList[9], *MRI).value(); + EXPECT_EQ(0.03, val.convertToDouble()); + + EXPECT_FALSE( + isConstantOrConstantSplatVectorFP(*MIList[10], *MRI).has_value()); + + EXPECT_FALSE( + isConstantOrConstantSplatVectorFP(*MIList[11], *MRI).has_value()); + + EXPECT_FALSE( + 
isConstantOrConstantSplatVectorFP(*MIList[12], *MRI).has_value()); + + EXPECT_FALSE( + isConstantOrConstantSplatVectorFP(*MIList[13], *MRI).has_value()); + + EXPECT_FALSE( + isConstantOrConstantSplatVectorFP(*MIList[14], *MRI).has_value()); + + EXPECT_FALSE( + isConstantOrConstantSplatVectorFP(*MIList[15], *MRI).has_value()); +} } diff --git a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp index 59a86fa..40cd055 100644 --- a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp +++ b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp @@ -224,6 +224,32 @@ TEST_F(AArch64GISelMITest, MatchBinaryOp) { auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]); auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42)); + // Match min/max, and make sure they're commutative. + auto SMin = B.buildSMin(s64, Copies[2], MIBAdd); + EXPECT_TRUE(mi_match(SMin.getReg(0), *MRI, + m_GSMin(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0)))); + EXPECT_EQ(Src0, Copies[2]); + EXPECT_EQ(Src1, Copies[0]); + EXPECT_EQ(Src2, Copies[1]); + auto SMax = B.buildSMax(s64, Copies[2], MIBAdd); + EXPECT_TRUE(mi_match(SMax.getReg(0), *MRI, + m_GSMax(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0)))); + EXPECT_EQ(Src0, Copies[2]); + EXPECT_EQ(Src1, Copies[0]); + EXPECT_EQ(Src2, Copies[1]); + auto UMin = B.buildUMin(s64, Copies[2], MIBAdd); + EXPECT_TRUE(mi_match(UMin.getReg(0), *MRI, + m_GUMin(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0)))); + EXPECT_EQ(Src0, Copies[2]); + EXPECT_EQ(Src1, Copies[0]); + EXPECT_EQ(Src2, Copies[1]); + auto UMax = B.buildUMax(s64, Copies[2], MIBAdd); + EXPECT_TRUE(mi_match(UMax.getReg(0), *MRI, + m_GUMax(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0)))); + EXPECT_EQ(Src0, Copies[2]); + EXPECT_EQ(Src1, Copies[0]); + EXPECT_EQ(Src2, Copies[1]); + // m_BinOp with opcode. // Match binary instruction, opcode and its non-commutative operands. 
match = mi_match(MIBAddCst, *MRI, @@ -576,6 +602,11 @@ TEST_F(AArch64GISelMITest, MatchMiscellaneous) { auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]); Register Reg = MIBAdd.getReg(0); + // Extract the type. + LLT Ty; + EXPECT_TRUE(mi_match(Reg, *MRI, m_GAdd(m_Type(Ty), m_Reg()))); + EXPECT_EQ(Ty, s64); + // Only one use of Reg. B.buildCast(LLT::pointer(0, 32), MIBAdd); EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg())))); @@ -889,6 +920,36 @@ TEST_F(AArch64GISelMITest, MatchSpecificReg) { EXPECT_TRUE(mi_match(Add.getReg(0), *MRI, m_GAdd(m_SpecificReg(Reg), m_Reg()))); } +TEST_F(AArch64GISelMITest, DeferredMatching) { + setUp(); + if (!TM) + GTEST_SKIP(); + auto s64 = LLT::scalar(64); + auto s32 = LLT::scalar(32); + + auto Cst1 = B.buildConstant(s64, 42); + auto Cst2 = B.buildConstant(s64, 314); + auto Add = B.buildAdd(s64, Cst1, Cst2); + auto Sub = B.buildSub(s64, Add, Cst1); + + auto TruncAdd = B.buildTrunc(s32, Add); + auto TruncSub = B.buildTrunc(s32, Sub); + auto NarrowAdd = B.buildAdd(s32, TruncAdd, TruncSub); + + Register X; + EXPECT_TRUE(mi_match(Sub.getReg(0), *MRI, + m_GSub(m_GAdd(m_Reg(X), m_Reg()), m_DeferredReg(X)))); + LLT Ty; + EXPECT_TRUE( + mi_match(NarrowAdd.getReg(0), *MRI, + m_GAdd(m_GTrunc(m_Type(Ty)), m_GTrunc(m_DeferredType(Ty))))); + + // Test commutative. 
+ auto Add2 = B.buildAdd(s64, Sub, Cst1); + EXPECT_TRUE(mi_match(Add2.getReg(0), *MRI, + m_GAdd(m_Reg(X), m_GSub(m_Reg(), m_DeferredReg(X))))); +} + } // namespace int main(int argc, char **argv) { diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp index 259bdad..a2e1e58 100644 --- a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp +++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp @@ -119,6 +119,33 @@ TEST_F(SelectionDAGPatternMatchTest, matchValueType) { EXPECT_FALSE(sd_match(Op2, m_ScalableVectorVT())); } +TEST_F(SelectionDAGPatternMatchTest, matchVecShuffle) { + SDLoc DL; + auto Int32VT = EVT::getIntegerVT(Context, 32); + auto VInt32VT = EVT::getVectorVT(Context, Int32VT, 4); + const std::array<int, 4> MaskData = {2, 0, 3, 1}; + const std::array<int, 4> OtherMaskData = {1, 2, 3, 4}; + ArrayRef<int> Mask; + + SDValue V0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, VInt32VT); + SDValue V1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, VInt32VT); + SDValue VecShuffleWithMask = + DAG->getVectorShuffle(VInt32VT, DL, V0, V1, MaskData); + + using namespace SDPatternMatch; + EXPECT_TRUE(sd_match(VecShuffleWithMask, m_Shuffle(m_Value(), m_Value()))); + EXPECT_TRUE(sd_match(VecShuffleWithMask, + m_Shuffle(m_Value(), m_Value(), m_Mask(Mask)))); + EXPECT_TRUE( + sd_match(VecShuffleWithMask, + m_Shuffle(m_Value(), m_Value(), m_SpecificMask(MaskData)))); + EXPECT_FALSE( + sd_match(VecShuffleWithMask, + m_Shuffle(m_Value(), m_Value(), m_SpecificMask(OtherMaskData)))); + EXPECT_TRUE( + std::equal(MaskData.begin(), MaskData.end(), Mask.begin(), Mask.end())); +} + TEST_F(SelectionDAGPatternMatchTest, matchTernaryOp) { SDLoc DL; auto Int32VT = EVT::getIntegerVT(Context, 32); |