aboutsummaryrefslogtreecommitdiff
path: root/llvm/unittests/Analysis/LazyCallGraphTest.cpp
diff options
context:
space:
mode:
authorChandler Carruth <chandlerc@gmail.com>2017-07-09 13:45:11 +0000
committerChandler Carruth <chandlerc@gmail.com>2017-07-09 13:45:11 +0000
commitc213c67df8044bf277529c1dacaca669cd3d39e7 (patch)
treed277d640358ecf4e221646d6209ce3a53e462db0 /llvm/unittests/Analysis/LazyCallGraphTest.cpp
parent7c8964d885b2cf3f3f9f4aedaf16c15d9bcdf1a3 (diff)
downloadllvm-c213c67df8044bf277529c1dacaca669cd3d39e7.zip
llvm-c213c67df8044bf277529c1dacaca669cd3d39e7.tar.gz
llvm-c213c67df8044bf277529c1dacaca669cd3d39e7.tar.bz2
[PM] Fix a nasty bug in the new PM where we failed to properly
invalidate analyses when merging SCCs. While I've added a bunch of testing of this, it takes something much more like the inliner to really trigger this as you need to have partially-analyzed SCCs with updates at just the right time. So I've added a direct test for this using the inliner and verifying the domtree. Without the changes here, this test ends up finding a stale dominator tree. However, to handle this properly, we need to invalidate analyses *before* merging the SCCs. After talking to Philip and Sanjoy about this they convinced me this was the right approach. To do this, we need a callback mechanism when merging SCCs so we can observe the cycle that will be merged before the merge happens. This API update ended up being surprisingly easy. With this commit, the new PM passes the test-suite again. It hadn't since MemorySSA was enabled for EarlyCSE as that also will find this bug very quickly. llvm-svn: 307498
Diffstat (limited to 'llvm/unittests/Analysis/LazyCallGraphTest.cpp')
-rw-r--r--llvm/unittests/Analysis/LazyCallGraphTest.cpp33
1 file changed, 17 insertions, 16 deletions
diff --git a/llvm/unittests/Analysis/LazyCallGraphTest.cpp b/llvm/unittests/Analysis/LazyCallGraphTest.cpp
index 8c251cf..6573048 100644
--- a/llvm/unittests/Analysis/LazyCallGraphTest.cpp
+++ b/llvm/unittests/Analysis/LazyCallGraphTest.cpp
@@ -1277,9 +1277,10 @@ TEST(LazyCallGraphTest, InternalEdgeMutation) {
// be invalidated.
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
- auto InvalidatedSCCs = RC.switchInternalEdgeToCall(A, C);
- ASSERT_EQ(1u, InvalidatedSCCs.size());
- EXPECT_EQ(&AC, InvalidatedSCCs[0]);
+ EXPECT_TRUE(RC.switchInternalEdgeToCall(A, C, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+ ASSERT_EQ(1u, MergedCs.size());
+ EXPECT_EQ(&AC, MergedCs[0]);
+ }));
EXPECT_EQ(2, CC.size());
EXPECT_EQ(&CC, CG.lookupSCC(A));
EXPECT_EQ(&CC, CG.lookupSCC(C));
@@ -1586,8 +1587,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
// Switch the ref edge from A -> D to a call edge. This should have no
// effect as it is already in postorder and no new cycles are formed.
- auto MergedCs = RC.switchInternalEdgeToCall(A, D);
- EXPECT_EQ(0u, MergedCs.size());
+ EXPECT_FALSE(RC.switchInternalEdgeToCall(A, D));
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
@@ -1596,8 +1596,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
// Switch B -> C to a call edge. This doesn't form any new cycles but does
// require reordering the SCCs.
- MergedCs = RC.switchInternalEdgeToCall(B, C);
- EXPECT_EQ(0u, MergedCs.size());
+ EXPECT_FALSE(RC.switchInternalEdgeToCall(B, C));
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&CC, &RC[1]);
@@ -1605,9 +1604,10 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
EXPECT_EQ(&AC, &RC[3]);
// Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
- MergedCs = RC.switchInternalEdgeToCall(C, B);
- ASSERT_EQ(1u, MergedCs.size());
- EXPECT_EQ(&CC, MergedCs[0]);
+ EXPECT_TRUE(RC.switchInternalEdgeToCall(C, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+ ASSERT_EQ(1u, MergedCs.size());
+ EXPECT_EQ(&CC, MergedCs[0]);
+ }));
ASSERT_EQ(3, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
@@ -1720,8 +1720,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
// Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
// require reordering the SCCs in the face of tricky internal node
// structures.
- auto MergedCs = RC.switchInternalEdgeToCall(C3, B1);
- EXPECT_EQ(0u, MergedCs.size());
+ EXPECT_FALSE(RC.switchInternalEdgeToCall(C3, B1));
ASSERT_EQ(8, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&B3C, &RC[1]);
@@ -1852,10 +1851,12 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
// C F C | |
// \ / \ / |
// G G |
- auto MergedCs = RC.switchInternalEdgeToCall(F, B);
- ASSERT_EQ(2u, MergedCs.size());
- EXPECT_EQ(&FC, MergedCs[0]);
- EXPECT_EQ(&DC, MergedCs[1]);
+ EXPECT_TRUE(RC.switchInternalEdgeToCall(
+ F, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+ ASSERT_EQ(2u, MergedCs.size());
+ EXPECT_EQ(&FC, MergedCs[0]);
+ EXPECT_EQ(&DC, MergedCs[1]);
+ }));
EXPECT_EQ(3, BC.size());
// And make sure the postorder was updated.