Diffstat (limited to 'llvm/unittests/ProfileData/MemProfTest.cpp')
-rw-r--r-- | llvm/unittests/ProfileData/MemProfTest.cpp | 69
1 file changed, 69 insertions, 0 deletions
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index b57567e..abe36bc 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -404,6 +404,75 @@ TEST(MemProf, RecordSerializationRoundTripVersion2HotColdSchema) {
   EXPECT_EQ(Record, GotRecord);
 }
 
+TEST(MemProf, RecordSerializationRoundTripVersion4HotColdSchema) {
+  const auto Schema = getHotColdSchema();
+
+  MemInfoBlock Info;
+  Info.AllocCount = 11;
+  Info.TotalSize = 22;
+  Info.TotalLifetime = 33;
+  Info.TotalLifetimeAccessDensity = 44;
+
+  llvm::SmallVector<CallStackId> CallStackIds = {0x123, 0x456};
+
+  llvm::SmallVector<CallStackId> CallSiteIds = {0x333, 0x444};
+
+  IndexedMemProfRecord Record;
+  for (const auto &CSId : CallStackIds) {
+    // Use the same info block for both allocation sites.
+    Record.AllocSites.emplace_back(CSId, Info, Schema);
+  }
+  for (auto CSId : CallSiteIds)
+    Record.CallSites.push_back(IndexedCallSiteInfo(CSId));
+
+  std::bitset<llvm::to_underlying(Meta::Size)> SchemaBitSet;
+  for (auto Id : Schema)
+    SchemaBitSet.set(llvm::to_underlying(Id));
+
+  // Verify that SchemaBitSet has the fields we expect and nothing else, which
+  // we check with count().
+  EXPECT_EQ(SchemaBitSet.count(), 4U);
+  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::AllocCount)]);
+  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalSize)]);
+  EXPECT_TRUE(SchemaBitSet[llvm::to_underlying(Meta::TotalLifetime)]);
+  EXPECT_TRUE(
+      SchemaBitSet[llvm::to_underlying(Meta::TotalLifetimeAccessDensity)]);
+
+  // Verify that Schema has propagated all the way to the Info field in each
+  // IndexedAllocationInfo.
+  ASSERT_THAT(Record.AllocSites, SizeIs(2));
+  EXPECT_EQ(Record.AllocSites[0].Info.getSchema(), SchemaBitSet);
+  EXPECT_EQ(Record.AllocSites[1].Info.getSchema(), SchemaBitSet);
+
+  std::string Buffer;
+  llvm::raw_string_ostream OS(Buffer);
+  // Version4 serialization needs a CallStackId to LinearCallStackId mapping.
+  llvm::DenseMap<CallStackId, LinearCallStackId> DummyMap = {
+      {0x123, 1}, {0x456, 2}, {0x333, 3}, {0x444, 4}};
+  Record.serialize(Schema, OS, Version4, &DummyMap);
+
+  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
+      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()),
+      Version4);
+
+  // Verify that Schema comes back correctly after deserialization (the final
+  // record comparison below covers it too, but check the Schemas explicitly).
+  ASSERT_THAT(GotRecord.AllocSites, SizeIs(2));
+  EXPECT_EQ(GotRecord.AllocSites[0].Info.getSchema(), SchemaBitSet);
+  EXPECT_EQ(GotRecord.AllocSites[1].Info.getSchema(), SchemaBitSet);
+
+  // Create the expected record using the linear IDs from the dummy map.
+  IndexedMemProfRecord ExpectedRecord;
+  for (const auto &CSId : CallStackIds) {
+    ExpectedRecord.AllocSites.emplace_back(DummyMap[CSId], Info, Schema);
+  }
+  for (const auto &CSId : CallSiteIds) {
+    ExpectedRecord.CallSites.emplace_back(DummyMap[CSId]);
+  }
+
+  EXPECT_EQ(ExpectedRecord, GotRecord);
+}
+
 TEST(MemProf, SymbolizationFilter) {
   auto Symbolizer = std::make_unique<MockSymbolizer>();
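
The test above checks the schema by turning the list of fields into a bitset indexed by the enum's underlying value and then comparing bit counts and individual bits. Below is a minimal, self-contained sketch of that idiom; the Field enum, the field names, and makeHotColdSchema() are illustrative stand-ins for the real MemProf Meta enum and getHotColdSchema(), not the actual LLVM definitions.

// schema_bits_sketch.cpp -- standalone illustration of the enum-indexed
// bitset check used in the test. All names here are hypothetical.
#include <bitset>
#include <cassert>
#include <cstddef>
#include <vector>

enum class Field : std::size_t {
  AllocCount,
  TotalSize,
  TotalLifetime,
  TotalLifetimeAccessDensity,
  Size // Number of fields; used only to size the bitset.
};

// Hypothetical analogue of getHotColdSchema(): the subset of fields kept
// for hot/cold profiling information.
std::vector<Field> makeHotColdSchema() {
  return {Field::AllocCount, Field::TotalSize, Field::TotalLifetime,
          Field::TotalLifetimeAccessDensity};
}

int main() {
  // Turn the schema list into a bitset indexed by the enum's underlying
  // value, mirroring the llvm::to_underlying(Meta::...) usage in the test.
  std::bitset<static_cast<std::size_t>(Field::Size)> Bits;
  for (Field F : makeHotColdSchema())
    Bits.set(static_cast<std::size_t>(F));

  // count() confirms exactly the expected fields are set and nothing else;
  // individual bits confirm which fields those are.
  assert(Bits.count() == 4);
  assert(Bits[static_cast<std::size_t>(Field::AllocCount)]);
  assert(Bits[static_cast<std::size_t>(Field::TotalSize)]);
  assert(Bits[static_cast<std::size_t>(Field::TotalLifetime)]);
  assert(Bits[static_cast<std::size_t>(Field::TotalLifetimeAccessDensity)]);
  return 0;
}

Comparing a bitset built from the schema against the one stored in each allocation's Info field is what lets the test assert that the schema survives both construction and the serialize/deserialize round trip.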