author    | Kazu Hirata <kazu@google.com> | 2024-11-27 08:19:07 -0800
committer | Kazu Hirata <kazu@google.com> | 2024-11-27 08:19:07 -0800
commit    | e98396f4846bfcaabe2c2ee568aab4b78655f307 (patch)
tree      | b2b0044ef34ee894c2d90995a4cd5d7568ca83af /llvm/unittests/ProfileData/MemProfTest.cpp
parent    | 32ff209b87a84890a1487b4e0bbb4a7645d31645 (diff)
Reapply [memprof] Add YAML-based deserialization for MemProf profile (#117829)
This patch adds YAML-based deserialization for the MemProf profile.

It has been painful to write tests for MemProf passes because we do not
have a text format for the MemProf profile. We would write a test case
in C++, run it to collect a binary MemProf profile, and only then run a
test written in LLVM IR against that binary profile.
This patch paves the way toward a YAML-based MemProf profile format.
Specifically, it adds a new class, YAMLMemProfReader, derived from
MemProfReader. For now, it only adds a function to parse a StringRef
pointing to YAML data. Subsequent patches will wire it up to
llvm-profdata and read from a file.
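For orientation, here is a minimal sketch of how the new reader is meant to be driven. The parse(StringRef) and takeMemProfData() calls are the ones exercised by the unit test in the diff below; the header paths and the free-function wrapper are assumptions made purely for illustration.

```cpp
// Sketch only, not part of the patch: deserialize an in-memory YAML MemProf
// profile and hand back the populated IndexedMemProfData. Header locations
// are assumed; parse() and takeMemProfData() match the unit test below.
#include "llvm/ADT/StringRef.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfReader.h"

llvm::memprof::IndexedMemProfData readYAMLProfile(llvm::StringRef YAMLData) {
  llvm::memprof::YAMLMemProfReader Reader;
  Reader.parse(YAMLData);          // Parse the YAML document held in memory.
  return Reader.takeMemProfData(); // Frames, call stacks, and records.
}
```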
The field names are based on various printYAML functions in MemProf.h.
I'm not aiming for compatibility with the format used in printYAML,
but I don't see a point in changing the field names.
This iteration works around the unavailability of
ScalarTraits<uintptr_t> on macOS.
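To make the workaround concrete: as I understand it, on macOS uintptr_t is unsigned long, for which LLVM's YAML I/O provides no ScalarTraits specialization, so the mapping has to be phrased over fixed-width integer types instead. The struct below is a hypothetical stand-in used only for illustration (the patch's real traits live with the reader), but the MappingTraits interface is LLVM's real YAML I/O API and the field names match the test below.

```cpp
// Illustrative only: a frame-like record mapped with llvm::yaml using
// fixed-width integers (uint64_t/uint32_t), sidestepping the missing
// ScalarTraits<uintptr_t> on macOS. FrameYAML is a hypothetical stand-in.
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>

struct FrameYAML {
  uint64_t Function = 0;       // GUID carried as uint64_t, not uintptr_t.
  uint32_t LineOffset = 0;
  uint32_t Column = 0;
  bool IsInlineFrame = false;
};

namespace llvm {
namespace yaml {
template <> struct MappingTraits<FrameYAML> {
  static void mapping(IO &Io, FrameYAML &F) {
    // Field names follow the printYAML output in MemProf.h.
    Io.mapRequired("Function", F.Function);
    Io.mapRequired("LineOffset", F.LineOffset);
    Io.mapRequired("Column", F.Column);
    Io.mapRequired("Inline", F.IsInlineFrame);
  }
};
} // namespace yaml
} // namespace llvm
```

Feeding a flow mapping such as `{Function: 0x100, LineOffset: 11, Column: 10, Inline: true}` through llvm::yaml::Input should then fill the struct without touching any pointer-sized scalar trait.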
Diffstat (limited to 'llvm/unittests/ProfileData/MemProfTest.cpp')
-rw-r--r-- | llvm/unittests/ProfileData/MemProfTest.cpp | 77 |
1 file changed, 77 insertions, 0 deletions
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index b3b6249..5ab860e 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -34,6 +34,7 @@ using ::llvm::memprof::CallStackId;
 using ::llvm::memprof::CallStackMap;
 using ::llvm::memprof::Frame;
 using ::llvm::memprof::FrameId;
+using ::llvm::memprof::hashCallStack;
 using ::llvm::memprof::IndexedAllocationInfo;
 using ::llvm::memprof::IndexedMemProfRecord;
 using ::llvm::memprof::MemInfoBlock;
@@ -46,8 +47,11 @@ using ::llvm::memprof::RawMemProfReader;
 using ::llvm::memprof::SegmentEntry;
 using ::llvm::object::SectionedAddress;
 using ::llvm::symbolize::SymbolizableModule;
+using ::testing::ElementsAre;
+using ::testing::Pair;
 using ::testing::Return;
 using ::testing::SizeIs;
+using ::testing::UnorderedElementsAre;
 
 class MockSymbolizer : public SymbolizableModule {
 public:
@@ -742,4 +746,77 @@ TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
   EXPECT_THAT(Mappings, testing::Contains(testing::Pair(
                             llvm::memprof::hashCallStack(CS4), 10U)));
 }
+
+// Verify that we can parse YAML and retrieve IndexedMemProfData as expected.
+TEST(MemProf, YAMLParser) {
+  StringRef YAMLData = R"YAML(
+---
+HeapProfileRecords:
+- GUID: 0xdeadbeef12345678
+  AllocSites:
+  - Callstack:
+    - {Function: 0x100, LineOffset: 11, Column: 10, Inline: true}
+    - {Function: 0x200, LineOffset: 22, Column: 20, Inline: false}
+    MemInfoBlock:
+      AllocCount: 777
+      TotalSize: 888
+  - Callstack:
+    - {Function: 0x300, LineOffset: 33, Column: 30, Inline: false}
+    - {Function: 0x400, LineOffset: 44, Column: 40, Inline: true}
+    MemInfoBlock:
+      AllocCount: 666
+      TotalSize: 555
+  CallSites:
+  - - {Function: 0x500, LineOffset: 55, Column: 50, Inline: true}
+    - {Function: 0x600, LineOffset: 66, Column: 60, Inline: false}
+  - - {Function: 0x700, LineOffset: 77, Column: 70, Inline: true}
+    - {Function: 0x800, LineOffset: 88, Column: 80, Inline: false}
+)YAML";
+
+  llvm::memprof::YAMLMemProfReader YAMLReader;
+  YAMLReader.parse(YAMLData);
+  llvm::memprof::IndexedMemProfData MemProfData = YAMLReader.takeMemProfData();
+
+  Frame F1(0x100, 11, 10, true);
+  Frame F2(0x200, 22, 20, false);
+  Frame F3(0x300, 33, 30, false);
+  Frame F4(0x400, 44, 40, true);
+  Frame F5(0x500, 55, 50, true);
+  Frame F6(0x600, 66, 60, false);
+  Frame F7(0x700, 77, 70, true);
+  Frame F8(0x800, 88, 80, false);
+
+  llvm::SmallVector<FrameId> CS1 = {F1.hash(), F2.hash()};
+  llvm::SmallVector<FrameId> CS2 = {F3.hash(), F4.hash()};
+  llvm::SmallVector<FrameId> CS3 = {F5.hash(), F6.hash()};
+  llvm::SmallVector<FrameId> CS4 = {F7.hash(), F8.hash()};
+
+  // Verify the entire contents of MemProfData.Frames.
+  EXPECT_THAT(MemProfData.Frames,
+              UnorderedElementsAre(Pair(F1.hash(), F1), Pair(F2.hash(), F2),
+                                   Pair(F3.hash(), F3), Pair(F4.hash(), F4),
+                                   Pair(F5.hash(), F5), Pair(F6.hash(), F6),
+                                   Pair(F7.hash(), F7), Pair(F8.hash(), F8)));
+
+  // Verify the entire contents of MemProfData.CallStacks.
+  EXPECT_THAT(MemProfData.CallStacks,
+              UnorderedElementsAre(Pair(hashCallStack(CS1), CS1),
+                                   Pair(hashCallStack(CS2), CS2),
+                                   Pair(hashCallStack(CS3), CS3),
+                                   Pair(hashCallStack(CS4), CS4)));
+
+  // Verify the entire contents of MemProfData.Records.
+  ASSERT_THAT(MemProfData.Records, SizeIs(1));
+  const auto &[GUID, Record] = *MemProfData.Records.begin();
+  EXPECT_EQ(GUID, 0xdeadbeef12345678ULL);
+  ASSERT_THAT(Record.AllocSites, SizeIs(2));
+  EXPECT_EQ(Record.AllocSites[0].CSId, hashCallStack(CS1));
+  EXPECT_EQ(Record.AllocSites[0].Info.getAllocCount(), 777U);
+  EXPECT_EQ(Record.AllocSites[0].Info.getTotalSize(), 888U);
+  EXPECT_EQ(Record.AllocSites[1].CSId, hashCallStack(CS2));
+  EXPECT_EQ(Record.AllocSites[1].Info.getAllocCount(), 666U);
+  EXPECT_EQ(Record.AllocSites[1].Info.getTotalSize(), 555U);
+  EXPECT_THAT(Record.CallSiteIds,
+              ElementsAre(hashCallStack(CS3), hashCallStack(CS4)));
+}
 } // namespace