aboutsummaryrefslogtreecommitdiff
path: root/llvm/unittests/ProfileData/MemProfTest.cpp
diff options
context:
space:
mode:
authorSnehasish Kumar <snehasishk@google.com>2022-03-21 19:39:24 -0700
committerSnehasish Kumar <snehasishk@google.com>2022-04-08 09:15:20 -0700
commit6dd6a6161f3a5c25162af9dc3625c8dfcc62a1ed (patch)
treecbb80987fdf31edb688183c3f357b58443102ae1 /llvm/unittests/ProfileData/MemProfTest.cpp
parent575a1d48e781a03a1bb892ff52e16ee0485d0a34 (diff)
downloadllvm-6dd6a6161f3a5c25162af9dc3625c8dfcc62a1ed.zip
llvm-6dd6a6161f3a5c25162af9dc3625c8dfcc62a1ed.tar.gz
llvm-6dd6a6161f3a5c25162af9dc3625c8dfcc62a1ed.tar.bz2
[memprof] Deduplicate and outline frame storage in the memprof profile.
The current implementation of memprof information in the indexed profile format stores the representation of each calling context frame inline. This patch uses an interned representation where the frame contents are stored in a separate on-disk hash table. The table is indexed via a hash of the contents of the frame. With this patch, the compressed size of a large memprof profile reduces by ~22%. Reviewed By: tejohnson Differential Revision: https://reviews.llvm.org/D123094
Diffstat (limited to 'llvm/unittests/ProfileData/MemProfTest.cpp')
-rw-r--r--llvm/unittests/ProfileData/MemProfTest.cpp49
1 files changed, 18 insertions, 31 deletions
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index 7f7cd64..48d6ee7 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -25,6 +25,9 @@ using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::memprof::CallStackMap;
+using ::llvm::memprof::Frame;
+using ::llvm::memprof::FrameId;
+using ::llvm::memprof::IndexedMemProfRecord;
using ::llvm::memprof::MemInfoBlock;
using ::llvm::memprof::MemProfRecord;
using ::llvm::memprof::MemProfSchema;
@@ -94,35 +97,21 @@ const DILineInfoSpecifier specifier() {
}
MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
+ const Frame &F = arg;
+
const uint64_t ExpectedHash = llvm::Function::getGUID(FunctionName);
- if (arg.Function != ExpectedHash) {
+ if (F.Function != ExpectedHash) {
*result_listener << "Hash mismatch";
return false;
}
- if (arg.LineOffset == LineOffset && arg.Column == Column &&
- arg.IsInlineFrame == Inline) {
+ if (F.LineOffset == LineOffset && F.Column == Column &&
+ F.IsInlineFrame == Inline) {
return true;
}
*result_listener << "LineOffset, Column or Inline mismatch";
return false;
}
-MATCHER_P(EqualsRecord, Want, "") {
- if (arg == Want)
- return true;
-
- std::string Explanation;
- llvm::raw_string_ostream OS(Explanation);
- OS << "\n Want: \n";
- Want.print(OS);
- OS << "\n Got: \n";
- arg.print(OS);
- OS.flush();
-
- *result_listener << Explanation;
- return false;
-}
-
MemProfSchema getFullSchema() {
MemProfSchema Schema;
#define MIBEntryDef(NameTag, Name, Type) Schema.push_back(Meta::Name);
@@ -186,7 +175,7 @@ TEST(MemProf, FillsValue) {
ASSERT_EQ(Records.size(), 4U);
// Check the memprof record for foo.
- const llvm::GlobalValue::GUID FooId = MemProfRecord::getGUID("foo");
+ const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
ASSERT_EQ(Records.count(FooId), 1U);
const MemProfRecord &Foo = Records[FooId];
ASSERT_EQ(Foo.AllocSites.size(), 1U);
@@ -202,7 +191,7 @@ TEST(MemProf, FillsValue) {
EXPECT_TRUE(Foo.CallSites.empty());
// Check the memprof record for bar.
- const llvm::GlobalValue::GUID BarId = MemProfRecord::getGUID("bar");
+ const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
ASSERT_EQ(Records.count(BarId), 1U);
const MemProfRecord &Bar = Records[BarId];
ASSERT_EQ(Bar.AllocSites.size(), 1U);
@@ -222,7 +211,7 @@ TEST(MemProf, FillsValue) {
EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));
// Check the memprof record for xyz.
- const llvm::GlobalValue::GUID XyzId = MemProfRecord::getGUID("xyz");
+ const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
ASSERT_EQ(Records.count(XyzId), 1U);
const MemProfRecord &Xyz = Records[XyzId];
ASSERT_EQ(Xyz.CallSites.size(), 1U);
@@ -233,7 +222,7 @@ TEST(MemProf, FillsValue) {
EXPECT_THAT(Xyz.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
// Check the memprof record for abc.
- const llvm::GlobalValue::GUID AbcId = MemProfRecord::getGUID("abc");
+ const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
ASSERT_EQ(Records.count(AbcId), 1U);
const MemProfRecord &Abc = Records[AbcId];
EXPECT_TRUE(Abc.AllocSites.empty());
@@ -275,14 +264,12 @@ TEST(MemProf, RecordSerializationRoundTrip) {
/*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
/*dealloc_cpu=*/4);
- llvm::SmallVector<llvm::SmallVector<MemProfRecord::Frame>> AllocCallStacks = {
- {{0x123, 1, 2, false}, {0x345, 3, 4, false}},
- {{0x123, 1, 2, false}, {0x567, 5, 6, false}}};
+ llvm::SmallVector<llvm::SmallVector<FrameId>> AllocCallStacks = {
+ {0x123, 0x345}, {0x123, 0x567}};
- llvm::SmallVector<llvm::SmallVector<MemProfRecord::Frame>> CallSites = {
- {{0x333, 1, 2, false}, {0x777, 3, 4, true}}};
+ llvm::SmallVector<llvm::SmallVector<FrameId>> CallSites = {{0x333, 0x777}};
- MemProfRecord Record;
+ IndexedMemProfRecord Record;
for (const auto &ACS : AllocCallStacks) {
// Use the same info block for both allocation sites.
Record.AllocSites.emplace_back(ACS, Info);
@@ -294,10 +281,10 @@ TEST(MemProf, RecordSerializationRoundTrip) {
Record.serialize(Schema, OS);
OS.flush();
- const MemProfRecord GotRecord = MemProfRecord::deserialize(
+ const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));
- EXPECT_THAT(GotRecord, EqualsRecord(Record));
+ EXPECT_EQ(Record, GotRecord);
}
TEST(MemProf, SymbolizationFilter) {