Diffstat (limited to 'compiler-rt/lib')
-rw-r--r--  compiler-rt/lib/builtins/cpu_model/x86.c                                  12
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp                        12
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp    7
-rw-r--r--  compiler-rt/lib/scudo/standalone/allocator_config.def                      4
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h                               88
-rw-r--r--  compiler-rt/lib/scudo/standalone/tests/combined_test.cpp                 242
-rw-r--r--  compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp                 9
7 files changed, 350 insertions, 24 deletions
diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c
index c21b2ba..45b7055 100644
--- a/compiler-rt/lib/builtins/cpu_model/x86.c
+++ b/compiler-rt/lib/builtins/cpu_model/x86.c
@@ -21,7 +21,9 @@
#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
+#if __STDC_HOSTED__
#include <assert.h>
+#endif // __STDC_HOSTED__
#if (defined(__GNUC__) || defined(__clang__)) && !defined(_MSC_VER)
#include <cpuid.h>
@@ -245,8 +247,8 @@ struct __processor_model {
unsigned int __cpu_features[1];
} __cpu_model = {0, 0, 0, {0}};
-static_assert(sizeof(__cpu_model) == 16,
- "Wrong size of __cpu_model will result in ABI break");
+_Static_assert(sizeof(__cpu_model) == 16,
+ "Wrong size of __cpu_model will result in ABI break");
// This code is copied from lib/Support/Host.cpp.
// Changes to either file should be mirrored in the other.
@@ -1200,8 +1202,8 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
unsigned Vendor;
unsigned Model, Family;
unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0};
- static_assert(sizeof(Features) / sizeof(Features[0]) == 4, "");
- static_assert(sizeof(__cpu_features2) / sizeof(__cpu_features2[0]) == 3, "");
+ _Static_assert(sizeof(Features) / sizeof(Features[0]) == 4, "");
+ _Static_assert(sizeof(__cpu_features2) / sizeof(__cpu_features2[0]) == 3, "");
// This function needs to run just once.
if (__cpu_model.__cpu_vendor)
@@ -1234,9 +1236,11 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
} else
__cpu_model.__cpu_vendor = VENDOR_OTHER;
+#if __STDC_HOSTED__
assert(__cpu_model.__cpu_vendor < VENDOR_MAX);
assert(__cpu_model.__cpu_type < CPU_TYPE_MAX);
assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX);
+#endif // __STDC_HOSTED__
return 0;
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index b0a29db..90c0b66 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -960,7 +960,17 @@ static void DisableMmapExcGuardExceptions() {
RTLD_DEFAULT, "task_set_exc_guard_behavior");
if (set_behavior == nullptr) return;
const task_exc_guard_behavior_t task_exc_guard_none = 0;
- set_behavior(mach_task_self(), task_exc_guard_none);
+ kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none);
+ if (res != KERN_SUCCESS) {
+ Report(
+ "WARN: task_set_exc_guard_behavior returned %d (%s), "
+ "mmap may fail unexpectedly.\n",
+ res, mach_error_string(res));
+ if (res == KERN_DENIED)
+ Report(
+ "HINT: Check that task_set_exc_guard_behavior is allowed by "
+ "sandbox.\n");
+ }
}
static void VerifyInterceptorsWorking();
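For context, a minimal standalone sketch (not part of the patch) of the pattern used in DisableMmapExcGuardExceptions above: the optional task_set_exc_guard_behavior API is resolved at runtime via dlsym, and its kern_return_t result is now checked, since the call can be unavailable or denied by the sandbox. The names in the sketch (DisableExcGuardSketch, BehaviorT, FnT) are illustrative only.

#include <dlfcn.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <cstdio>

// Illustrative only: try to disable EXC_GUARD exceptions for the current task
// and report the Mach error string when the call does not succeed.
static bool DisableExcGuardSketch() {
  using BehaviorT = unsigned int; // stand-in for task_exc_guard_behavior_t
  using FnT = kern_return_t (*)(task_t, BehaviorT);
  FnT SetBehavior = reinterpret_cast<FnT>(
      dlsym(RTLD_DEFAULT, "task_set_exc_guard_behavior"));
  if (SetBehavior == nullptr)
    return true; // API not present on this OS release; nothing to do.
  kern_return_t Res = SetBehavior(mach_task_self(), /*TASK_EXC_GUARD_NONE=*/0u);
  if (Res != KERN_SUCCESS)
    std::fprintf(stderr, "task_set_exc_guard_behavior: %d (%s)\n", Res,
                 mach_error_string(Res));
  return Res == KERN_SUCCESS;
}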
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index f8d821e..7eb0c97 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -505,6 +505,13 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
}
# if SANITIZER_APPLE
+ if (list->empty()) {
+ Report(
+ "WARN: No external symbolizers found. Symbols may be missing or "
+ "unreliable.\n");
+ Report(
+ "HINT: Is PATH set? Does sandbox allow file-read of /usr/bin/atos?\n");
+ }
VReport(2, "Using dladdr symbolizer.\n");
list->push_back(new (*allocator) DlAddrSymbolizer());
# endif // SANITIZER_APPLE
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 7485308..0aea7b8 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -57,6 +57,10 @@ BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
// Disable the quarantine code.
BASE_OPTIONAL(const bool, QuarantineDisabled, false)
+// If set to true, malloc_usable_size returns the exact size of the allocation.
+// If set to false, it returns the total usable size available in the allocation.
+BASE_OPTIONAL(const bool, ExactUsableSize, true)
+
// PRIMARY_REQUIRED_TYPE(NAME)
//
// SizeClassMap to use with the Primary.
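As a concrete illustration of the new option, a custom config can override the default the same way the test configs added below do. A hedged sketch, assuming the existing scudo::DefaultConfig as a base (the MyAllocatorConfig and MyAllocator names are hypothetical):

// Sketch only: opt out of exact usable sizes so that malloc_usable_size
// reports the full usable size of the backing block.
#include "allocator_config.h"
#include "combined.h"

struct MyAllocatorConfig : scudo::DefaultConfig {
  static const bool ExactUsableSize = false;
};
using MyAllocator = scudo::Allocator<MyAllocatorConfig>;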
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 329ec45..ffe9554 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -706,19 +706,26 @@ public:
if (!getChunkFromBlock(Block, &Chunk, &Header) &&
!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
return;
- } else {
- if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
- return;
+ } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) {
+ return;
}
- if (Header.State == Chunk::State::Allocated) {
- uptr TaggedChunk = Chunk;
- if (allocatorSupportsMemoryTagging<AllocatorConfig>())
- TaggedChunk = untagPointer(TaggedChunk);
- if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
- TaggedChunk = loadTag(Chunk);
- Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
- Arg);
+
+ if (Header.State != Chunk::State::Allocated)
+ return;
+
+ uptr TaggedChunk = Chunk;
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ TaggedChunk = untagPointer(TaggedChunk);
+ uptr Size;
+ if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
+ TaggedChunk = loadTag(Chunk);
+ Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+ } else if (AllocatorConfig::getExactUsableSize()) {
+ Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+ } else {
+ Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
}
+ Callback(TaggedChunk, Size, Arg);
};
Primary.iterateOverBlocks(Lambda);
Secondary.iterateOverBlocks(Lambda);
@@ -759,16 +766,50 @@ public:
return false;
}
- // Return the usable size for a given chunk. Technically we lie, as we just
- // report the actual size of a chunk. This is done to counteract code actively
- // writing past the end of a chunk (like sqlite3) when the usable size allows
- // for it, which then forces realloc to copy the usable size of a chunk as
- // opposed to its actual size.
+ ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ if (LIKELY(Header->ClassId)) {
+ return SizeClassMap::getSizeByClassId(Header->ClassId) -
+ (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin));
+ }
+
+ uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+ UntaggedPtr = untagPointer(UntaggedPtr);
+ BlockBegin = untagPointer(BlockBegin);
+ }
+ return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
+ }
+
+ // Return the usable size for a given chunk. If MTE is enabled or the
+ // ExactUsableSize config parameter is true, report the exact size of the
+ // original allocation. Otherwise, return the total usable size of the
+ // chunk's backing block.
uptr getUsableSize(const void *Ptr) {
if (UNLIKELY(!Ptr))
return 0;
- return getAllocSize(Ptr);
+ if (AllocatorConfig::getExactUsableSize() ||
+ UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
+ return getAllocSize(Ptr);
+
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ // Getting the alloc size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, Ptr);
+
+ return getUsableSize(Ptr, &Header);
}
uptr getAllocSize(const void *Ptr) {
@@ -951,6 +992,19 @@ public:
MemorySize, 2, 16);
}
+ uptr getBlockBeginTestOnly(const void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+ DCHECK(Header.State == Chunk::State::Allocated);
+
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(const_cast<void *>(Ptr));
+ void *Begin = getBlockBegin(Ptr, &Header);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Begin = untagPointer(Begin);
+ return reinterpret_cast<uptr>(Begin);
+ }
+
private:
typedef typename PrimaryT::SizeClassMap SizeClassMap;
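To make the behavioral difference concrete, here is a hedged usage sketch in the style of the tests added below; it assumes the TestFullUsableSizeConfig defined there (ExactUsableSize = false, size classes 1024/2048/4096/8192) and the tests' Origin constant:

// Sketch only: with ExactUsableSize == false, getUsableSize reports the
// remainder of the size-class block rather than the requested size.
using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

void *P = Allocator->allocate(2000, Origin);
// The exact size of the original request is still available:
EXPECT_EQ(2000U, Allocator->getAllocSize(P));
// A 2000-byte request plus the chunk header falls into the 2048-byte size
// class, so the usable size is the class size minus the header:
EXPECT_EQ(2048U - scudo::Chunk::getHeaderSize(), Allocator->getUsableSize(P));
Allocator->deallocate(P, Origin);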
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 5fdfd1e..4837ac9 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -1152,6 +1152,248 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
}
+struct UsableSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = false;
+ static const bool QuarantineDisabled = true;
+
+ template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+ struct Primary {
+ // In order to properly test the usable size, this Primary config has
+ // four real size classes: 1024, 2048, 4096, 8192.
+ using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+ static const scudo::uptr RegionSizeLog = 21U;
+ static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ typedef scudo::uptr CompactPtrT;
+ static const scudo::uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const scudo::uptr MapSizeIncrement = 1UL << 18;
+ static const scudo::uptr GroupSizeLog = 18;
+ };
+ template <typename Config>
+ using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config>
+ using CacheT = scudo::MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+ // Scan through all sizes up to 10000, then check some larger sizes.
+ for (scudo::uptr Size = 1; Size < 10000; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(Size, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size;
+ Allocator.deallocate(P, Origin);
+ }
+
+ // Verify that aligned allocations also return the exact size allocated.
+ const scudo::uptr AllocSize = 313;
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+ EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << AllocSize
+ << " at alignment " << (1U << Align);
+ Allocator.deallocate(P, Origin);
+ }
+
+ // Verify an explicitly large allocation.
+ const scudo::uptr LargeAllocSize = 1000000;
+ void *P = Allocator.allocate(LargeAllocSize, Origin);
+ EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+ Allocator.deallocate(P, Origin);
+
+ // Now do the same for aligned large allocations.
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+ EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << LargeAllocSize
+ << " at alignment " << (1U << Align);
+ Allocator.deallocate(P, Origin);
+ }
+}
+
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+ // This does not check whether the reported size is the exact allocation
+ // size or the full size-class size. Instead, verify that the size from the
+ // iterate callback matches getUsableSize and assume the other tests have
+ // already validated getUsableSize itself.
+ std::unordered_map<void *, size_t> Pointers;
+ Pointers.insert({Allocator.allocate(128, Origin), 0U});
+ Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+ Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+ Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+ Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+ Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+ Allocator.disable();
+ Allocator.iterateOverChunks(
+ 0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+ [](uintptr_t Base, size_t Size, void *Arg) {
+ std::unordered_map<void *, size_t> *Pointers =
+ reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg);
+ (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+ },
+ reinterpret_cast<void *>(&Pointers));
+ Allocator.enable();
+
+ for (auto [Ptr, IterateSize] : Pointers) {
+ EXPECT_NE(0U, IterateSize)
+ << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+ EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+ << "Pointer " << Ptr
+ << " mismatch between iterate size and usable size.";
+ Allocator.deallocate(Ptr, Origin);
+ }
+}
+
+TEST(ScudoCombinedTest, ExactUsableSize) {
+ using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+ if (!scudo::archSupportsMemoryTagging() ||
+ !scudo::systemDetectsMemoryTagFaultsTestOnly())
+ TEST_SKIP("Only supported on systems that can enable MTE.");
+
+ scudo::enableSystemMemoryTaggingTestOnly();
+
+ using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+template <class AllocatorT>
+void VerifyUsableSizePrimary(AllocatorT &Allocator) {
+ std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+ for (size_t I = 0; I < SizeClasses.size(); I++) {
+ scudo::uptr SizeClass = SizeClasses[I];
+ scudo::uptr StartSize;
+ if (I == 0)
+ StartSize = 1;
+ else
+ StartSize = SizeClasses[I - 1];
+ scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+ for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size
+ << " for size class " << SizeClass;
+ memset(P, 0xff, UsableSize);
+ EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+ reinterpret_cast<scudo::uptr>(P) + UsableSize);
+ Allocator.deallocate(P, Origin);
+ }
+
+ StartSize = UsableSize + 1;
+ }
+
+ std::vector<scudo::uptr> Alignments = {32U, 128U};
+ for (size_t I = 0; I < SizeClasses.size(); I++) {
+ scudo::uptr SizeClass = SizeClasses[I];
+ scudo::uptr AllocSize;
+ if (I == 0)
+ AllocSize = 1;
+ else
+ AllocSize = SizeClasses[I - 1] + 1;
+
+ for (auto Alignment : Alignments) {
+ void *P = Allocator.allocate(AllocSize, Origin, Alignment);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+ reinterpret_cast<scudo::uptr>(P) + UsableSize)
+ << "Failed usable size at allocation size " << AllocSize
+ << " for size class " << SizeClass << " at alignment " << Alignment;
+ Allocator.deallocate(P, Origin);
+ }
+ }
+}
+
+template <class AllocatorT>
+void VerifyUsableSizeSecondary(AllocatorT &Allocator) {
+ const scudo::uptr LargeAllocSize = 996780;
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ void *P = Allocator.allocate(LargeAllocSize, Origin);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ // Assumes that the secondary always rounds up allocations to a page boundary.
+ EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+ PageSize),
+ reinterpret_cast<scudo::uptr>(P) + UsableSize);
+ Allocator.deallocate(P, Origin);
+
+ // Check aligned allocations now.
+ for (scudo::uptr Alignment = 1; Alignment <= 8; Alignment++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Alignment);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+ PageSize),
+ reinterpret_cast<scudo::uptr>(P) + UsableSize)
+ << "Failed usable size at allocation size " << LargeAllocSize
+ << " at alignment " << (1U << Alignment);
+ Allocator.deallocate(P, Origin);
+ }
+}
+
+struct TestFullUsableSizeConfig : TestExactUsableSizeConfig {
+ static const bool ExactUsableSize = false;
+};
+
+TEST(ScudoCombinedTest, FullUsableSize) {
+ using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyUsableSizePrimary<AllocatorT>(*Allocator);
+ VerifyUsableSizeSecondary<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, FullUsableSizeMTE) {
+ if (!scudo::archSupportsMemoryTagging() ||
+ !scudo::systemDetectsMemoryTagFaultsTestOnly())
+ TEST_SKIP("Only supported on systems that can enable MTE.");
+
+ scudo::enableSystemMemoryTaggingTestOnly();
+
+ using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ // When MTE is enabled, exact sizes are returned even though this config
+ // sets ExactUsableSize to false.
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
// Verify that no special quarantine blocks appear in iterateOverChunks.
TEST(ScudoCombinedTest, QuarantineIterateOverChunks) {
using AllocatorT = TestAllocator<TestQuarantineConfig>;
diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
index 612317b..9e5d065 100644
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -588,8 +588,13 @@ TEST_F(ScudoWrappersCTest, MallocInfo) {
EXPECT_EQ(errno, 0);
fclose(F);
EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
- EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
- EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));
+ std::string expected;
+ expected =
+ "<alloc size=\"" + std::to_string(malloc_usable_size(P1)) + "\" count=\"";
+ EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));
+ expected =
+ "<alloc size=\"" + std::to_string(malloc_usable_size(P2)) + "\" count=\"";
+ EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));
free(P1);
free(P2);